Back to Annotated Deep Learning Paper Implementations

In-paint images using stable diffusion with a prompt

docs/diffusion/stable_diffusion/scripts/in_paint.html

latest — 5.5 KB
Original Source

home / diffusion / stable_diffusion / scripts

View code on Github

#

In-paint images using stable diffusion with a prompt

11importargparse12frompathlibimportPath13fromtypingimportOptional1415importtorch1617fromlabmlimportlab,monit18fromlabml\_nn.diffusion.stable\_diffusion.latent\_diffusionimportLatentDiffusion19fromlabml\_nn.diffusion.stable\_diffusion.samplerimportDiffusionSampler20fromlabml\_nn.diffusion.stable\_diffusion.sampler.ddimimportDDIMSampler21fromlabml\_nn.diffusion.stable\_diffusion.utilimportload\_model,save\_images,load\_img,set\_seed

#

Image in-painting class

24classInPaint:

#

28model:LatentDiffusion29sampler:DiffusionSampler

#

  • checkpoint_path is the path of the checkpoint
  • ddim_steps is the number of sampling steps
  • ddim_eta is the DDIM sampling η constant
31def\_\_init\_\_(self,\*,checkpoint\_path:Path,32ddim\_steps:int=50,33ddim\_eta:float=0.0):

#

39self.ddim\_steps=ddim\_steps

#

Load latent diffusion model

42self.model=load\_model(checkpoint\_path)

#

Get device

44self.device=torch.device("cuda:0")iftorch.cuda.is\_available()elsetorch.device("cpu")

#

Move the model to device

46self.model.to(self.device)

#

Initialize DDIM sampler

49self.sampler=DDIMSampler(self.model,50n\_steps=ddim\_steps,51ddim\_eta=ddim\_eta)

#

  • dest_path is the path to store the generated images
  • orig_img is the image to transform
  • strength specifies how much of the original image should not be preserved
  • batch_size is the number of images to generate in a batch
  • prompt is the prompt to generate images with
  • uncond_scale is the unconditional guidance scale $s$. This is used for $\epsilon_\theta(x_t, c) = s\,\epsilon_\text{cond}(x_t, c) + (s - 1)\,\epsilon_\text{cond}(x_t, c_u)$
[email protected]\_grad()54def\_\_call\_\_(self,\*,55dest\_path:str,56orig\_img:str,57strength:float,58batch\_size:int=3,59prompt:str,60uncond\_scale:float=5.0,61mask:Optional[torch.Tensor]=None,62):

#

Make a batch of prompts

73prompts=batch\_size\*[prompt]

#

Load image

75orig\_image=load\_img(orig\_img).to(self.device)

#

Encode the image in the latent space and make batch_size copies of it

77orig=self.model.autoencoder\_encode(orig\_image).repeat(batch\_size,1,1,1)

#

If mask is not provided, we set a sample mask to preserve the bottom half of the image

80ifmaskisNone:81mask=torch.zeros\_like(orig,device=self.device)82mask[:,:,mask.shape[2]//2:,:]=1.83else:84mask=mask.to(self.device)

#

Noise diffuse the original image

86orig\_noise=torch.randn(orig.shape,device=self.device)

#

Get the number of steps to diffuse the original

89assert0.\<=strength\<=1.,'can only work with strength in [0.0, 1.0]'90t\_index=int(strength\*self.ddim\_steps)

#

AMP auto casting

93withtorch.cuda.amp.autocast():

#

In unconditional scaling is not 1 get the embeddings for empty prompts (no conditioning).

95ifuncond\_scale!=1.0:96un\_cond=self.model.get\_text\_conditioning(batch\_size\*[""])97else:98un\_cond=None

#

Get the prompt embeddings

100cond=self.model.get\_text\_conditioning(prompts)

#

Add noise to the original image

102x=self.sampler.q\_sample(orig,t\_index,noise=orig\_noise)

#

Reconstruct from the noisy image, while preserving the masked area

104x=self.sampler.paint(x,cond,t\_index,105orig=orig,106mask=mask,107orig\_noise=orig\_noise,108uncond\_scale=uncond\_scale,109uncond\_cond=un\_cond)

#

Decode the image from the autoencoder

111images=self.model.autoencoder\_decode(x)

#

Save images

114save\_images(images,dest\_path,'paint\_')

#

CLI

117defmain():

#

121parser=argparse.ArgumentParser()122123parser.add\_argument(124"--prompt",125type=str,126nargs="?",127default="a painting of a cute monkey playing guitar",128help="the prompt to render"129)130131parser.add\_argument(132"--orig-img",133type=str,134nargs="?",135help="path to the input image"136)137138parser.add\_argument("--batch\_size",type=int,default=4,help="batch size",)139parser.add\_argument("--steps",type=int,default=50,help="number of sampling steps")140141parser.add\_argument("--scale",type=float,default=5.0,142help="unconditional guidance scale: "143"eps = eps(x, empty) + scale \* (eps(x, cond) - eps(x, empty))")144145parser.add\_argument("--strength",type=float,default=0.75,146help="strength for noise: "147" 1.0 corresponds to full destruction of information in init image")148149opt=parser.parse\_args()150set\_seed(42)151152in\_paint=InPaint(checkpoint\_path=lab.get\_data\_path()/'stable-diffusion'/'sd-v1-4.ckpt',153ddim\_steps=opt.steps)154155withmonit.section('Generate'):156in\_paint(dest\_path='outputs',157orig\_img=opt.orig\_img,158strength=opt.strength,159batch\_size=opt.batch\_size,160prompt=opt.prompt,161uncond\_scale=opt.scale)

#

165if\_\_name\_\_=="\_\_main\_\_":166main()

labml.ai