
[View code on Github](https://github.com/labmlai/annotated_deep_learning_paper_implementations/tree/master/labml_nn/gan/dcgan/__init__.py)

# Deep Convolutional Generative Adversarial Networks (DCGAN)

This is a PyTorch implementation of the paper [Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks](https://arxiv.org/abs/1511.06434).

This implementation is based on the PyTorch DCGAN Tutorial.

```python
import torch.nn as nn

from labml import experiment
from labml.configs import calculate
from labml_nn.gan.original.experiment import Configs
```

## Convolutional Generator Network

This is similar to the de-convolutional network used for CelebA faces, but modified for MNIST images.
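To see why these layer settings grow a 1×1 latent map into a 28×28 MNIST image, the spatial size after each transposed convolution can be checked with the standard output-size formula. This is a small sketch for illustration only (the helper name `conv_transpose_out` is not part of the module):

```python
# Output size of ConvTranspose2d (with output_padding=0 and dilation=1):
#   out = (in - 1) * stride - 2 * padding + kernel_size
def conv_transpose_out(size, kernel, stride, padding):
    return (size - 1) * stride - 2 * padding + kernel

print(conv_transpose_out(1, 3, 1, 0))   # 1x1   -> 3x3
print(conv_transpose_out(3, 3, 2, 0))   # 3x3   -> 7x7
print(conv_transpose_out(7, 4, 2, 1))   # 7x7   -> 14x14
print(conv_transpose_out(14, 4, 2, 1))  # 14x14 -> 28x28
```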

```python
class Generator(nn.Module):
    def __init__(self):
        super().__init__()
```

The input is 1×1 with 100 channels.

```python
        self.layers = nn.Sequential(
```

This gives 3×3 output.

```python
            nn.ConvTranspose2d(100, 1024, 3, 1, 0, bias=False),
            nn.BatchNorm2d(1024),
            nn.ReLU(True),
```

This gives 7×7.

```python
            nn.ConvTranspose2d(1024, 512, 3, 2, 0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
```

This gives 14×14.

```python
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
```

This gives 28×28.

```python
            nn.ConvTranspose2d(256, 1, 4, 2, 1, bias=False),
            nn.Tanh()
        )

        self.apply(_weights_init)
```

```python
    def forward(self, x):
```

Change from shape `[batch_size, 100]` to `[batch_size, 100, 1, 1]`.

```python
        x = x.unsqueeze(-1).unsqueeze(-1)
        x = self.layers(x)
        return x
```
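As a quick sanity check, feeding a batch of random latent vectors through the generator should give 28×28 single-channel images. This is a minimal sketch (not part of the module), assuming the module above, including `_weights_init` defined further down, has been imported:

```python
import torch

gen = Generator()
z = torch.randn(8, 100)   # a batch of 8 latent vectors
print(gen(z).shape)       # torch.Size([8, 1, 28, 28])
```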

## Convolutional Discriminator Network

```python
class Discriminator(nn.Module):
```

```python
    def __init__(self):
        super().__init__()
```

The input is 28×28 with one channel.

```python
        self.layers = nn.Sequential(
```

This gives 14×14.

```python
            nn.Conv2d(1, 256, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
```

This gives 7×7.

```python
            nn.Conv2d(256, 512, 4, 2, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),
```

This gives 3×3.

```python
            nn.Conv2d(512, 1024, 3, 2, 0, bias=False),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU(0.2, inplace=True),
```

This gives 1×1.

```python
            nn.Conv2d(1024, 1, 3, 1, 0, bias=False),
        )
        self.apply(_weights_init)
```

```python
    def forward(self, x):
        x = self.layers(x)
        return x.view(x.shape[0], -1)
```
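The discriminator mirrors the generator: each strided convolution shrinks the spatial size following out = floor((in + 2·padding − kernel) / stride) + 1, so 28×28 → 14×14 → 7×7 → 3×3 → 1×1, and `forward` returns one unnormalized score per image. A small sketch to verify the shapes (not part of the module, and assuming `_weights_init` below is defined):

```python
import torch

disc = Discriminator()
scores = disc(torch.randn(8, 1, 28, 28))
print(scores.shape)   # torch.Size([8, 1]) - one score per image
```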

Initialize weights: convolution weights are drawn from a normal distribution with mean 0 and standard deviation 0.02, and batch normalization weights from mean 1 and standard deviation 0.02 with biases set to zero, as in the DCGAN paper.

```python
def _weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
```
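`nn.Module.apply` calls `_weights_init` on every submodule, and the class-name check matches both `Conv2d` and `ConvTranspose2d`. A small illustration of which generator submodules the `'Conv'` branch picks up (not part of the module):

```python
names = [type(m).__name__ for m in Generator().modules() if 'Conv' in type(m).__name__]
print(names)  # ['ConvTranspose2d', 'ConvTranspose2d', 'ConvTranspose2d', 'ConvTranspose2d']
```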

We import the simple GAN experiment and register the convolutional networks as the `'cnn'` option for the generator and discriminator configurations.

```python
calculate(Configs.generator, 'cnn', lambda c: Generator().to(c.device))
calculate(Configs.discriminator, 'cnn', lambda c: Discriminator().to(c.device))
```

```python
def main():
    conf = Configs()
    experiment.create(name='mnist_dcgan')
    experiment.configs(conf,
                       {'discriminator': 'cnn',
                        'generator': 'cnn',
                        'label_smoothing': 0.01})
    with experiment.start():
        conf.run()


if __name__ == '__main__':
    main()
```
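After training, samples can be drawn by passing random latent vectors through the generator. This is a hypothetical sketch (checkpoint loading is omitted, since it depends on how the experiment saved the model):

```python
import torch

generator = Generator()       # assumes trained weights have already been loaded
generator.eval()
with torch.no_grad():
    z = torch.randn(16, 100)          # 16 latent vectors
    samples = generator(z)            # [16, 1, 28, 28], values in [-1, 1] from Tanh
    samples = (samples + 1) / 2       # rescale to [0, 1] for display
```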
