
CIFAR10 Experiment for Batch Normalization

import torch.nn as nn

from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
from labml_nn.normalization.batch_norm import BatchNorm

VGG model for CIFAR-10 classification

It derives from the generic VGG-style base class CIFAR10VGGModel and overrides the convolution block to add batch normalization.

class Model(CIFAR10VGGModel):

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            BatchNorm(out_channels),
            nn.ReLU(inplace=True),
        )
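The BatchNorm used here is this repository's annotated implementation. For reference, the same block written with PyTorch's built-in layer would look like the sketch below (an illustrative comparison, not part of the original file; nn.BatchNorm2d plays the same role as the custom layer):

import torch.nn as nn

# Illustrative sketch: the same convolution block built entirely from
# PyTorch's standard layers, with nn.BatchNorm2d in place of the
# repository's annotated BatchNorm implementation.
def conv_block_builtin(in_channels: int, out_channels: int) -> nn.Module:
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    )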

    def __init__(self):
        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])
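Each inner list gives the output channel counts of the convolutional layers in one block, with pooling between blocks, so this specification yields the 13 convolutional layers of a VGG-16 style network. A rough sketch of how a base class like CIFAR10VGGModel could expand such a specification (an assumed structure for illustration, not the actual labml_nn source):

import torch.nn as nn

def build_vgg_features(blocks, conv_block, in_channels=3):
    # Expand a specification like [[64, 64], [128, 128], ...] into a
    # stack of convolution blocks, with 2x2 max-pooling after each block.
    layers = []
    for block in blocks:
        for out_channels in block:
            layers.append(conv_block(in_channels, out_channels))
            in_channels = out_channels
        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*layers)

With five pooling stages, a 32×32 CIFAR-10 image is reduced to a 1×1×512 feature map before the classifier head.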

Create model

@option(CIFAR10Configs.model)
def model(c: CIFAR10Configs):

    return Model().to(c.device)
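@option(CIFAR10Configs.model) registers this function as the calculator for the model configuration, so it is invoked lazily when conf.model is first needed. labml options can also be registered under a name and selected via the configuration dictionary; a hypothetical sketch (the option name 'vgg_custom_bn' is illustrative, not from the original):

# Hypothetical named option; it would be selected by passing
# {'model': 'vgg_custom_bn'} in the experiment.configs(...) dictionary.
@option(CIFAR10Configs.model, 'vgg_custom_bn')
def vgg_custom_bn(c: CIFAR10Configs):
    return Model().to(c.device)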

def main():

Create experiment

    experiment.create(name='cifar10', comment='batch norm')

Create configurations

    conf = CIFAR10Configs()

Load configurations

    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'train_batch_size': 64,
    })
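Keys not listed here keep the defaults defined on CIFAR10Configs and the training-loop configs it builds on. Other settings can be overridden through the same dictionary; for example (the 'epochs' key is assumed to come from the shared training-loop configs, not shown in this file):

    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'train_batch_size': 64,
        'epochs': 10,  # assumed training-loop setting, for illustration
    })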

Start the experiment and run the training loop

    with experiment.start():
        conf.run()

if __name__ == '__main__':
    main()
