docs/helpers/optimizer.html
```python
from typing import Tuple

import torch
from labml import tracker

from labml.configs import BaseConfigs, option, meta_config
```
This creates a configurable optimizer.
Arguments:

* `learning_rate` (float): Learning rate of the optimizer. Defaults to `0.01`.
* `momentum` (float): Momentum of the optimizer. Defaults to `0.5`.
* `parameters`: Model parameters to optimize.
* `d_model` (int): Embedding size of the model (for the Noam optimizer).
* `betas` (Tuple[float, float]): Betas for the Adam optimizer. Defaults to `(0.9, 0.999)`.
* `eps` (float): Epsilon for the Adam/RMSProp optimizers. Defaults to `1e-8`.
* `step_factor` (int): Step factor for the Noam optimizer. Defaults to `1024`.
There is also a better implementation with more options in `labml_nn`; we recommend using that instead: https://nn.labml.ai/optimizers/configs.html
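For orientation, here is a minimal usage sketch. It is an illustration, not part of this module: the model, experiment name, and hyper-parameter values are made up, and the exact point at which labml computes the selected option depends on its experiment machinery.

```python
import torch.nn as nn

from labml import experiment

# A toy model; any iterable of parameters works.
model = nn.Linear(16, 4)

conf = OptimizerConfigs()
conf.parameters = model.parameters()

experiment.create(name='optimizer_demo')
# Select the option registered under 'Adam' and override a default.
experiment.configs(conf, {'optimizer': 'Adam', 'learning_rate': 1e-3})

with experiment.start():
    optimizer = conf.optimizer  # the computed torch.optim.Adam instance
```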
```python
class OptimizerConfigs(BaseConfigs):
    # The computed optimizer; the options below register the choices
    # 'SGD', 'Adam' and 'Noam'.
    optimizer: torch.optim.Adam

    learning_rate: float = 0.01
    momentum: float = 0.5
    parameters: any
    d_model: int
    betas: Tuple[float, float] = (0.9, 0.999)
    eps: float = 1e-8
    step_factor: int = 1024

    def __init__(self):
        # `optimizer` is the primary config of this configs object.
        super().__init__(_primary='optimizer')


# Mark `parameters` as meta data, so it is not treated as a hyper-parameter.
meta_config(OptimizerConfigs.parameters)
```
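Because `optimizer` is an ordinary config option, additional choices can be registered with the same `@option` decorator used below. As a sketch, here is a hypothetical `'AdamW'` option (not part of this module; the `weight_decay` value is illustrative):

```python
@option(OptimizerConfigs.optimizer, 'AdamW')
def adam_w_optimizer(c: OptimizerConfigs):
    # Hypothetical extra option, reusing the same config values as 'Adam'.
    return torch.optim.AdamW(c.parameters, lr=c.learning_rate,
                             betas=c.betas, eps=c.eps, weight_decay=0.01)
```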
```python
@option(OptimizerConfigs.optimizer, 'SGD')
def sgd_optimizer(c: OptimizerConfigs):
    return torch.optim.SGD(c.parameters, c.learning_rate, c.momentum)


@option(OptimizerConfigs.optimizer, 'Adam')
def adam_optimizer(c: OptimizerConfigs):
    return torch.optim.Adam(c.parameters, lr=c.learning_rate,
                            betas=c.betas, eps=c.eps)


class NoamOpt:
    """Optimizer wrapper implementing the Noam learning rate schedule
    (linear warmup followed by inverse square-root decay)."""

    def __init__(self, model_size: int, learning_rate: float, warmup: int,
                 step_factor: int, optimizer):
        self.step_factor = step_factor
        self.optimizer = optimizer
        self.warmup = warmup
        self.learning_rate = learning_rate
        self.model_size = model_size
        self._rate = 0

    def step(self):
        # Scale the global step down by `step_factor` and set the
        # learning rate on all parameter groups before stepping.
        rate = self.rate(tracker.get_global_step() / self.step_factor)
        for p in self.optimizer.param_groups:
            p['lr'] = rate
        self._rate = rate
        self.optimizer.step()

    def rate(self, step):
        # Linear warmup for `warmup` steps, then decay as step ** -0.5.
        factor = self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5))
        return self.learning_rate * factor

    def zero_grad(self):
        self.optimizer.zero_grad()


@option(OptimizerConfigs.optimizer, 'Noam')
def noam_optimizer(c: OptimizerConfigs):
    optimizer = torch.optim.Adam(c.parameters, lr=0.0, betas=c.betas, eps=c.eps)
    return NoamOpt(c.d_model, 1, 2000, c.step_factor, optimizer)


def _test_noam_optimizer():
    import matplotlib.pyplot as plt
    import numpy as np

    # `step_factor` and `optimizer` are not used by `rate`,
    # so dummy values are fine here.
    opts = [NoamOpt(512, 1, 4000, 1, None),
            NoamOpt(512, 1, 8000, 1, None),
            NoamOpt(2048, 1, 2000, 1, None)]
    plt.plot(np.arange(1, 20000), [[opt.rate(i) for opt in opts] for i in range(1, 20000)])
    plt.legend(["512:4000", "512:8000", "2048:2000"])
    plt.title("Optimizer")
    plt.show()


if __name__ == '__main__':
    _test_noam_optimizer()
```
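For intuition, `NoamOpt.rate` computes `learning_rate * model_size ** -0.5 * min(step ** -0.5, step * warmup ** -1.5)`: the rate grows linearly for `warmup` steps and then decays as `step ** -0.5`, peaking exactly at `step == warmup`. A small self-contained check of that shape (the `step_factor` and `optimizer` arguments are unused by `rate`):

```python
opt = NoamOpt(model_size=512, learning_rate=1, warmup=4000,
              step_factor=1024, optimizer=None)

peak = opt.rate(4000)
# The schedule peaks at the warmup step: 512 ** -0.5 * 4000 ** -0.5 ≈ 6.99e-4
assert opt.rate(3999) < peak > opt.rate(4001)
print(f'peak rate at step 4000: {peak:.2e}')
```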