class NAdam(Optimizer):
    # Implements the NAdam algorithm (Adam with Nesterov momentum).
    # The class docstring is attached separately via ``NAdam.__doc__ = ...``
    # below the class definition.

    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, momentum_decay=4e-3, *, foreach: Optional[bool] = None,
                 differentiable: bool = False):
        """Construct the optimizer, validating every hyper-parameter.

        Args:
            params: iterable of parameters to optimize or dicts defining
                parameter groups.
            lr (float): learning rate (must be >= 0).
            betas (Tuple[float, float]): EMA coefficients, each in [0, 1).
            eps (float): denominator stabilizer (must be >= 0).
            weight_decay (float): L2 penalty coefficient (must be >= 0).
            momentum_decay (float): decay rate psi for the momentum
                schedule mu_t (must be >= 0).
            foreach (bool, optional): force (or forbid) the multi-tensor
                implementation; None lets the backend decide.
            differentiable (bool): make the step autograd-traceable.

        Raises:
            ValueError: if any hyper-parameter is outside its valid range.
        """
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= momentum_decay:
            raise ValueError("Invalid momentum_decay value: {}".format(momentum_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, momentum_decay=momentum_decay,
                        foreach=foreach, differentiable=differentiable)
        super().__init__(params, defaults)

    def __setstate__(self, state):
        """Restore optimizer state, upgrading checkpoints from older versions.

        Older checkpoints may lack the ``foreach``/``differentiable`` group
        keys and may store ``step``/``mu_product`` as Python scalars rather
        than tensors; both are migrated here so the rest of the code can
        assume the current representation.
        """
        super().__setstate__(state)
        for group in self.param_groups:
            # Newer options may be absent in a pickled older optimizer.
            group.setdefault('foreach', None)
            group.setdefault('differentiable', False)
        state_values = list(self.state.values())
        # Checking the first entry is sufficient: all entries share one format.
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
        if not step_is_tensor:
            for s in state_values:
                s['step'] = torch.tensor(float(s['step']))
        mu_product_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['mu_product'])
        if not mu_product_is_tensor:
            for s in state_values:
                s['mu_product'] = torch.tensor(s['mu_product'])

    def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs,
                    mu_products, state_steps):
        """Collect per-parameter tensors for one param group into the given
        output lists, lazily creating optimizer state on first use.

        Only parameters that currently have a gradient participate.

        Raises:
            RuntimeError: if any gradient is sparse (unsupported by NAdam).
        """
        for p in group['params']:
            if p.grad is not None:
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('NAdam does not support sparse gradients')
                grads.append(p.grad)
                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    # step count and running product of mu_t are stored as
                    # 0-dim tensors (see __setstate__ migration above).
                    state['step'] = torch.tensor(0.)
                    state['mu_product'] = torch.tensor(1.)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                mu_products.append(state['mu_product'])
                state_steps.append(state['step'])

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            mu_products = []
            state_steps = []
            beta1, beta2 = group['betas']

            self._init_group(group, params_with_grad, grads, exp_avgs,
                             exp_avg_sqs, mu_products, state_steps)

            # Delegate the actual math to the functional API, which picks
            # the single-tensor or foreach implementation.
            nadam(params_with_grad,
                  grads,
                  exp_avgs,
                  exp_avg_sqs,
                  mu_products,
                  state_steps,
                  beta1=beta1,
                  beta2=beta2,
                  lr=group['lr'],
                  weight_decay=group['weight_decay'],
                  momentum_decay=group['momentum_decay'],
                  eps=group['eps'],
                  foreach=group['foreach'],
                  differentiable=group['differentiable'])

        return loss
NAdam.__doc__ = r"""Implements NAdam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma_t \text{ (lr)}, \: \beta_1,\beta_2 \text{ (betas)},
                \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)}                   \\
            &\hspace{13mm} \: \lambda \text{ (weight decay)}, \:\psi \text{ (momentum decay)}    \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0 \leftarrow 0 \text{ ( second moment)}                                 \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                             \\
            &\hspace{5mm} \mu_t \leftarrow \beta_1 \big(1 - \frac{1}{2}  0.96^{t \psi} \big)     \\
            &\hspace{5mm} \mu_{t+1} \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{(t+1)\psi}\big)\\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow \mu_{t+1} m_t/(1-\prod_{i=1}^{t+1}\mu_i)\\[-1.ex]
            & \hspace{11mm} + (1-\mu_t) g_t /(1-\prod_{i=1}^{t} \mu_{i})                         \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Incorporating Nesterov Momentum into Adam`_.
    """ + r"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        momentum_decay (float, optional): momentum momentum_decay (default: 4e-3)
    {foreach}
    {differentiable}

    .. _Incorporating Nesterov Momentum into Adam:
        https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ

    """.format(foreach=_foreach_doc, differentiable=_differentiable_doc)


def nadam(params: List[Tensor],
          grads: List[Tensor],
          exp_avgs: List[Tensor],
          exp_avg_sqs: List[Tensor],
          mu_products: List[Tensor],
          state_steps: List[Tensor],
          # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
          # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
          foreach: Optional[bool] = None,
          differentiable: bool = False,
          *,
          beta1: float,
          beta2: float,
          lr: float,
          weight_decay: float,
          momentum_decay: float,
          eps: float):
    r"""Functional API that performs NAdam algorithm computation.

    See :class:`~torch.optim.NAdam` for details.
    """
    # Guard against the pre-tensor-state API: callers must pass 0-dim tensors.
    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")

    if not all(isinstance(t, torch.Tensor) for t in mu_products):
        raise RuntimeError("API has changed, `mu_products` argument must contain a list of singleton tensors")

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_nadam
    else:
        func = _single_tensor_nadam

    func(params,
         grads,
         exp_avgs,
         exp_avg_sqs,
         mu_products,
         state_steps,
         beta1=beta1,
         beta2=beta2,
         lr=lr,
         weight_decay=weight_decay,
         momentum_decay=momentum_decay,
         eps=eps,
         differentiable=differentiable)


def _single_tensor_nadam(params: List[Tensor],
                         grads: List[Tensor],
                         exp_avgs: List[Tensor],
                         exp_avg_sqs: List[Tensor],
                         mu_products: List[Tensor],
                         state_steps: List[Tensor],
                         *,
                         beta1: float,
                         beta2: float,
                         lr: float,
                         weight_decay: float,
                         momentum_decay: float,
                         eps: float,
                         differentiable: bool):
    """Per-parameter (loop-based) NAdam update; mutates params and state in place."""
    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        mu_product = mu_products[i]
        step_t = state_steps[i]

        # update step
        step_t += 1
        step = _get_value(step_t)

        bias_correction2 = 1 - beta2 ** step

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        # calculate the momentum cache \mu^{t} and \mu^{t+1}
        mu = beta1 * (1. - 0.5 * (0.96 ** (step * momentum_decay)))
        mu_next = beta1 * (1. - 0.5 * (0.96 ** ((step + 1) * momentum_decay)))

        # update mu_product (running \prod_{i=1}^{t} \mu_i)
        mu_product *= mu

        # decay the first and second moment running average coefficient
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        denom = exp_avg_sq.div(bias_correction2).sqrt()

        if differentiable:
            denom = denom.add(eps)
            # Make autograd track the operations
            # by updating the grad and exp_avg directly and not using the
            # scalar "value" argument of addcdiv.
            mu_product_next = mu_product * mu_next
            grad = grad * (-lr * (1. - mu) / (1. - mu_product))
            # BUGFIX: this term must scale exp_avg (not grad) by
            # -lr * mu_next / (1 - mu_product_next), mirroring the eager
            # branch below; the previous code scaled grad by
            # -lr * (1 - mu_next) / (1 - mu_product_next), which diverges
            # from the NAdam update rule.
            exp_avg = exp_avg * (-lr * mu_next / (1. - mu_product_next))
            param.addcdiv_(grad, denom)
            param.addcdiv_(exp_avg, denom)
        else:
            mu_product_next = _get_value(mu_product) * mu_next
            denom.add_(eps)
            # theta_t = theta_{t-1} - lr * [ (1-mu_t) g_t / (1-prod mu)
            #                               + mu_{t+1} m_t / (1-prod mu * mu_{t+1}) ] / denom
            param.addcdiv_(grad, denom, value=(-lr * (1. - mu) / (1. - _get_value(mu_product))))
            param.addcdiv_(exp_avg, denom, value=(-lr * mu_next) / (1. - mu_product_next))


def _multi_tensor_nadam(params: List[Tensor],
                        grads: List[Tensor],
                        exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor],
                        mu_products: List[Tensor],
                        state_steps: List[Tensor],
                        *,
                        beta1: float,
                        beta2: float,
                        lr: float,
                        weight_decay: float,
                        momentum_decay: float,
                        eps: float,
                        differentiable: bool):
    """Batched (foreach) NAdam update; mathematically equivalent to the
    single-tensor path but uses torch._foreach_* kernels per device/dtype group."""
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = _group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps])
    for (grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs,
         grouped_mu_products, grouped_state_steps) in grouped_tensors.values():

        # update steps
        torch._foreach_add_(grouped_state_steps, 1)

        bias_correction2 = [1 - beta2 ** _get_value(step) for step in grouped_state_steps]
        mus = [beta1 * (1. - 0.5 * (0.96 ** (_get_value(step) * momentum_decay)))
               for step in grouped_state_steps]
        mu_nexts = [beta1 * (1. - 0.5 * (0.96 ** ((_get_value(step) + 1) * momentum_decay)))
                    for step in grouped_state_steps]

        # update mu_products
        torch._foreach_mul_(grouped_mu_products, mus)

        if weight_decay != 0:
            grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        torch._foreach_mul_(grouped_exp_avgs, beta1)
        torch._foreach_add_(grouped_exp_avgs, grouped_grads, alpha=1 - beta1)

        torch._foreach_mul_(grouped_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2)

        exp_avg_sq_sqrt = torch._foreach_sqrt(grouped_exp_avg_sqs)
        bias_correction_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2]
        torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt)
        denom = torch._foreach_add(exp_avg_sq_sqrt, eps)

        step_size_grads = _stack_if_compiling(
            [(lr * (1. - mu) / (1. - _get_value(mu_product))) * -1
             for mu_product, mu in zip(grouped_mu_products, mus)])
        step_size_expavg = _stack_if_compiling(
            [(lr * mu_next / (1. - _get_value(mu_product) * mu_next)) * -1
             for mu_product, mu_next in zip(grouped_mu_products, mu_nexts)])

        torch._foreach_addcdiv_(grouped_params, grouped_grads, denom, step_size_grads)
        torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, denom, step_size_expavg)
Docs
Access comprehensive developer documentation for PyTorch
To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook's Cookies Policy applies. Learn more, including about available controls: Cookies Policy.