index | package | name | docstring | code | signature
---|---|---|---|---|---
715,402 |
pytorch_optimizer.optimizer.adams
|
AdamS
|
Adam with stable weight decay.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ams_bound: bool. whether to use the AMSBound variant.
:param r: float. EMA factor. Values between 0.9 and 0.99 are preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdamS(Optimizer, BaseOptimizer):
r"""Adam with stable weight decay.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ams_bound: bool. whether to use the AMSBound variant.
:param r: float. EMA factor. Values between 0.9 and 0.99 are preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 1e-4,
weight_decouple: bool = True,
fixed_decay: bool = False,
ams_bound: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdamS'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
param_size: int = 0
exp_avg_sq_hat_sum: float = 0.0
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
param_size += p.numel()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
state['step'] += 1
bias_correction2: float = 1.0 - beta2 ** state['step']
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
if group['ams_bound']:
max_exp_avg_sq = state['max_exp_avg_sq']
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
exp_avg_sq_hat = max_exp_avg_sq
else:
exp_avg_sq_hat = exp_avg_sq
exp_avg_sq_hat_sum += exp_avg_sq_hat.sum() / bias_correction2
if param_size == 0:
raise ZeroParameterSizeError()
exp_avg_sq_hat_mean: float = math.sqrt(exp_avg_sq_hat_sum / param_size) + self.defaults['eps']
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
ratio=1.0 / exp_avg_sq_hat_mean,
)
bias_correction1: float = 1.0 - beta1 ** state['step']
bias_correction2: float = 1.0 - beta2 ** state['step']
exp_avg_sq_hat = state['max_exp_avg_sq'] if group['ams_bound'] else state['exp_avg_sq']
exp_avg_sq_hat.div_(bias_correction2)
de_nom = exp_avg_sq_hat.sqrt().add_(group['eps'])
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['lr'],
bias_correction1=bias_correction1,
)
p.addcdiv_(state['exp_avg'], de_nom, value=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0001, weight_decouple: bool = True, fixed_decay: bool = False, ams_bound: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
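A minimal usage sketch for the AdamS record above. It assumes the class is importable from the module path shown in the package column (`pytorch_optimizer.optimizer.adams`); the model and data are placeholders for illustration only.

```python
import torch
from pytorch_optimizer.optimizer.adams import AdamS

# toy model and data, purely for illustration
model = torch.nn.Linear(16, 1)
optimizer = AdamS(model.parameters(), lr=1e-3, weight_decay=1e-4, weight_decouple=True)

x, y = torch.randn(64, 16), torch.randn(64, 1)
for _ in range(10):
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()  # weight decay is rescaled by the global second-moment mean ("stable" decay)
```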
715,404 |
pytorch_optimizer.optimizer.adams
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 1e-4,
weight_decouple: bool = True,
fixed_decay: bool = False,
ams_bound: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0001, weight_decouple: bool = True, fixed_decay: bool = False, ams_bound: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
715,407 |
pytorch_optimizer.optimizer.adams
|
__str__
| null |
def __str__(self) -> str:
return 'AdamS'
|
(self) -> str
|
715,428 |
pytorch_optimizer.optimizer.adams
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
param_size: int = 0
exp_avg_sq_hat_sum: float = 0.0
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
param_size += p.numel()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
state['step'] += 1
bias_correction2: float = 1.0 - beta2 ** state['step']
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
if group['ams_bound']:
max_exp_avg_sq = state['max_exp_avg_sq']
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
exp_avg_sq_hat = max_exp_avg_sq
else:
exp_avg_sq_hat = exp_avg_sq
exp_avg_sq_hat_sum += exp_avg_sq_hat.sum() / bias_correction2
if param_size == 0:
raise ZeroParameterSizeError()
exp_avg_sq_hat_mean: float = math.sqrt(exp_avg_sq_hat_sum / param_size) + self.defaults['eps']
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
ratio=1.0 / exp_avg_sq_hat_mean,
)
bias_correction1: float = 1.0 - beta1 ** state['step']
bias_correction2: float = 1.0 - beta2 ** state['step']
exp_avg_sq_hat = state['max_exp_avg_sq'] if group['ams_bound'] else state['exp_avg_sq']
exp_avg_sq_hat.div_(bias_correction2)
de_nom = exp_avg_sq_hat.sqrt().add_(group['eps'])
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['lr'],
bias_correction1=bias_correction1,
)
p.addcdiv_(state['exp_avg'], de_nom, value=-step_size)
return loss
|
(self)
|
715,443 |
pytorch_optimizer.optimizer.adan
|
Adan
|
Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. decoupled weight decay.
:param max_grad_norm: float. max gradient norm to clip.
:param use_gc: bool. use gradient centralization.
:param r: float. EMA factor. Values between 0.9 and 0.99 are preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param eps: float. term added to the denominator to improve numerical stability.
|
class Adan(Optimizer, BaseOptimizer):
r"""Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. decoupled weight decay.
:param max_grad_norm: float. max gradient norm to clip.
:param use_gc: bool. use gradient centralization.
:param r: float. EMA factor. Values between 0.9 and 0.99 are preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.98, 0.92, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = False,
max_grad_norm: float = 0.0,
use_gc: bool = False,
r: float = 0.95,
adanorm: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(max_grad_norm, 'max_grad_norm')
self.validate_non_negative(eps, 'eps')
self.max_grad_norm = max_grad_norm
self.use_gc = use_gc
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'max_grad_norm': max_grad_norm,
'adanorm': adanorm,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Adan'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['exp_avg_diff'] = torch.zeros_like(p)
state['previous_grad'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def get_global_gradient_norm(self) -> Union[torch.Tensor, float]:
if self.defaults['max_grad_norm'] == 0.0:
return 1.0
global_grad_norm = get_global_gradient_norm(self.param_groups, self.param_groups[0]['params'][0].device)
global_grad_norm.sqrt_().add_(self.defaults['eps'])
return torch.clamp(self.defaults['max_grad_norm'] / global_grad_norm, max=1.0)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
clip_global_grad_norm = self.get_global_gradient_norm()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2, beta3 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2: float = 1.0 - beta2 ** group['step']
bias_correction3_sq: float = math.sqrt(1.0 - beta3 ** group['step'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['exp_avg_diff'] = torch.zeros_like(p)
state['previous_grad'] = grad.clone().mul_(-clip_global_grad_norm)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
grad.mul_(clip_global_grad_norm)
if self.use_gc:
centralize_gradient(grad, gc_conv_only=False)
grad_diff = state['previous_grad']
grad_diff.add_(grad)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_diff']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_diff.mul_(beta2).add_(grad_diff, alpha=1.0 - beta2)
grad_diff.mul_(beta2).add_(grad)
exp_avg_sq.mul_(beta3).addcmul_(grad_diff, grad_diff, value=1.0 - beta3)
de_nom = exp_avg_sq.sqrt().div_(bias_correction3_sq).add_(group['eps'])
if group['weight_decouple']:
p.mul_(1.0 - group['lr'] * group['weight_decay'])
p.addcdiv_(exp_avg, de_nom, value=-group['lr'] / bias_correction1)
p.addcdiv_(exp_avg_diff, de_nom, value=-group['lr'] * beta2 / bias_correction2)
if not group['weight_decouple']:
p.div_(1.0 + group['lr'] * group['weight_decay'])
state['previous_grad'].copy_(-grad)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.98, 0.92, 0.99), weight_decay: float = 0.0, weight_decouple: bool = False, max_grad_norm: float = 0.0, use_gc: bool = False, r: float = 0.95, adanorm: bool = False, eps: float = 1e-08)
|
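A usage sketch for the Adan record, again hedged on the module path from the package column. Note that `betas` is a three-element tuple (momentum, gradient-difference, and second-moment EMAs) and that a positive `max_grad_norm` enables the built-in global gradient-norm clipping seen in `get_global_gradient_norm`.

```python
import torch
from pytorch_optimizer.optimizer.adan import Adan

model = torch.nn.Linear(16, 4)
optimizer = Adan(
    model.parameters(),
    lr=1e-3,
    betas=(0.98, 0.92, 0.99),  # three EMA coefficients, not two
    weight_decay=0.02,
    weight_decouple=True,      # multiplicative decay applied before the update
    max_grad_norm=1.0,         # 0.0 disables the global clipping branch
)

x, y = torch.randn(32, 16), torch.randint(0, 4, (32,))
optimizer.zero_grad()
torch.nn.functional.cross_entropy(model(x), y).backward()
optimizer.step()
```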
715,445 |
pytorch_optimizer.optimizer.adan
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.98, 0.92, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = False,
max_grad_norm: float = 0.0,
use_gc: bool = False,
r: float = 0.95,
adanorm: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(max_grad_norm, 'max_grad_norm')
self.validate_non_negative(eps, 'eps')
self.max_grad_norm = max_grad_norm
self.use_gc = use_gc
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'max_grad_norm': max_grad_norm,
'adanorm': adanorm,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.98, 0.92, 0.99), weight_decay: float = 0.0, weight_decouple: bool = False, max_grad_norm: float = 0.0, use_gc: bool = False, r: float = 0.95, adanorm: bool = False, eps: float = 1e-08)
|
715,448 |
pytorch_optimizer.optimizer.adan
|
__str__
| null |
def __str__(self) -> str:
return 'Adan'
|
(self) -> str
|
715,460 |
pytorch_optimizer.optimizer.adan
|
get_global_gradient_norm
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
clip_global_grad_norm = self.get_global_gradient_norm()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2, beta3 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2: float = 1.0 - beta2 ** group['step']
bias_correction3_sq: float = math.sqrt(1.0 - beta3 ** group['step'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['exp_avg_diff'] = torch.zeros_like(p)
state['previous_grad'] = grad.clone().mul_(-clip_global_grad_norm)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
grad.mul_(clip_global_grad_norm)
if self.use_gc:
centralize_gradient(grad, gc_conv_only=False)
grad_diff = state['previous_grad']
grad_diff.add_(grad)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_diff']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_diff.mul_(beta2).add_(grad_diff, alpha=1.0 - beta2)
grad_diff.mul_(beta2).add_(grad)
exp_avg_sq.mul_(beta3).addcmul_(grad_diff, grad_diff, value=1.0 - beta3)
de_nom = exp_avg_sq.sqrt().div_(bias_correction3_sq).add_(group['eps'])
if group['weight_decouple']:
p.mul_(1.0 - group['lr'] * group['weight_decay'])
p.addcdiv_(exp_avg, de_nom, value=-group['lr'] / bias_correction1)
p.addcdiv_(exp_avg_diff, de_nom, value=-group['lr'] * beta2 / bias_correction2)
if not group['weight_decouple']:
p.div_(1.0 + group['lr'] * group['weight_decay'])
state['previous_grad'].copy_(-grad)
return loss
|
(self) -> Union[torch.Tensor, float]
|
715,485 |
pytorch_optimizer.optimizer.aggmo
|
AggMo
|
Aggregated Momentum: Stability Through Passive Damping.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
|
class AggMo(Optimizer, BaseOptimizer):
r"""Aggregated Momentum: Stability Through Passive Damping.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.0, 0.9, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AggMo'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['momentum_buffer'] = {beta: torch.zeros_like(p) for beta in group['betas']}
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
betas = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['momentum_buffer'] = {beta: torch.zeros_like(p) for beta in betas}
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
for beta in betas:
buf = state['momentum_buffer'][beta]
buf.mul_(beta).add_(grad)
p.add_(buf, alpha=-group['lr'] / len(betas))
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.0, 0.9, 0.99), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False)
|
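AggMo keeps one velocity buffer per entry in `betas` and moves the parameter by the averaged buffers (`-lr / len(betas)` times their sum). A tiny single-parameter sketch of that behavior, assuming the module path from the package column:

```python
import torch
from pytorch_optimizer.optimizer.aggmo import AggMo

p = torch.nn.Parameter(torch.tensor([1.0, -2.0]))
optimizer = AggMo([p], lr=0.1, betas=(0.0, 0.9, 0.99))

# one step: each beta maintains its own momentum buffer, and the parameter
# moves by -lr / len(betas) times the sum of the buffers
loss = (p ** 2).sum()
loss.backward()
optimizer.step()
print(p.data)  # a damped step mixing short- and long-horizon momentum
```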
715,487 |
pytorch_optimizer.optimizer.aggmo
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.0, 0.9, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.0, 0.9, 0.99), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False)
|
715,490 |
pytorch_optimizer.optimizer.aggmo
|
__str__
| null |
def __str__(self) -> str:
return 'AggMo'
|
(self) -> str
|
715,526 |
pytorch_optimizer.optimizer.alig
|
AliG
|
Adaptive Learning Rates for Interpolation with Gradients.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param max_lr: Optional[float]. max learning rate.
:param projection_fn: Callable. projection function to enforce constraints.
:param momentum: float. momentum.
:param adjusted_momentum: bool. if True, use PyTorch-style momentum instead of standard Nesterov momentum.
|
class AliG(Optimizer, BaseOptimizer):
r"""Adaptive Learning Rates for Interpolation with Gradients.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param max_lr: Optional[float]. max learning rate.
:param projection_fn: Callable. projection function to enforce constraints.
:param momentum: float. momentum.
:param adjusted_momentum: bool. if True, use PyTorch-style momentum instead of standard Nesterov momentum.
"""
def __init__(
self,
params: PARAMETERS,
max_lr: Optional[float] = None,
projection_fn: Optional[Callable] = None,
momentum: float = 0.0,
adjusted_momentum: bool = False,
):
self.validate_learning_rate(max_lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.projection_fn = projection_fn
defaults: DEFAULTS = {'max_lr': max_lr, 'adjusted_momentum': adjusted_momentum, 'momentum': momentum}
super().__init__(params, defaults)
if self.projection_fn is not None:
self.projection_fn()
def __str__(self) -> str:
return 'AliG'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if group['momentum'] > 0.0:
state['momentum_buffer'] = torch.zeros_like(p)
@torch.no_grad()
def compute_step_size(self, loss: float) -> float:
r"""Compute step_size."""
global_grad_norm = get_global_gradient_norm(self.param_groups, torch.device('cpu'))
global_grad_norm.add_(1e-6)
return loss / global_grad_norm.item()
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
if closure is None:
raise NoClosureError('AliG', '(e.g. `optimizer.step(lambda: float(loss))`).')
loss = closure()
un_clipped_step_size: float = self.compute_step_size(loss)
for group in self.param_groups:
step_size = group['step_size'] = (
min(un_clipped_step_size, group['max_lr']) if group['max_lr'] is not None else un_clipped_step_size
)
momentum = group['momentum']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0 and momentum > 0.0:
state['momentum_buffer'] = torch.zeros_like(p)
p.add_(grad, alpha=-step_size)
if momentum > 0.0:
buffer = state['momentum_buffer']
if group['adjusted_momentum']:
buffer.mul_(momentum).sub_(grad)
p.add_(buffer, alpha=step_size * momentum)
else:
buffer.mul_(momentum).add_(grad, alpha=-step_size)
p.add_(buffer, alpha=momentum)
if self.projection_fn is not None:
self.projection_fn()
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], max_lr: Optional[float] = None, projection_fn: Optional[Callable] = None, momentum: float = 0.0, adjusted_momentum: bool = False)
|
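AliG's `step` requires a closure returning the current loss value (otherwise it raises `NoClosureError`), because the step size is computed as `loss / (||grad|| + 1e-6)` and clipped at `max_lr`. A minimal sketch, assuming the module path from the package column:

```python
import torch
from pytorch_optimizer.optimizer.alig import AliG

model = torch.nn.Linear(8, 1)
optimizer = AliG(model.parameters(), max_lr=0.1, momentum=0.9)

x, y = torch.randn(16, 8), torch.randn(16, 1)
optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(x), y)
loss.backward()
# the closure only has to return the loss value; gradients are already populated
optimizer.step(lambda: float(loss))
```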
715,528 |
pytorch_optimizer.optimizer.alig
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
max_lr: Optional[float] = None,
projection_fn: Optional[Callable] = None,
momentum: float = 0.0,
adjusted_momentum: bool = False,
):
self.validate_learning_rate(max_lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.projection_fn = projection_fn
defaults: DEFAULTS = {'max_lr': max_lr, 'adjusted_momentum': adjusted_momentum, 'momentum': momentum}
super().__init__(params, defaults)
if self.projection_fn is not None:
self.projection_fn()
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], max_lr: Optional[float] = None, projection_fn: Optional[Callable] = None, momentum: float = 0.0, adjusted_momentum: bool = False)
|
715,531 |
pytorch_optimizer.optimizer.alig
|
__str__
| null |
def __str__(self) -> str:
return 'AliG'
|
(self) -> str
|
715,542 |
pytorch_optimizer.optimizer.alig
|
compute_step_size
|
Compute step_size.
| null |
(self, loss: float) -> float
|
715,568 |
pytorch_optimizer.optimizer.amos
|
Amos
|
An Adam-style Optimizer with Adaptive Weight Decay towards Model-Oriented Scale.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param beta: float. A float slightly < 1. We recommend setting `1 - beta` to the same order of magnitude
as the learning rate. Similar to beta2 in Adam.
:param momentum: float. Exponential decay rate for optional moving average of updates.
:param extra_l2: float. Additional L2 regularization.
:param c_coef: float. Coefficient for decay_factor_c.
:param d_coef: float. Coefficient for decay_factor_d.
:param eps: float. term added to the denominator to improve numerical stability.
|
class Amos(Optimizer, BaseOptimizer):
r"""An Adam-style Optimizer with Adaptive Weight Decay towards Model-Oriented Scale.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param beta: float. A float slightly < 1. We recommend setting `1 - beta` to the same order of magnitude
as the learning rate. Similar to beta2 in Adam.
:param momentum: float. Exponential decay rate for optional moving average of updates.
:param extra_l2: float. Additional L2 regularization.
:param c_coef: float. Coefficient for decay_factor_c.
:param d_coef: float. Coefficient for decay_factor_d.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
beta: float = 0.999,
momentum: float = 0.0,
extra_l2: float = 0.0,
c_coef: float = 0.25,
d_coef: float = 0.25,
eps: float = 1e-18,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0, range_type='[)')
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[)')
self.validate_non_negative(extra_l2, 'extra_l2')
self.validate_non_negative(eps, 'eps')
self.c_coef = c_coef
self.d_coef = d_coef
defaults: DEFAULTS = {
'lr': lr,
'beta': beta,
'momentum': momentum,
'extra_l2': extra_l2,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Amos'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg_sq'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
state['decay'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
if group['momentum'] > 0.0:
state['exp_avg'] = torch.zeros_like(p)
@staticmethod
def get_scale(p: torch.Tensor) -> float:
r"""Get expected scale for model weights."""
if len(p.shape) == 1: # expected 'bias'
return 0.5
if len(p.shape) == 2: # expected Embedding, Linear, ...
return math.sqrt(2 / p.size(1))
return math.sqrt(1 / p.size(1))
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
momentum, beta = group['momentum'], group['beta']
lr_sq: float = math.sqrt(group['lr'])
bias_correction: float = 1.0 - beta ** group['step']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg_sq'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
state['decay'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
if group['momentum'] > 0.0:
state['exp_avg'] = torch.zeros_like(p)
g2 = grad.pow(2).mean()
init_lr: float = group['lr'] * self.get_scale(p)
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta).add_(g2, alpha=1.0 - beta)
r_v_hat = bias_correction / (exp_avg_sq + group['eps'])
b = state['decay']
decay_factor_c = torch.rsqrt(1.0 + self.c_coef * lr_sq * b)
decay_factor_d = torch.reciprocal(1.0 + self.d_coef * math.sqrt(init_lr) * b)
gamma = decay_factor_c * (group['lr'] ** 2) * r_v_hat * g2
update = p.clone()
update.mul_((gamma - group['extra_l2']) / 2.0)
update.add_(r_v_hat.sqrt() * grad, alpha=init_lr)
update.mul_(decay_factor_d)
b.mul_(1.0 + gamma).add_(gamma)
if momentum > 0.0:
exp_avg = state['exp_avg']
exp_avg.mul_(momentum).add_(update, alpha=1.0 - momentum)
update.copy_(exp_avg)
p.add_(-update)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, beta: float = 0.999, momentum: float = 0.0, extra_l2: float = 0.0, c_coef: float = 0.25, d_coef: float = 0.25, eps: float = 1e-18)
|
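Amos derives a per-parameter scale from the tensor shape via the static `get_scale` (0.5 for 1-D "bias"-like tensors, `sqrt(2 / fan_in)` for 2-D weights), and the docstring recommends choosing `beta` so that `1 - beta` matches the learning rate's order of magnitude. A short sketch under those assumptions:

```python
import torch
from pytorch_optimizer.optimizer.amos import Amos

print(Amos.get_scale(torch.zeros(128)))      # 0.5 for 1-D ('bias'-like) tensors
print(Amos.get_scale(torch.zeros(128, 64)))  # sqrt(2 / 64) for 2-D weights

model = torch.nn.Linear(64, 128)
optimizer = Amos(model.parameters(), lr=1e-3, beta=0.999, momentum=0.9)

x = torch.randn(8, 64)
optimizer.zero_grad()
model(x).pow(2).mean().backward()
optimizer.step()
```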
715,570 |
pytorch_optimizer.optimizer.amos
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
beta: float = 0.999,
momentum: float = 0.0,
extra_l2: float = 0.0,
c_coef: float = 0.25,
d_coef: float = 0.25,
eps: float = 1e-18,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0, range_type='[)')
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[)')
self.validate_non_negative(extra_l2, 'extra_l2')
self.validate_non_negative(eps, 'eps')
self.c_coef = c_coef
self.d_coef = d_coef
defaults: DEFAULTS = {
'lr': lr,
'beta': beta,
'momentum': momentum,
'extra_l2': extra_l2,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, beta: float = 0.999, momentum: float = 0.0, extra_l2: float = 0.0, c_coef: float = 0.25, d_coef: float = 0.25, eps: float = 1e-18)
|
715,573 |
pytorch_optimizer.optimizer.amos
|
__str__
| null |
def __str__(self) -> str:
return 'Amos'
|
(self) -> str
|
715,586 |
pytorch_optimizer.optimizer.amos
|
get_scale
|
Get expected scale for model weights.
|
@staticmethod
def get_scale(p: torch.Tensor) -> float:
r"""Get expected scale for model weights."""
if len(p.shape) == 1: # expected 'bias'
return 0.5
if len(p.shape) == 2: # expected Embedding, Linear, ...
return math.sqrt(2 / p.size(1))
return math.sqrt(1 / p.size(1))
|
(p: torch.Tensor) -> float
|
715,595 |
pytorch_optimizer.optimizer.amos
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
momentum, beta = group['momentum'], group['beta']
lr_sq: float = math.sqrt(group['lr'])
bias_correction: float = 1.0 - beta ** group['step']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg_sq'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
state['decay'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
if group['momentum'] > 0.0:
state['exp_avg'] = torch.zeros_like(p)
g2 = grad.pow(2).mean()
init_lr: float = group['lr'] * self.get_scale(p)
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta).add_(g2, alpha=1.0 - beta)
r_v_hat = bias_correction / (exp_avg_sq + group['eps'])
b = state['decay']
decay_factor_c = torch.rsqrt(1.0 + self.c_coef * lr_sq * b)
decay_factor_d = torch.reciprocal(1.0 + self.d_coef * math.sqrt(init_lr) * b)
gamma = decay_factor_c * (group['lr'] ** 2) * r_v_hat * g2
update = p.clone()
update.mul_((gamma - group['extra_l2']) / 2.0)
update.add_(r_v_hat.sqrt() * grad, alpha=init_lr)
update.mul_(decay_factor_d)
b.mul_(1.0 + gamma).add_(gamma)
if momentum > 0.0:
exp_avg = state['exp_avg']
exp_avg.mul_(momentum).add_(update, alpha=1.0 - momentum)
update.copy_(exp_avg)
p.add_(-update)
return loss
|
(self)
|
715,610 |
pytorch_optimizer.optimizer.apollo
|
Apollo
|
An Adaptive Parameter-wise Diagonal Quasi-Newton Method for Nonconvex Stochastic Optimization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param init_lr: Optional[float]. initial learning rate (default lr / 1000).
:param beta: float. coefficient used for computing running averages of gradient.
:param rebound: str. rectified bound for diagonal hessian. (constant, belief).
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decay_type: str. type of weight decay. (l2, decoupled, stable).
:param warmup_steps: int. number of warmup steps.
:param eps: float. term added to the denominator to improve numerical stability.
|
class Apollo(Optimizer, BaseOptimizer):
r"""An Adaptive Parameter-wise Diagonal Quasi-Newton Method for Nonconvex Stochastic Optimization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param init_lr: Optional[float]. initial learning rate (default lr / 1000).
:param beta: float. coefficient used for computing running averages of gradient.
:param rebound: str. rectified bound for diagonal hessian. (constant, belief).
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decay_type: str. type of weight decay. (l2, decoupled, stable).
:param warmup_steps: int. number of warmup steps.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
init_lr: Optional[float] = None,
beta: float = 0.9,
rebound: str = 'constant',
weight_decay: float = 0.0,
weight_decay_type: str = 'l2',
warmup_steps: int = 500,
eps: float = 1e-4,
):
self.validate_learning_rate(lr)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
self.validate_options(rebound, 'rebound', ['constant', 'belief'])
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_options(weight_decay_type, 'weight_decay_type', ['l2', 'decoupled', 'stable'])
self.validate_non_negative(eps, 'eps')
self.lr = lr
self.warmup_steps = warmup_steps
self.init_lr: float = init_lr if init_lr is not None else lr / 1000.0
defaults: DEFAULTS = {
'lr': lr,
'init_lr': self.init_lr,
'beta': beta,
'rebound': rebound,
'weight_decay': weight_decay,
'weight_decay_type': weight_decay_type,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Apollo'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg_grad'] = torch.zeros_like(p)
state['approx_hessian'] = torch.zeros_like(p)
state['update'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
current_lr: float = (
group['lr']
if group['step'] >= self.warmup_steps
else (self.lr - group['init_lr']) * group['step'] / self.warmup_steps + group['init_lr']
)
weight_decay, eps = group['weight_decay'], group['eps']
bias_correction: float = 1.0 - group['beta'] ** group['step']
alpha: float = (1.0 - group['beta']) / bias_correction
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg_grad'] = torch.zeros_like(p)
state['approx_hessian'] = torch.zeros_like(p)
state['update'] = torch.zeros_like(p)
if weight_decay > 0.0 and group['weight_decay_type'] == 'l2':
grad.add_(p, alpha=weight_decay)
exp_avg_grad, b, d_p = state['exp_avg_grad'], state['approx_hessian'], state['update']
delta_grad = grad - exp_avg_grad
if group['rebound'] == 'belief':
rebound = delta_grad.norm(p=np.inf)
else:
rebound = 1e-2
eps /= rebound
exp_avg_grad.add_(delta_grad, alpha=alpha)
de_nom = d_p.norm(p=4).add_(eps)
d_p.div_(de_nom)
v_sq = d_p.mul(d_p)
delta = delta_grad.div_(de_nom).mul_(d_p).sum().mul(-alpha) - b.mul(v_sq).sum()
b.addcmul_(v_sq, delta)
de_nom = b.abs().clamp_(min=rebound)
if group['rebound'] == 'belief':
de_nom.add_(eps / alpha)
d_p.copy_(exp_avg_grad.div(de_nom))
if weight_decay > 0.0 and group['weight_decay_type'] != 'l2':
if group['weight_decay_type'] == 'stable':
weight_decay /= de_nom.mean().item()
d_p.add_(p, alpha=weight_decay)
p.add_(d_p, alpha=-current_lr)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, init_lr: Optional[float] = None, beta: float = 0.9, rebound: str = 'constant', weight_decay: float = 0.0, weight_decay_type: str = 'l2', warmup_steps: int = 500, eps: float = 0.0001)
|
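Apollo warms the learning rate linearly from `init_lr` (default `lr / 1000`) up to `lr` over `warmup_steps`, and supports `l2`, `decoupled`, or `stable` weight decay. A usage sketch, hedged on the module path from the package column; the option values are illustrative:

```python
import torch
from pytorch_optimizer.optimizer.apollo import Apollo

model = torch.nn.Linear(32, 10)
optimizer = Apollo(
    model.parameters(),
    lr=1e-2,
    warmup_steps=100,            # linear warmup from init_lr (= lr / 1000 by default)
    rebound='belief',            # or 'constant'
    weight_decay=1e-4,
    weight_decay_type='stable',  # 'l2', 'decoupled', or 'stable'
)

x, y = torch.randn(16, 32), torch.randint(0, 10, (16,))
optimizer.zero_grad()
torch.nn.functional.cross_entropy(model(x), y).backward()
optimizer.step()
```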
715,612 |
pytorch_optimizer.optimizer.apollo
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
init_lr: Optional[float] = None,
beta: float = 0.9,
rebound: str = 'constant',
weight_decay: float = 0.0,
weight_decay_type: str = 'l2',
warmup_steps: int = 500,
eps: float = 1e-4,
):
self.validate_learning_rate(lr)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
self.validate_options(rebound, 'rebound', ['constant', 'belief'])
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_options(weight_decay_type, 'weight_decay_type', ['l2', 'decoupled', 'stable'])
self.validate_non_negative(eps, 'eps')
self.lr = lr
self.warmup_steps = warmup_steps
self.init_lr: float = init_lr if init_lr is not None else lr / 1000.0
defaults: DEFAULTS = {
'lr': lr,
'init_lr': self.init_lr,
'beta': beta,
'rebound': rebound,
'weight_decay': weight_decay,
'weight_decay_type': weight_decay_type,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, init_lr: Optional[float] = None, beta: float = 0.9, rebound: str = 'constant', weight_decay: float = 0.0, weight_decay_type: str = 'l2', warmup_steps: int = 500, eps: float = 0.0001)
|
715,615 |
pytorch_optimizer.optimizer.apollo
|
__str__
| null |
def __str__(self) -> str:
return 'Apollo'
|
(self) -> str
|
715,636 |
pytorch_optimizer.optimizer.apollo
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
current_lr: float = (
group['lr']
if group['step'] >= self.warmup_steps
else (self.lr - group['init_lr']) * group['step'] / self.warmup_steps + group['init_lr']
)
weight_decay, eps = group['weight_decay'], group['eps']
bias_correction: float = 1.0 - group['beta'] ** group['step']
alpha: float = (1.0 - group['beta']) / bias_correction
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg_grad'] = torch.zeros_like(p)
state['approx_hessian'] = torch.zeros_like(p)
state['update'] = torch.zeros_like(p)
if weight_decay > 0.0 and group['weight_decay_type'] == 'l2':
grad.add_(p, alpha=weight_decay)
exp_avg_grad, b, d_p = state['exp_avg_grad'], state['approx_hessian'], state['update']
delta_grad = grad - exp_avg_grad
if group['rebound'] == 'belief':
rebound = delta_grad.norm(p=np.inf)
else:
rebound = 1e-2
eps /= rebound
exp_avg_grad.add_(delta_grad, alpha=alpha)
de_nom = d_p.norm(p=4).add_(eps)
d_p.div_(de_nom)
v_sq = d_p.mul(d_p)
delta = delta_grad.div_(de_nom).mul_(d_p).sum().mul(-alpha) - b.mul(v_sq).sum()
b.addcmul_(v_sq, delta)
de_nom = b.abs().clamp_(min=rebound)
if group['rebound'] == 'belief':
de_nom.add_(eps / alpha)
d_p.copy_(exp_avg_grad.div(de_nom))
if weight_decay > 0.0 and group['weight_decay_type'] != 'l2':
if group['weight_decay_type'] == 'stable':
weight_decay /= de_nom.mean().item()
d_p.add_(p, alpha=weight_decay)
p.add_(d_p, alpha=-current_lr)
return loss
|
(self)
|
715,651 |
pytorch_optimizer.optimizer.avagrad
|
AvaGrad
|
Domain-independent Dominance of Adaptive Methods.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AvaGrad(Optimizer, BaseOptimizer):
r"""Domain-independent Dominance of Adaptive Methods.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-1,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
adam_debias: bool = False,
eps: float = 1e-1,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adam_debias': adam_debias,
'gamma': None,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AvaGrad'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
prev_bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** (group['step'] - 1))
squared_norm: float = 0.0
num_params: float = 0.0
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
exp_avg = state['exp_avg']
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq = state['exp_avg_sq']
sqrt_exp_avg_sq = exp_avg_sq.sqrt()
if group['step'] > 1:
de_nom = sqrt_exp_avg_sq.div(prev_bias_correction2_sq).add_(group['eps'])
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['gamma'] * group['lr'],
bias_correction1=bias_correction1,
)
p.addcdiv_(exp_avg, de_nom, value=-step_size)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
param_wise_lr = sqrt_exp_avg_sq.div_(bias_correction2_sq).add_(group['eps'])
squared_norm += param_wise_lr.norm(-2) ** -2
num_params += param_wise_lr.numel()
group['gamma'] = 0.0 if num_params == 0.0 else 1.0 / math.sqrt(squared_norm / num_params)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.1, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, adam_debias: bool = False, eps: float = 0.1)
|
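AvaGrad's defaults differ from Adam's (`lr=0.1`, `eps=0.1`), and the global scale `gamma` is only computed at the end of each step, so the first call accumulates statistics and parameter updates begin on the second step. A minimal sketch, assuming the module path from the package column:

```python
import torch
from pytorch_optimizer.optimizer.avagrad import AvaGrad

model = torch.nn.Linear(20, 1)
optimizer = AvaGrad(model.parameters(), lr=1e-1, eps=1e-1)  # note the non-Adam defaults

x, y = torch.randn(32, 20), torch.randn(32, 1)
for _ in range(3):  # the first step only builds statistics; updates start at step 2
    optimizer.zero_grad()
    torch.nn.functional.mse_loss(model(x), y).backward()
    optimizer.step()
```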
715,653 |
pytorch_optimizer.optimizer.avagrad
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-1,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
adam_debias: bool = False,
eps: float = 1e-1,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adam_debias': adam_debias,
'gamma': None,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.1, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, adam_debias: bool = False, eps: float = 0.1)
|
715,656 |
pytorch_optimizer.optimizer.avagrad
|
__str__
| null |
def __str__(self) -> str:
return 'AvaGrad'
|
(self) -> str
|
715,677 |
pytorch_optimizer.optimizer.avagrad
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
prev_bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** (group['step'] - 1))
squared_norm: float = 0.0
num_params: float = 0.0
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
exp_avg = state['exp_avg']
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq = state['exp_avg_sq']
sqrt_exp_avg_sq = exp_avg_sq.sqrt()
if group['step'] > 1:
de_nom = sqrt_exp_avg_sq.div(prev_bias_correction2_sq).add_(group['eps'])
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['gamma'] * group['lr'],
bias_correction1=bias_correction1,
)
p.addcdiv_(exp_avg, de_nom, value=-step_size)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
param_wise_lr = sqrt_exp_avg_sq.div_(bias_correction2_sq).add_(group['eps'])
squared_norm += param_wise_lr.norm(-2) ** -2
num_params += param_wise_lr.numel()
group['gamma'] = 0.0 if num_params == 0.0 else 1.0 / math.sqrt(squared_norm / num_params)
return loss
|
(self)
|
715,692 |
pytorch_optimizer.loss.focal
|
BCEFocalLoss
|
BCEFocal loss function w/ probability input.
:param alpha: float. alpha.
:param gamma: float. gamma.
:param label_smooth: float. Smoothness constant for dice coefficient (a).
:param eps: float. epsilon.
:param reduction: str. type of reduction.
|
class BCEFocalLoss(nn.Module):
r"""BCEFocal loss function w/ probability input.
:param alpha: float. alpha.
:param gamma: float. gamma.
:param label_smooth: float. Smoothness constant for dice coefficient (a).
:param eps: float. epsilon.
:param reduction: str. type of reduction.
"""
def __init__(
self,
alpha: float = 0.25,
gamma: float = 2.0,
label_smooth: float = 0.0,
eps: float = 1e-6,
reduction: str = 'mean',
):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.bce = BCELoss(label_smooth=label_smooth, eps=eps, reduction='none')
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
bce_loss = self.bce(y_pred, y_true)
focal_loss = (
y_true * self.alpha * (1.0 - y_pred) ** self.gamma * bce_loss
+ (1.0 - y_true) ** self.gamma * bce_loss
) # fmt: skip
return focal_loss.mean() if self.reduction == 'mean' else focal_loss.sum()
|
(alpha: float = 0.25, gamma: float = 2.0, label_smooth: float = 0.0, eps: float = 1e-06, reduction: str = 'mean')
|
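BCEFocalLoss expects probabilities rather than logits, so apply a sigmoid first. A minimal sketch, assuming the module path from the package column; the tensors are placeholders:

```python
import torch
from pytorch_optimizer.loss.focal import BCEFocalLoss

criterion = BCEFocalLoss(alpha=0.25, gamma=2.0, reduction='mean')

logits = torch.randn(16, 1, requires_grad=True)
y_true = torch.randint(0, 2, (16, 1)).float()
y_prob = torch.sigmoid(logits)  # probability input, as the docstring notes
loss = criterion(y_prob, y_true)
loss.backward()
```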
715,698 |
pytorch_optimizer.loss.focal
|
__init__
| null |
def __init__(
self,
alpha: float = 0.25,
gamma: float = 2.0,
label_smooth: float = 0.0,
eps: float = 1e-6,
reduction: str = 'mean',
):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.bce = BCELoss(label_smooth=label_smooth, eps=eps, reduction='none')
|
(self, alpha: float = 0.25, gamma: float = 2.0, label_smooth: float = 0.0, eps: float = 1e-06, reduction: str = 'mean')
|
715,728 |
pytorch_optimizer.loss.focal
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
bce_loss = self.bce(y_pred, y_true)
focal_loss = (
y_true * self.alpha * (1.0 - y_pred) ** self.gamma * bce_loss
+ (1.0 - y_true) ** self.gamma * bce_loss
) # fmt: skip
return focal_loss.mean() if self.reduction == 'mean' else focal_loss.sum()
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
715,762 |
pytorch_optimizer.loss.cross_entropy
|
BCELoss
|
binary cross entropy with label smoothing + probability input.
:param label_smooth: float. Smoothness constant for dice coefficient (a).
:param eps: float. epsilon.
:param reduction: str. type of reduction.
|
class BCELoss(nn.Module):
r"""binary cross entropy with label smoothing + probability input.
:param label_smooth: float. Smoothness constant for dice coefficient (a).
:param eps: float. epsilon.
:param reduction: str. type of reduction.
"""
def __init__(self, label_smooth: float = 0.0, eps: float = 1e-6, reduction: str = 'mean'):
super().__init__()
self.label_smooth = label_smooth
self.eps = eps
self.reduction = reduction
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
if self.training and self.label_smooth > 0.0:
y_true = (1.0 - self.label_smooth) * y_true + self.label_smooth / y_pred.size(-1)
y_pred = torch.clamp(y_pred, self.eps, 1.0 - self.eps)
return binary_cross_entropy(y_pred, y_true, reduction=self.reduction)
|
(label_smooth: float = 0.0, eps: float = 1e-06, reduction: str = 'mean')
|
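In this BCELoss, label smoothing is applied only while the module is in training mode, and predictions are clamped to `[eps, 1 - eps]` before `binary_cross_entropy`. A short sketch under the same module-path assumption:

```python
import torch
from pytorch_optimizer.loss.cross_entropy import BCELoss

criterion = BCELoss(label_smooth=0.1, eps=1e-6, reduction='mean')
criterion.train()  # smoothing is only applied when self.training is True

y_prob = torch.rand(8, 1, requires_grad=True)       # probabilities in (0, 1)
y_true = torch.randint(0, 2, (8, 1)).float()
loss = criterion(y_prob, y_true)
loss.backward()
```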
715,768 |
pytorch_optimizer.loss.cross_entropy
|
__init__
| null |
def __init__(self, label_smooth: float = 0.0, eps: float = 1e-6, reduction: str = 'mean'):
super().__init__()
self.label_smooth = label_smooth
self.eps = eps
self.reduction = reduction
|
(self, label_smooth: float = 0.0, eps: float = 1e-06, reduction: str = 'mean')
|
715,798 |
pytorch_optimizer.loss.cross_entropy
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
if self.training and self.label_smooth > 0.0:
y_true = (1.0 - self.label_smooth) * y_true + self.label_smooth / y_pred.size(-1)
y_pred = torch.clamp(y_pred, self.eps, 1.0 - self.eps)
return binary_cross_entropy(y_pred, y_true, reduction=self.reduction)
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
715,832 |
pytorch_optimizer.loss.bi_tempered
|
BiTemperedLogisticLoss
|
Bi-Tempered Log Loss.
Reference: https://github.com/BloodAxe/pytorch-toolbelt/blob/develop/pytorch_toolbelt/losses/bitempered_loss.py
:param t1: float. Temperature 1 (< 1.0 for boundedness).
:param t2: float. Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
:param label_smooth: float. Label smoothing parameter between [0, 1).
:param ignore_index: Optional[int]. Index to ignore.
:param reduction: str. type of reduction.
|
class BiTemperedLogisticLoss(nn.Module):
"""Bi-Tempered Log Loss.
Reference: https://github.com/BloodAxe/pytorch-toolbelt/blob/develop/pytorch_toolbelt/losses/bitempered_loss.py
:param t1: float. Temperature 1 (< 1.0 for boundedness).
:param t2: float. Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
:param label_smooth: float. Label smoothing parameter between [0, 1).
:param ignore_index: Optional[int]. Index to ignore.
:param reduction: str. type of reduction.
"""
def __init__(
self,
t1: float,
t2: float,
label_smooth: float = 0.0,
ignore_index: Optional[int] = None,
reduction: str = 'mean',
):
super().__init__()
self.t1 = t1
self.t2 = t2
self.label_smooth = label_smooth
self.ignore_index = ignore_index
self.reduction = reduction
def forward(self, predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
loss = bi_tempered_logistic_loss(
predictions, targets, t1=self.t1, t2=self.t2, label_smooth=self.label_smooth, reduction='none'
)
if self.ignore_index is not None:
mask = ~targets.eq(self.ignore_index)
loss *= mask
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
|
(t1: float, t2: float, label_smooth: float = 0.0, ignore_index: Optional[int] = None, reduction: str = 'mean')
|
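A usage sketch for BiTemperedLogisticLoss, assuming the module path from the package column. Passing targets as one-hot distributions with the same shape as the logits is an assumption made here for safety (it matches how the binary variant below builds its two-class targets):

```python
import torch
from pytorch_optimizer.loss.bi_tempered import BiTemperedLogisticLoss

criterion = BiTemperedLogisticLoss(t1=0.8, t2=1.4, label_smooth=0.1)

logits = torch.randn(16, 5, requires_grad=True)
# targets given as one-hot / probability distributions (assumed format)
targets = torch.nn.functional.one_hot(torch.randint(0, 5, (16,)), num_classes=5).float()
loss = criterion(logits, targets)
loss.backward()
```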
715,838 |
pytorch_optimizer.loss.bi_tempered
|
__init__
| null |
def __init__(
self,
t1: float,
t2: float,
label_smooth: float = 0.0,
ignore_index: Optional[int] = None,
reduction: str = 'mean',
):
super().__init__()
self.t1 = t1
self.t2 = t2
self.label_smooth = label_smooth
self.ignore_index = ignore_index
self.reduction = reduction
|
(self, t1: float, t2: float, label_smooth: float = 0.0, ignore_index: Optional[int] = None, reduction: str = 'mean')
|
715,868 |
pytorch_optimizer.loss.bi_tempered
|
forward
| null |
def forward(self, predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
loss = bi_tempered_logistic_loss(
predictions, targets, t1=self.t1, t2=self.t2, label_smooth=self.label_smooth, reduction='none'
)
if self.ignore_index is not None:
mask = ~targets.eq(self.ignore_index)
loss *= mask
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
|
(self, predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor
|
715,902 |
pytorch_optimizer.loss.bi_tempered
|
BinaryBiTemperedLogisticLoss
|
Modification of BiTemperedLogisticLoss for binary classification case.
:param t1: float. Temperature 1 (< 1.0 for boundedness).
:param t2: float. Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
:param label_smooth: float. Label smoothing parameter between [0, 1).
:param ignore_index: Optional[int]. Index to ignore.
:param reduction: str. type of reduction.
|
class BinaryBiTemperedLogisticLoss(nn.Module):
"""Modification of BiTemperedLogisticLoss for binary classification case.
:param t1: float. Temperature 1 (< 1.0 for boundedness).
:param t2: float. Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
:param label_smooth: float. Label smoothing parameter between [0, 1).
:param ignore_index: Optional[int]. Index to ignore.
:param reduction: str. type of reduction.
"""
def __init__(
self,
t1: float,
t2: float,
label_smooth: float = 0.0,
ignore_index: Optional[int] = None,
reduction: str = 'mean',
):
super().__init__()
self.t1 = t1
self.t2 = t2
self.label_smooth = label_smooth
self.ignore_index = ignore_index
self.reduction = reduction
def forward(self, predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
if predictions.size(1) != 1 or targets.size(1) != 1:
raise ValueError('Channel dimension for predictions and targets must be equal to 1')
loss = bi_tempered_logistic_loss(
torch.cat((-predictions, predictions), dim=1).moveaxis(1, -1),
torch.cat((1.0 - targets, targets), dim=1).moveaxis(1, -1),
t1=self.t1,
t2=self.t2,
label_smooth=self.label_smooth,
reduction='none',
).unsqueeze(dim=1)
if self.ignore_index is not None:
mask = targets.eq(self.ignore_index)
loss = torch.masked_fill(loss, mask, value=0)
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
|
(t1: float, t2: float, label_smooth: float = 0.0, ignore_index: Optional[int] = None, reduction: str = 'mean')
|
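A sketch of the binary variant (not part of the source entry); both tensors must carry a channel dimension of size 1, as enforced by the ValueError check in forward. The top-level import path is an assumption.
import torch
from pytorch_optimizer import BinaryBiTemperedLogisticLoss  # top-level export is an assumption
criterion = BinaryBiTemperedLogisticLoss(t1=0.8, t2=1.4)
logits = torch.randn(8, 1, 16, 16, requires_grad=True)      # (batch, 1, H, W)
targets = torch.randint(0, 2, (8, 1, 16, 16)).float()
loss = criterion(logits, targets)
loss.backward()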
715,938 |
pytorch_optimizer.loss.bi_tempered
|
forward
| null |
def forward(self, predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
if predictions.size(1) != 1 or targets.size(1) != 1:
raise ValueError('Channel dimension for predictions and targets must be equal to 1')
loss = bi_tempered_logistic_loss(
torch.cat((-predictions, predictions), dim=1).moveaxis(1, -1),
torch.cat((1.0 - targets, targets), dim=1).moveaxis(1, -1),
t1=self.t1,
t2=self.t2,
label_smooth=self.label_smooth,
reduction='none',
).unsqueeze(dim=1)
if self.ignore_index is not None:
mask = targets.eq(self.ignore_index)
loss = torch.masked_fill(loss, mask, value=0)
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
|
(self, predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor
|
715,972 |
pytorch_optimizer.optimizer.shampoo_utils
|
BlockPartitioner
|
Partition a tensor into smaller tensors for preconditioning.
For example, if a variable has shape (4096, 512), we might split the 4096 into 4 blocks,
so we effectively have 4 variables of size (1024, 512) each.
:param var: torch.Tensor. tensor variable.
:param rank: int. rank of the tensor (its number of dimensions).
:param block_size: int. block size.
:param pre_conditioner_type: int. type of pre-conditioner.
|
class BlockPartitioner:
r"""Partition a tensor into smaller tensors for preconditioning.
For example, if a variable has shape (4096, 512), we might split the 4096 into 4 blocks,
so we effectively have 4 variables of size (1024, 512) each.
:param var: torch.Tensor. tensor variable.
    :param rank: int. rank of the tensor (its number of dimensions).
    :param block_size: int. block size.
    :param pre_conditioner_type: int. type of pre-conditioner.
"""
def __init__(self, var: torch.Tensor, rank: int, block_size: int, pre_conditioner_type: int):
self.shape: List[int] = var.shape
self.splits: List[Tuple[int, np.ndarray]] = []
self.split_sizes: List[Tuple[int, np.ndarray]] = []
split_sizes: List[np.ndarray] = []
# We split var into smaller blocks. Here we store the metadata to make that split.
for i, d in enumerate(self.shape):
if block_size <= 0 or block_size >= d:
split_sizes.append(np.array([d], dtype=np.int32))
continue
# d - 1, otherwise split appends a 0-size array.
num_split: int = (d - 1) // block_size
indices = (np.arange(num_split, dtype=np.int32) + 1) * block_size
sizes: np.ndarray = np.ones(num_split + 1, dtype=np.int32) * block_size
sizes[-1] = d - indices[-1]
self.splits.append((i, indices))
self.split_sizes.append((i, sizes))
split_sizes.append(sizes)
self.num_splits: int = len(split_sizes)
self.pre_conditioner_shapes: List[List[int]] = self.build_pre_conditioner_shapes(
split_sizes, pre_conditioner_type, rank
)
@staticmethod
def build_pre_conditioner_shapes(
split_sizes: List[np.ndarray], pre_conditioner_type: int, rank: int
) -> List[List[int]]:
r"""Build pre-conditioner shapes."""
pre_conditioner_shapes: List[List[int]] = []
for t in itertools.product(*split_sizes):
t_shape: List[Optional[List[int]]] = [[d, d] for d in t]
if pre_conditioner_type == PreConditionerType.INPUT:
t_shape = t_shape[:-1] + [None]
if pre_conditioner_type == PreConditionerType.OUTPUT:
t_shape = [None] * (rank - 1) + t_shape[-1:]
pre_conditioner_shapes.extend(t_shape)
return pre_conditioner_shapes
def shapes_for_pre_conditioners(self) -> List[List[int]]:
r"""Get shapes of pre-conditioner."""
return self.pre_conditioner_shapes
@torch.no_grad()
def partition(self, x: torch.Tensor) -> List[torch.Tensor]:
r"""Partition tensor into blocks."""
if x.shape != self.shape:
raise ValueError(f'self.shape != x.shape ({self.shape} vs {x.shape})')
tensors = [x]
for i, sizes in self.split_sizes:
tensors = [torch.split(t, list(sizes), dim=i) for t in tensors]
tensors = [t for tensor in tensors for t in tensor]
return tensors
def merge_partitions(self, partitions: List[torch.Tensor]) -> torch.Tensor:
r"""Merge partitions back to original shape."""
for i, indices in reversed(self.splits):
n: int = len(indices) + 1
partitions: List[torch.Tensor] = [
torch.cat(partitions[idx:idx + n], dim=i) for idx in range(0, len(partitions), n) # fmt: skip
]
return partitions[0]
|
(var: torch.Tensor, rank: int, block_size: int, pre_conditioner_type: int)
|
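A sketch (not part of the source entry) of the (4096, 512) example from the docstring: block_size=1024 splits the first dimension into four (1024, 512) blocks, and merge_partitions restores the original shape. Passing PreConditionerType.INPUT is only to satisfy the constructor, and it is assumed that the enum is exposed by the same module.
import torch
from pytorch_optimizer.optimizer.shampoo_utils import BlockPartitioner, PreConditionerType
var = torch.zeros(4096, 512)
partitioner = BlockPartitioner(var, rank=2, block_size=1024, pre_conditioner_type=PreConditionerType.INPUT)
blocks = partitioner.partition(var)
print([tuple(b.shape) for b in blocks])  # [(1024, 512), (1024, 512), (1024, 512), (1024, 512)]
merged = partitioner.merge_partitions(blocks)
assert merged.shape == var.shape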
715,973 |
pytorch_optimizer.optimizer.shampoo_utils
|
__init__
| null |
def __init__(self, var: torch.Tensor, rank: int, block_size: int, pre_conditioner_type: int):
self.shape: List[int] = var.shape
self.splits: List[Tuple[int, np.ndarray]] = []
self.split_sizes: List[Tuple[int, np.ndarray]] = []
split_sizes: List[np.ndarray] = []
# We split var into smaller blocks. Here we store the metadata to make that split.
for i, d in enumerate(self.shape):
if block_size <= 0 or block_size >= d:
split_sizes.append(np.array([d], dtype=np.int32))
continue
# d - 1, otherwise split appends a 0-size array.
num_split: int = (d - 1) // block_size
indices = (np.arange(num_split, dtype=np.int32) + 1) * block_size
sizes: np.ndarray = np.ones(num_split + 1, dtype=np.int32) * block_size
sizes[-1] = d - indices[-1]
self.splits.append((i, indices))
self.split_sizes.append((i, sizes))
split_sizes.append(sizes)
self.num_splits: int = len(split_sizes)
self.pre_conditioner_shapes: List[List[int]] = self.build_pre_conditioner_shapes(
split_sizes, pre_conditioner_type, rank
)
|
(self, var: torch.Tensor, rank: int, block_size: int, pre_conditioner_type: int)
|
715,974 |
pytorch_optimizer.optimizer.shampoo_utils
|
build_pre_conditioner_shapes
|
Build pre-conditioner shapes.
|
@staticmethod
def build_pre_conditioner_shapes(
split_sizes: List[np.ndarray], pre_conditioner_type: int, rank: int
) -> List[List[int]]:
r"""Build pre-conditioner shapes."""
pre_conditioner_shapes: List[List[int]] = []
for t in itertools.product(*split_sizes):
t_shape: List[Optional[List[int]]] = [[d, d] for d in t]
if pre_conditioner_type == PreConditionerType.INPUT:
t_shape = t_shape[:-1] + [None]
if pre_conditioner_type == PreConditionerType.OUTPUT:
t_shape = [None] * (rank - 1) + t_shape[-1:]
pre_conditioner_shapes.extend(t_shape)
return pre_conditioner_shapes
|
(split_sizes: List[numpy.ndarray], pre_conditioner_type: int, rank: int) -> List[List[int]]
|
715,975 |
pytorch_optimizer.optimizer.shampoo_utils
|
merge_partitions
|
Merge partitions back to original shape.
|
def merge_partitions(self, partitions: List[torch.Tensor]) -> torch.Tensor:
r"""Merge partitions back to original shape."""
for i, indices in reversed(self.splits):
n: int = len(indices) + 1
partitions: List[torch.Tensor] = [
torch.cat(partitions[idx:idx + n], dim=i) for idx in range(0, len(partitions), n) # fmt: skip
]
return partitions[0]
|
(self, partitions: List[torch.Tensor]) -> torch.Tensor
|
715,976 |
pytorch_optimizer.optimizer.shampoo_utils
|
partition
|
Partition tensor into blocks.
|
@torch.no_grad()
def partition(self, x: torch.Tensor) -> List[torch.Tensor]:
    r"""Partition tensor into blocks."""
    if x.shape != self.shape:
        raise ValueError(f'self.shape != x.shape ({self.shape} vs {x.shape})')
    tensors = [x]
    for i, sizes in self.split_sizes:
        tensors = [torch.split(t, list(sizes), dim=i) for t in tensors]
        tensors = [t for tensor in tensors for t in tensor]
    return tensors
|
(self, x: torch.Tensor) -> List[torch.Tensor]
|
715,977 |
pytorch_optimizer.optimizer.shampoo_utils
|
shapes_for_pre_conditioners
|
Get shapes of pre-conditioner.
|
def shapes_for_pre_conditioners(self) -> List[List[int]]:
r"""Get shapes of pre-conditioner."""
return self.pre_conditioner_shapes
|
(self) -> List[List[int]]
|
715,978 |
pytorch_optimizer.optimizer.came
|
CAME
|
Confidence-guided Adaptive Memory Efficient Optimization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param clip_threshold: float. threshold of root-mean-square of final gradient update.
:param ams_bound: bool. whether to use the AMSBound variant.
:param eps1: float. term added to the denominator to improve numerical stability.
:param eps2: float. term added to the denominator to improve numerical stability.
|
class CAME(Optimizer, BaseOptimizer):
r"""Confidence-guided Adaptive Memory Efficient Optimization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param clip_threshold: float. threshold of root-mean-square of final gradient update.
:param ams_bound: bool. whether to use the AMSBound variant.
:param eps1: float. term added to the denominator to improve numerical stability.
:param eps2: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 2e-4,
betas: BETAS = (0.9, 0.999, 0.9999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
clip_threshold: float = 1.0,
ams_bound: bool = False,
eps1: float = 1e-30,
eps2: float = 1e-16,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps1, 'eps1')
self.validate_non_negative(eps2, 'eps2')
self.clip_threshold = clip_threshold
self.eps1 = eps1
self.eps2 = eps2
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'eps1': eps1,
'eps2': eps2,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'CAME'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
grad = p.grad
grad_shape: Tuple[int, ...] = grad.shape
factored: bool = self.get_options(grad_shape)
state['exp_avg'] = torch.zeros_like(p)
if factored:
state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1], dtype=grad.dtype, device=grad.device)
state['exp_avg_sq_col'] = torch.zeros(
grad_shape[:-2] + grad_shape[-1:], dtype=grad.dtype, device=grad.device
)
state['exp_avg_res_row'] = torch.zeros(grad_shape[:-1], dtype=grad.dtype, device=grad.device)
state['exp_avg_res_col'] = torch.zeros(
grad_shape[:-2] + grad_shape[-1:], dtype=grad.dtype, device=grad.device
)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
if group['ams_bound']:
state['exp_avg_sq_hat'] = torch.zeros_like(grad)
state['RMS'] = 0.0
@staticmethod
def get_options(shape: Tuple[int, ...]) -> bool:
r"""Get `factored`."""
return len(shape) >= 2
@staticmethod
def get_rms(x: torch.Tensor) -> float:
r"""Get RMS."""
return x.norm(2) / math.sqrt(x.numel())
@staticmethod
def approximate_sq_grad(
exp_avg_sq_row: torch.Tensor,
exp_avg_sq_col: torch.Tensor,
output: torch.Tensor,
):
r"""Get approximation of EMA of squared gradient."""
r_factor: torch.Tensor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
c_factor: torch.Tensor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
torch.mul(r_factor, c_factor, out=output)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2, beta3 = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
grad_shape: Tuple[int, ...] = grad.shape
factored: bool = self.get_options(grad_shape)
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
if factored:
state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1], dtype=grad.dtype, device=grad.device)
state['exp_avg_sq_col'] = torch.zeros(
grad_shape[:-2] + grad_shape[-1:], dtype=grad.dtype, device=grad.device
)
state['exp_avg_res_row'] = torch.zeros(grad_shape[:-1], dtype=grad.dtype, device=grad.device)
state['exp_avg_res_col'] = torch.zeros(
grad_shape[:-2] + grad_shape[-1:], dtype=grad.dtype, device=grad.device
)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
if group['ams_bound']:
state['exp_avg_sq_hat'] = torch.zeros_like(grad)
state['RMS'] = 0.0
state['RMS'] = self.get_rms(p)
update = torch.mul(grad, grad).add_(self.eps1)
if factored:
exp_avg_sq_row, exp_avg_sq_col = state['exp_avg_sq_row'], state['exp_avg_sq_col']
exp_avg_sq_row.mul_(beta2).add_(update.mean(dim=-1), alpha=1.0 - beta2)
exp_avg_sq_col.mul_(beta2).add_(update.mean(dim=-2), alpha=1.0 - beta2)
self.approximate_sq_grad(exp_avg_sq_row, exp_avg_sq_col, update)
else:
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2).add_(update, alpha=1.0 - beta2)
torch.rsqrt(exp_avg_sq, out=update)
if group['ams_bound']:
exp_avg_sq_hat = state['exp_avg_sq_hat']
torch.max(exp_avg_sq_hat, 1 / update, out=exp_avg_sq_hat)
torch.rsqrt(exp_avg_sq_hat / beta2, out=update)
update.mul_(grad)
update.div_((self.get_rms(update) / self.clip_threshold).clamp_(min=1.0))
exp_avg = state['exp_avg']
exp_avg.mul_(beta1).add_(update, alpha=1.0 - beta1)
res = update - exp_avg
res.pow_(2).add_(self.eps2)
if factored:
exp_avg_res_row, exp_avg_res_col = state['exp_avg_res_row'], state['exp_avg_res_col']
exp_avg_res_row.mul_(beta3).add_(res.mean(dim=-1), alpha=1.0 - beta3)
exp_avg_res_col.mul_(beta3).add_(res.mean(dim=-2), alpha=1.0 - beta3)
self.approximate_sq_grad(exp_avg_res_row, exp_avg_res_col, update)
update.mul_(exp_avg)
else:
update = exp_avg
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
update.mul_(group['lr'])
p.add_(-update)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.0002, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999, 0.9999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, clip_threshold: float = 1.0, ams_bound: bool = False, eps1: float = 1e-30, eps2: float = 1e-16)
|
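A minimal training-loop sketch (not part of the source entry); the model is a stand-in and the top-level import path is an assumption.
import torch
import torch.nn.functional as F
from pytorch_optimizer import CAME  # top-level export is an assumption
model = torch.nn.Linear(128, 10)
optimizer = CAME(model.parameters(), lr=2e-4, betas=(0.9, 0.999, 0.9999), weight_decay=1e-2)
x, y = torch.randn(32, 128), torch.randint(0, 10, (32,))
loss = F.cross_entropy(model(x), y)
optimizer.zero_grad()
loss.backward()
optimizer.step()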
715,980 |
pytorch_optimizer.optimizer.came
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 2e-4,
betas: BETAS = (0.9, 0.999, 0.9999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
clip_threshold: float = 1.0,
ams_bound: bool = False,
eps1: float = 1e-30,
eps2: float = 1e-16,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps1, 'eps1')
self.validate_non_negative(eps2, 'eps2')
self.clip_threshold = clip_threshold
self.eps1 = eps1
self.eps2 = eps2
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'eps1': eps1,
'eps2': eps2,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.0002, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999, 0.9999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, clip_threshold: float = 1.0, ams_bound: bool = False, eps1: float = 1e-30, eps2: float = 1e-16)
|
715,983 |
pytorch_optimizer.optimizer.came
|
__str__
| null |
def __str__(self) -> str:
return 'CAME'
|
(self) -> str
|
716,007 |
pytorch_optimizer.optimizer.came
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            grad = p.grad
            grad_shape: Tuple[int, ...] = grad.shape
            factored: bool = self.get_options(grad_shape)
            state['exp_avg'] = torch.zeros_like(p)
            if factored:
                state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1], dtype=grad.dtype, device=grad.device)
                state['exp_avg_sq_col'] = torch.zeros(
                    grad_shape[:-2] + grad_shape[-1:], dtype=grad.dtype, device=grad.device
                )
                state['exp_avg_res_row'] = torch.zeros(grad_shape[:-1], dtype=grad.dtype, device=grad.device)
                state['exp_avg_res_col'] = torch.zeros(
                    grad_shape[:-2] + grad_shape[-1:], dtype=grad.dtype, device=grad.device
                )
            else:
                state['exp_avg_sq'] = torch.zeros_like(grad)
            if group['ams_bound']:
                state['exp_avg_sq_hat'] = torch.zeros_like(grad)
            state['RMS'] = 0.0
|
(self)
|
716,022 |
torch.optim.lr_scheduler
|
ConstantLR
|
Multiply the learning rate of each parameter group by a small constant factor until the
number of epochs reaches a pre-defined milestone: total_iters.
Notice that such multiplication of the small constant factor can
happen simultaneously with other changes to the learning rate from outside this scheduler.
When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
factor (float): The number we multiply learning rate until the milestone. Default: 1./3.
total_iters (int): The number of steps that the scheduler multiplies the learning rate by the factor.
Default: 5.
last_epoch (int): The index of the last epoch. Default: -1.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
.. deprecated:: 2.2
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
learning rate.
Example:
>>> # xdoctest: +SKIP
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.025 if epoch == 0
>>> # lr = 0.025 if epoch == 1
>>> # lr = 0.025 if epoch == 2
>>> # lr = 0.025 if epoch == 3
>>> # lr = 0.05 if epoch >= 4
>>> scheduler = ConstantLR(optimizer, factor=0.5, total_iters=4)
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
|
class ConstantLR(LRScheduler):
"""Multiply the learning rate of each parameter group by a small constant factor until the
    number of epochs reaches a pre-defined milestone: total_iters.
Notice that such multiplication of the small constant factor can
happen simultaneously with other changes to the learning rate from outside this scheduler.
When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
factor (float): The number we multiply learning rate until the milestone. Default: 1./3.
total_iters (int): The number of steps that the scheduler multiplies the learning rate by the factor.
Default: 5.
last_epoch (int): The index of the last epoch. Default: -1.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
.. deprecated:: 2.2
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
learning rate.
Example:
>>> # xdoctest: +SKIP
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.025 if epoch == 0
>>> # lr = 0.025 if epoch == 1
>>> # lr = 0.025 if epoch == 2
>>> # lr = 0.025 if epoch == 3
>>> # lr = 0.05 if epoch >= 4
>>> scheduler = ConstantLR(optimizer, factor=0.5, total_iters=4)
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
def __init__(self, optimizer, factor=1.0 / 3, total_iters=5, last_epoch=-1, verbose="deprecated"):
if factor > 1.0 or factor < 0:
raise ValueError('Constant multiplicative factor expected to be between 0 and 1.')
self.factor = factor
self.total_iters = total_iters
super().__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == 0:
return [group['lr'] * self.factor for group in self.optimizer.param_groups]
if self.last_epoch != self.total_iters:
return [group['lr'] for group in self.optimizer.param_groups]
return [group['lr'] * (1.0 / self.factor) for group in self.optimizer.param_groups]
def _get_closed_form_lr(self):
return [base_lr * (self.factor + (self.last_epoch >= self.total_iters) * (1 - self.factor))
for base_lr in self.base_lrs]
|
(optimizer, factor=0.3333333333333333, total_iters=5, last_epoch=-1, verbose='deprecated')
|
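A short sketch (not part of the source entry) of the factor behaviour described in the docstring, reading values via get_last_lr() as the deprecation note recommends; the toy model is a stand-in.
import torch
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.05)
scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer, factor=0.5, total_iters=4)
for epoch in range(6):
    optimizer.step()
    print(epoch, scheduler.get_last_lr())  # [0.025] for epochs 0-3, [0.05] afterwards
    scheduler.step()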
716,023 |
torch.optim.lr_scheduler
|
__init__
| null |
def __init__(self, optimizer, factor=1.0 / 3, total_iters=5, last_epoch=-1, verbose="deprecated"):
if factor > 1.0 or factor < 0:
raise ValueError('Constant multiplicative factor expected to be between 0 and 1.')
self.factor = factor
self.total_iters = total_iters
super().__init__(optimizer, last_epoch, verbose)
|
(self, optimizer, factor=0.3333333333333333, total_iters=5, last_epoch=-1, verbose='deprecated')
|
716,024 |
torch.optim.lr_scheduler
|
_get_closed_form_lr
| null |
def _get_closed_form_lr(self):
return [base_lr * (self.factor + (self.last_epoch >= self.total_iters) * (1 - self.factor))
for base_lr in self.base_lrs]
|
(self)
|
716,027 |
torch.optim.lr_scheduler
|
get_lr
| null |
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == 0:
return [group['lr'] * self.factor for group in self.optimizer.param_groups]
if self.last_epoch != self.total_iters:
return [group['lr'] for group in self.optimizer.param_groups]
return [group['lr'] * (1.0 / self.factor) for group in self.optimizer.param_groups]
|
(self)
|
716,032 |
torch.optim.lr_scheduler
|
CosineAnnealingLR
|
Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr and
:math:`T_{cur}` is the number of epochs since the last restart in SGDR:
.. math::
\begin{aligned}
\eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1
+ \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right),
& T_{cur} \neq (2k+1)T_{max}; \\
\eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min})
\left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right),
& T_{cur} = (2k+1)T_{max}.
\end{aligned}
When last_epoch=-1, sets initial lr as lr. Notice that because the schedule
is defined recursively, the learning rate can be simultaneously modified
outside this scheduler by other operators. If the learning rate is set
solely by this scheduler, the learning rate at each step becomes:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
\cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right)
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
implements the cosine annealing part of SGDR, and not the restarts.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_max (int): Maximum number of iterations.
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
.. deprecated:: 2.2
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
learning rate.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
|
class CosineAnnealingLR(LRScheduler):
r"""Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr and
:math:`T_{cur}` is the number of epochs since the last restart in SGDR:
.. math::
\begin{aligned}
\eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1
+ \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right),
& T_{cur} \neq (2k+1)T_{max}; \\
\eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min})
\left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right),
& T_{cur} = (2k+1)T_{max}.
\end{aligned}
When last_epoch=-1, sets initial lr as lr. Notice that because the schedule
is defined recursively, the learning rate can be simultaneously modified
outside this scheduler by other operators. If the learning rate is set
solely by this scheduler, the learning rate at each step becomes:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
\cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right)
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
implements the cosine annealing part of SGDR, and not the restarts.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_max (int): Maximum number of iterations.
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
.. deprecated:: 2.2
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
learning rate.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1, verbose="deprecated"):
self.T_max = T_max
self.eta_min = eta_min
super().__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == 0:
return [group['lr'] for group in self.optimizer.param_groups]
elif self._step_count == 1 and self.last_epoch > 0:
return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos((self.last_epoch) * math.pi / self.T_max)) / 2
for base_lr, group in
zip(self.base_lrs, self.optimizer.param_groups)]
elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0:
return [group['lr'] + (base_lr - self.eta_min) *
(1 - math.cos(math.pi / self.T_max)) / 2
for base_lr, group in
zip(self.base_lrs, self.optimizer.param_groups)]
return [(1 + math.cos(math.pi * self.last_epoch / self.T_max)) /
(1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) *
(group['lr'] - self.eta_min) + self.eta_min
for group in self.optimizer.param_groups]
def _get_closed_form_lr(self):
return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2
for base_lr in self.base_lrs]
|
(optimizer, T_max, eta_min=0, last_epoch=-1, verbose='deprecated')
|
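A small sketch (not part of the source entry) checking the closed-form schedule eta_t = eta_min + (eta_max - eta_min) * (1 + cos(pi * t / T_max)) / 2 against the scheduler's reported learning rate; the toy model is a stand-in.
import math
import torch
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10, eta_min=0.001)
for t in range(10):
    expected = 0.001 + (0.1 - 0.001) * (1 + math.cos(math.pi * t / 10)) / 2
    assert abs(scheduler.get_last_lr()[0] - expected) < 1e-6
    optimizer.step()
    scheduler.step()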
716,033 |
torch.optim.lr_scheduler
|
__init__
| null |
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1, verbose="deprecated"):
self.T_max = T_max
self.eta_min = eta_min
super().__init__(optimizer, last_epoch, verbose)
|
(self, optimizer, T_max, eta_min=0, last_epoch=-1, verbose='deprecated')
|
716,034 |
torch.optim.lr_scheduler
|
_get_closed_form_lr
| null |
def _get_closed_form_lr(self):
return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2
for base_lr in self.base_lrs]
|
(self)
|
716,037 |
torch.optim.lr_scheduler
|
get_lr
| null |
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == 0:
return [group['lr'] for group in self.optimizer.param_groups]
elif self._step_count == 1 and self.last_epoch > 0:
return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos((self.last_epoch) * math.pi / self.T_max)) / 2
for base_lr, group in
zip(self.base_lrs, self.optimizer.param_groups)]
elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0:
return [group['lr'] + (base_lr - self.eta_min) *
(1 - math.cos(math.pi / self.T_max)) / 2
for base_lr, group in
zip(self.base_lrs, self.optimizer.param_groups)]
return [(1 + math.cos(math.pi * self.last_epoch / self.T_max)) /
(1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) *
(group['lr'] - self.eta_min) + self.eta_min
for group in self.optimizer.param_groups]
|
(self)
|
716,051 |
pytorch_optimizer.lr_scheduler.cosine_anealing
|
CosineAnnealingWarmupRestarts
|
CosineAnnealingWarmupRestarts.
:param optimizer: Optimizer. wrapped optimizer instance.
:param first_cycle_steps: int. first cycle step size.
:param cycle_mult: float. cycle steps magnification.
:param max_lr: float. max learning rate of the first cycle.
:param min_lr: float. min learning rate; also used as the initial learning rate.
:param warmup_steps: int. number of warmup steps.
:param gamma: float. decrease rate of lr by cycle.
:param last_epoch: int. the index of the last epoch.
|
class CosineAnnealingWarmupRestarts(_LRScheduler):
r"""CosineAnnealingWarmupRestarts.
:param optimizer: Optimizer. wrapped optimizer instance.
:param first_cycle_steps: int. first cycle step size.
:param cycle_mult: float. cycle steps magnification.
    :param max_lr: float. max learning rate of the first cycle.
    :param min_lr: float. min learning rate; also used as the initial learning rate.
    :param warmup_steps: int. number of warmup steps.
    :param gamma: float. decrease rate of lr by cycle.
    :param last_epoch: int. the index of the last epoch.
"""
def __init__(
self,
optimizer: OPTIMIZER,
first_cycle_steps: int,
cycle_mult: float = 1.0,
max_lr: float = 1e-4,
min_lr: float = 1e-6,
warmup_steps: int = 0,
gamma: float = 0.9,
last_epoch: int = -1,
):
if warmup_steps >= first_cycle_steps:
raise ValueError(
f'[-] warmup_steps must be smaller than first_cycle_steps. {warmup_steps} < {first_cycle_steps}'
)
self.first_cycle_steps = first_cycle_steps
self.cycle_mult = cycle_mult
self.base_max_lr = max_lr
self.max_lr = max_lr
self.min_lr = min_lr
self.warmup_steps = warmup_steps
self.gamma = gamma
self.cur_cycle_steps = first_cycle_steps
self.step_in_cycle = last_epoch
self.last_epoch = last_epoch
self.cycle: int = 0
self.base_lrs: List[float] = []
super().__init__(optimizer, last_epoch)
self.init_lr()
def init_lr(self):
self.base_lrs = []
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.min_lr
self.base_lrs.append(self.min_lr)
def get_lr(self) -> List[float]:
if self.step_in_cycle == -1:
return self.base_lrs
if self.step_in_cycle < self.warmup_steps:
return [
(self.max_lr - base_lr) * self.step_in_cycle / self.warmup_steps + base_lr for base_lr in self.base_lrs
]
return [
base_lr
+ (self.max_lr - base_lr)
* (
1
+ math.cos(
math.pi * (self.step_in_cycle - self.warmup_steps) / (self.cur_cycle_steps - self.warmup_steps)
)
)
/ 2.0
for base_lr in self.base_lrs
]
def step(self, epoch: Optional[int] = None):
if epoch is None:
epoch = self.last_epoch + 1
self.step_in_cycle = self.step_in_cycle + 1
if self.step_in_cycle >= self.cur_cycle_steps:
self.cycle += 1
self.step_in_cycle = self.step_in_cycle - self.cur_cycle_steps
self.cur_cycle_steps = (
int((self.cur_cycle_steps - self.warmup_steps) * self.cycle_mult) + self.warmup_steps
)
elif epoch >= self.first_cycle_steps:
if self.cycle_mult == 1.0:
self.step_in_cycle = epoch % self.first_cycle_steps
self.cycle = epoch // self.first_cycle_steps
else:
n: int = int(math.log((epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult))
self.cycle = n
self.step_in_cycle = epoch - int(
self.first_cycle_steps * (self.cycle_mult ** n - 1) / (self.cycle_mult - 1)
) # fmt: skip
self.cur_cycle_steps = self.first_cycle_steps * self.cycle_mult ** n # fmt: skip
else:
self.cur_cycle_steps = self.first_cycle_steps
self.step_in_cycle = epoch
self.max_lr = self.base_max_lr * (self.gamma ** self.cycle) # fmt: skip
self.last_epoch = math.floor(epoch)
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
|
(optimizer: Type[torch.optim.optimizer.Optimizer], first_cycle_steps: int, cycle_mult: float = 1.0, max_lr: float = 0.0001, min_lr: float = 1e-06, warmup_steps: int = 0, gamma: float = 0.9, last_epoch: int = -1)
|
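A usage sketch (not part of the source entry); the values are illustrative and the top-level import path is an assumption. With gamma=0.5 the peak lr is halved at each restart, and cycle_mult=2.0 doubles each cycle length.
import torch
from pytorch_optimizer import CosineAnnealingWarmupRestarts  # top-level export is an assumption
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = CosineAnnealingWarmupRestarts(
    optimizer, first_cycle_steps=100, cycle_mult=2.0, max_lr=1e-3, min_lr=1e-6, warmup_steps=10, gamma=0.5
)
for step in range(300):
    optimizer.step()
    scheduler.step()  # 10 warmup steps per cycle, then cosine decay toward min_lr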
716,052 |
pytorch_optimizer.lr_scheduler.cosine_anealing
|
__init__
| null |
def __init__(
self,
optimizer: OPTIMIZER,
first_cycle_steps: int,
cycle_mult: float = 1.0,
max_lr: float = 1e-4,
min_lr: float = 1e-6,
warmup_steps: int = 0,
gamma: float = 0.9,
last_epoch: int = -1,
):
if warmup_steps >= first_cycle_steps:
raise ValueError(
f'[-] warmup_steps must be smaller than first_cycle_steps. {warmup_steps} < {first_cycle_steps}'
)
self.first_cycle_steps = first_cycle_steps
self.cycle_mult = cycle_mult
self.base_max_lr = max_lr
self.max_lr = max_lr
self.min_lr = min_lr
self.warmup_steps = warmup_steps
self.gamma = gamma
self.cur_cycle_steps = first_cycle_steps
self.step_in_cycle = last_epoch
self.last_epoch = last_epoch
self.cycle: int = 0
self.base_lrs: List[float] = []
super().__init__(optimizer, last_epoch)
self.init_lr()
|
(self, optimizer: Type[torch.optim.optimizer.Optimizer], first_cycle_steps: int, cycle_mult: float = 1.0, max_lr: float = 0.0001, min_lr: float = 1e-06, warmup_steps: int = 0, gamma: float = 0.9, last_epoch: int = -1)
|
716,055 |
pytorch_optimizer.lr_scheduler.cosine_anealing
|
get_lr
| null |
def get_lr(self) -> List[float]:
if self.step_in_cycle == -1:
return self.base_lrs
if self.step_in_cycle < self.warmup_steps:
return [
(self.max_lr - base_lr) * self.step_in_cycle / self.warmup_steps + base_lr for base_lr in self.base_lrs
]
return [
base_lr
+ (self.max_lr - base_lr)
* (
1
+ math.cos(
math.pi * (self.step_in_cycle - self.warmup_steps) / (self.cur_cycle_steps - self.warmup_steps)
)
)
/ 2.0
for base_lr in self.base_lrs
]
|
(self) -> List[float]
|
716,056 |
pytorch_optimizer.lr_scheduler.cosine_anealing
|
init_lr
| null |
def init_lr(self):
self.base_lrs = []
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.min_lr
self.base_lrs.append(self.min_lr)
|
(self)
|
716,060 |
pytorch_optimizer.lr_scheduler.cosine_anealing
|
step
| null |
def step(self, epoch: Optional[int] = None):
if epoch is None:
epoch = self.last_epoch + 1
self.step_in_cycle = self.step_in_cycle + 1
if self.step_in_cycle >= self.cur_cycle_steps:
self.cycle += 1
self.step_in_cycle = self.step_in_cycle - self.cur_cycle_steps
self.cur_cycle_steps = (
int((self.cur_cycle_steps - self.warmup_steps) * self.cycle_mult) + self.warmup_steps
)
elif epoch >= self.first_cycle_steps:
if self.cycle_mult == 1.0:
self.step_in_cycle = epoch % self.first_cycle_steps
self.cycle = epoch // self.first_cycle_steps
else:
n: int = int(math.log((epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult))
self.cycle = n
self.step_in_cycle = epoch - int(
self.first_cycle_steps * (self.cycle_mult ** n - 1) / (self.cycle_mult - 1)
) # fmt: skip
self.cur_cycle_steps = self.first_cycle_steps * self.cycle_mult ** n # fmt: skip
else:
self.cur_cycle_steps = self.first_cycle_steps
self.step_in_cycle = epoch
self.max_lr = self.base_max_lr * (self.gamma ** self.cycle) # fmt: skip
self.last_epoch = math.floor(epoch)
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
|
(self, epoch: Optional[int] = None)
|
716,061 |
pytorch_optimizer.lr_scheduler.linear_warmup
|
CosineScheduler
|
Cosine LR Scheduler w/ linear warmup.
|
class CosineScheduler(BaseLinearWarmupScheduler):
r"""Cosine LR Scheduler w/ linear warmup."""
def _step(self) -> float:
phase: float = (self.step_t - self.warmup_steps) / (self.total_steps - self.warmup_steps) * math.pi
return self.min_lr + (self.max_lr - self.min_lr) * (np.cos(phase) + 1.0) / 2.0
|
(optimizer: Type[torch.optim.optimizer.Optimizer], t_max: int, max_lr: float, min_lr: float = 0.0, init_lr: float = 0.0, warmup_steps: int = 0)
|
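A usage sketch (not part of the source entry) of the linear-warmup cosine scheduler; step() is called explicitly each iteration and returns the new lr. The top-level import path is an assumption.
import torch
from pytorch_optimizer import CosineScheduler  # top-level export is an assumption
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
scheduler = CosineScheduler(optimizer, t_max=1000, max_lr=1e-3, min_lr=1e-5, init_lr=0.0, warmup_steps=100)
for step in range(1000):
    optimizer.step()
    lr = scheduler.step()  # linear ramp to max_lr over 100 steps, then cosine decay to min_lr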
716,062 |
pytorch_optimizer.base.scheduler
|
__init__
| null |
def __init__(
self,
optimizer: OPTIMIZER,
t_max: int,
max_lr: float,
min_lr: float = 0.0,
init_lr: float = 0.0,
warmup_steps: int = 0,
):
self.optimizer = optimizer
self.total_steps = t_max
self.max_lr = max_lr
self.min_lr = min_lr
self.init_lr = init_lr
self.warmup_steps = warmup_steps
self.step_t: int = 0
self.base_lrs: List[float] = []
        # record the current value in self.last_lr to match the torch.optim.lr_scheduler API
self.last_lr: List[float] = [init_lr]
self.validate_parameters()
self._init_lr()
|
(self, optimizer: Type[torch.optim.optimizer.Optimizer], t_max: int, max_lr: float, min_lr: float = 0.0, init_lr: float = 0.0, warmup_steps: int = 0)
|
716,063 |
pytorch_optimizer.base.scheduler
|
_init_lr
| null |
def _init_lr(self):
self.base_lrs = []
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.min_lr
self.base_lrs.append(self.min_lr)
|
(self)
|
716,064 |
pytorch_optimizer.lr_scheduler.linear_warmup
|
_step
| null |
def _step(self) -> float:
phase: float = (self.step_t - self.warmup_steps) / (self.total_steps - self.warmup_steps) * math.pi
return self.min_lr + (self.max_lr - self.min_lr) * (np.cos(phase) + 1.0) / 2.0
|
(self) -> float
|
716,065 |
pytorch_optimizer.base.scheduler
|
get_lr
| null |
def get_lr(self) -> float:
return self.last_lr[0]
|
(self) -> float
|
716,066 |
pytorch_optimizer.base.scheduler
|
step
| null |
def step(self):
if self.step_t < self.warmup_steps:
value = self.init_lr + (self.max_lr - self.init_lr) * self.step_t / self.warmup_steps
elif self.step_t == self.warmup_steps:
value = self.max_lr
else:
value = self._step()
self.step_t += 1
# apply the lr to optimizer if it's provided
if self.optimizer is not None:
for param_group in self.optimizer.param_groups:
param_group['lr'] = value
self.last_lr = [value]
return value
|
(self)
|
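A worked check (illustrative values, not part of the source entry) of the warmup branch above: a quarter of the way through warmup yields a quarter of max_lr when init_lr is zero.
init_lr, max_lr, warmup_steps = 0.0, 1e-3, 100
step_t = 25
value = init_lr + (max_lr - init_lr) * step_t / warmup_steps
assert abs(value - 2.5e-4) < 1e-12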
716,067 |
pytorch_optimizer.base.scheduler
|
validate_parameters
| null |
def validate_parameters(self):
if self.min_lr < 0:
raise NegativeLRError(self.min_lr, 'min_lr')
if self.max_lr < 0:
raise NegativeLRError(self.max_lr, 'max_lr')
if self.init_lr < 0:
raise NegativeLRError(self.init_lr, 'init_lr')
if self.total_steps < 0:
raise NegativeStepError(self.total_steps, 't_max')
if self.warmup_steps < 0:
raise NegativeStepError(self.warmup_steps, 'warmup_steps')
|
(self)
|
716,068 |
torch.optim.lr_scheduler
|
CyclicLR
|
Sets the learning rate of each parameter group according to
cyclical learning rate policy (CLR). The policy cycles the learning
rate between two boundaries with a constant frequency, as detailed in
the paper `Cyclical Learning Rates for Training Neural Networks`_.
The distance between the two boundaries can be scaled on a per-iteration
or per-cycle basis.
Cyclical learning rate policy changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
This class has three built-in policies, as put forth in the paper:
* "triangular": A basic triangular cycle without amplitude scaling.
* "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle.
* "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}`
at each cycle iteration.
This implementation was adapted from the github repo: `bckenstler/CLR`_
Args:
optimizer (Optimizer): Wrapped optimizer.
base_lr (float or list): Initial learning rate which is the
lower boundary in the cycle for each parameter group.
max_lr (float or list): Upper learning rate boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_lr - base_lr).
The lr at any cycle is the sum of base_lr
and some scaling of the amplitude; therefore
max_lr may not actually be reached depending on
scaling function.
step_size_up (int): Number of training iterations in the
increasing half of a cycle. Default: 2000
step_size_down (int): Number of training iterations in the
decreasing half of a cycle. If step_size_down is None,
it is set to step_size_up. Default: None
mode (str): One of {triangular, triangular2, exp_range}.
Values correspond to policies detailed above.
If scale_fn is not None, this argument is ignored.
Default: 'triangular'
gamma (float): Constant in 'exp_range' scaling function:
gamma**(cycle iterations)
Default: 1.0
scale_fn (function): Custom scaling policy defined by a single
argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0.
If specified, then 'mode' is ignored.
Default: None
scale_mode (str): {'cycle', 'iterations'}.
Defines whether scale_fn is evaluated on
cycle number or cycle iterations (training
iterations since start of cycle).
Default: 'cycle'
cycle_momentum (bool): If ``True``, momentum is cycled inversely
to learning rate between 'base_momentum' and 'max_momentum'.
Default: True
base_momentum (float or list): Lower momentum boundaries in the cycle
for each parameter group. Note that momentum is cycled inversely
to learning rate; at the peak of a cycle, momentum is
'base_momentum' and learning rate is 'max_lr'.
Default: 0.8
max_momentum (float or list): Upper momentum boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_momentum - base_momentum).
The momentum at any cycle is the difference of max_momentum
and some scaling of the amplitude; therefore
base_momentum may not actually be reached depending on
scaling function. Note that momentum is cycled inversely
to learning rate; at the start of a cycle, momentum is 'max_momentum'
and learning rate is 'base_lr'
Default: 0.9
last_epoch (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_epoch=-1, the schedule is started from the beginning.
Default: -1
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
.. deprecated:: 2.2
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
learning rate.
Example:
>>> # xdoctest: +SKIP
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
.. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186
.. _bckenstler/CLR: https://github.com/bckenstler/CLR
|
class CyclicLR(LRScheduler):
r"""Sets the learning rate of each parameter group according to
cyclical learning rate policy (CLR). The policy cycles the learning
rate between two boundaries with a constant frequency, as detailed in
the paper `Cyclical Learning Rates for Training Neural Networks`_.
The distance between the two boundaries can be scaled on a per-iteration
or per-cycle basis.
Cyclical learning rate policy changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
This class has three built-in policies, as put forth in the paper:
* "triangular": A basic triangular cycle without amplitude scaling.
* "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle.
* "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}`
at each cycle iteration.
This implementation was adapted from the github repo: `bckenstler/CLR`_
Args:
optimizer (Optimizer): Wrapped optimizer.
base_lr (float or list): Initial learning rate which is the
lower boundary in the cycle for each parameter group.
max_lr (float or list): Upper learning rate boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_lr - base_lr).
The lr at any cycle is the sum of base_lr
and some scaling of the amplitude; therefore
max_lr may not actually be reached depending on
scaling function.
step_size_up (int): Number of training iterations in the
increasing half of a cycle. Default: 2000
step_size_down (int): Number of training iterations in the
decreasing half of a cycle. If step_size_down is None,
it is set to step_size_up. Default: None
mode (str): One of {triangular, triangular2, exp_range}.
Values correspond to policies detailed above.
If scale_fn is not None, this argument is ignored.
Default: 'triangular'
gamma (float): Constant in 'exp_range' scaling function:
gamma**(cycle iterations)
Default: 1.0
scale_fn (function): Custom scaling policy defined by a single
argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0.
If specified, then 'mode' is ignored.
Default: None
scale_mode (str): {'cycle', 'iterations'}.
Defines whether scale_fn is evaluated on
cycle number or cycle iterations (training
iterations since start of cycle).
Default: 'cycle'
cycle_momentum (bool): If ``True``, momentum is cycled inversely
to learning rate between 'base_momentum' and 'max_momentum'.
Default: True
base_momentum (float or list): Lower momentum boundaries in the cycle
for each parameter group. Note that momentum is cycled inversely
to learning rate; at the peak of a cycle, momentum is
'base_momentum' and learning rate is 'max_lr'.
Default: 0.8
max_momentum (float or list): Upper momentum boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_momentum - base_momentum).
The momentum at any cycle is the difference of max_momentum
and some scaling of the amplitude; therefore
base_momentum may not actually be reached depending on
scaling function. Note that momentum is cycled inversely
to learning rate; at the start of a cycle, momentum is 'max_momentum'
and learning rate is 'base_lr'
Default: 0.9
last_epoch (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_epoch=-1, the schedule is started from the beginning.
Default: -1
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
.. deprecated:: 2.2
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
learning rate.
Example:
>>> # xdoctest: +SKIP
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
.. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186
.. _bckenstler/CLR: https://github.com/bckenstler/CLR
"""
def __init__(self,
optimizer,
base_lr,
max_lr,
step_size_up=2000,
step_size_down=None,
mode='triangular',
gamma=1.,
scale_fn=None,
scale_mode='cycle',
cycle_momentum=True,
base_momentum=0.8,
max_momentum=0.9,
last_epoch=-1,
verbose="deprecated"):
# Attach optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError(f'{type(optimizer).__name__} is not an Optimizer')
self.optimizer = optimizer
base_lrs = self._format_param('base_lr', optimizer, base_lr)
if last_epoch == -1:
for lr, group in zip(base_lrs, optimizer.param_groups):
group['lr'] = lr
self.max_lrs = self._format_param('max_lr', optimizer, max_lr)
step_size_up = float(step_size_up)
step_size_down = float(step_size_down) if step_size_down is not None else step_size_up
self.total_size = step_size_up + step_size_down
self.step_ratio = step_size_up / self.total_size
if mode not in ['triangular', 'triangular2', 'exp_range'] \
and scale_fn is None:
raise ValueError('mode is invalid and scale_fn is None')
self.mode = mode
self.gamma = gamma
self._scale_fn_ref = None
self._scale_fn_custom = scale_fn
self.scale_mode = scale_mode
self._init_scale_fn()
self.cycle_momentum = cycle_momentum
if cycle_momentum:
if 'momentum' not in optimizer.defaults and 'betas' not in optimizer.defaults:
raise ValueError('optimizer must support momentum or beta1 with `cycle_momentum` option enabled')
self.use_beta1 = 'betas' in self.optimizer.defaults
self.base_momentums = self._format_param('base_momentum', optimizer, base_momentum)
self.max_momentums = self._format_param('max_momentum', optimizer, max_momentum)
if last_epoch == -1:
for m_momentum, b_momentum, group in zip(self.max_momentums, self.base_momentums, optimizer.param_groups):
if self.use_beta1:
group['betas'] = (m_momentum, *group['betas'][1:])
else:
group['momentum'] = m_momentum
group['max_momentum'] = m_momentum
group['base_momentum'] = b_momentum
super().__init__(optimizer, last_epoch, verbose)
self.base_lrs = base_lrs
def _init_scale_fn(self):
if self._scale_fn_custom is not None:
return
if self.mode == 'triangular':
self._scale_fn_ref = self._triangular_scale_fn
self.scale_mode = 'cycle'
elif self.mode == 'triangular2':
self._scale_fn_ref = self._triangular2_scale_fn
self.scale_mode = 'cycle'
elif self.mode == 'exp_range':
self._scale_fn_ref = partial(self._exp_range_scale_fn, self.gamma)
self.scale_mode = 'iterations'
def _format_param(self, name, optimizer, param):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
raise ValueError(f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}")
return param
else:
return [param] * len(optimizer.param_groups)
def scale_fn(self, x):
if self._scale_fn_custom is not None:
return self._scale_fn_custom(x)
else:
return self._scale_fn_ref(x) # static method
@staticmethod
def _triangular_scale_fn(x):
return 1.
@staticmethod
def _triangular2_scale_fn(x):
return 1 / (2. ** (x - 1))
@staticmethod
def _exp_range_scale_fn(gamma, x):
return gamma ** x
def get_lr(self):
"""Calculates the learning rate at batch index. This function treats
`self.last_epoch` as the last batch index.
If `self.cycle_momentum` is ``True``, this function has a side effect of
updating the optimizer's momentum.
"""
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
cycle = math.floor(1 + self.last_epoch / self.total_size)
x = 1. + self.last_epoch / self.total_size - cycle
if x <= self.step_ratio:
scale_factor = x / self.step_ratio
else:
scale_factor = (x - 1) / (self.step_ratio - 1)
lrs = []
for base_lr, max_lr in zip(self.base_lrs, self.max_lrs):
base_height = (max_lr - base_lr) * scale_factor
if self.scale_mode == 'cycle':
lr = base_lr + base_height * self.scale_fn(cycle)
else:
lr = base_lr + base_height * self.scale_fn(self.last_epoch)
lrs.append(lr)
if self.cycle_momentum:
momentums = []
for base_momentum, max_momentum in zip(self.base_momentums, self.max_momentums):
base_height = (max_momentum - base_momentum) * scale_factor
if self.scale_mode == 'cycle':
momentum = max_momentum - base_height * self.scale_fn(cycle)
else:
momentum = max_momentum - base_height * self.scale_fn(self.last_epoch)
momentums.append(momentum)
for param_group, momentum in zip(self.optimizer.param_groups, momentums):
if self.use_beta1:
param_group['betas'] = (momentum, *param_group['betas'][1:])
else:
param_group['momentum'] = momentum
return lrs
def state_dict(self):
state = super().state_dict()
# We are dropping the `_scale_fn_ref` attribute because it is a
# `weakref.WeakMethod` and can't be pickled.
state.pop('_scale_fn_ref')
fn = state.pop('_scale_fn_custom')
state['_scale_fn_custom'] = None
if fn is not None and not isinstance(fn, types.FunctionType):
# The _scale_fn_custom will only be saved if it is a callable object
# and not if it is a function or lambda.
state['_scale_fn_custom'] = fn.__dict__.copy()
return state
def load_state_dict(self, state_dict):
fn = state_dict.pop('_scale_fn_custom')
super().load_state_dict(state_dict)
if fn is not None:
self._scale_fn_custom.__dict__.update(fn)
self._init_scale_fn()
|
(optimizer, base_lr, max_lr, step_size_up=2000, step_size_down=None, mode='triangular', gamma=1.0, scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9, last_epoch=-1, verbose='deprecated')
|
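A sketch (not part of the source entry) of the custom scale_fn hook described in the docstring: the lambda is an illustrative per-cycle decay with 0 <= scale_fn(x) <= 1, and because scale_fn is given, mode is ignored. The toy model is a stand-in.
import torch
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.CyclicLR(
    optimizer, base_lr=0.01, max_lr=0.1, step_size_up=200, scale_fn=lambda x: 0.95 ** x, scale_mode='cycle'
)
for batch in range(1000):
    optimizer.step()
    scheduler.step()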
716,069 |
torch.optim.lr_scheduler
|
__init__
| null |
def __init__(self,
optimizer,
base_lr,
max_lr,
step_size_up=2000,
step_size_down=None,
mode='triangular',
gamma=1.,
scale_fn=None,
scale_mode='cycle',
cycle_momentum=True,
base_momentum=0.8,
max_momentum=0.9,
last_epoch=-1,
verbose="deprecated"):
# Attach optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError(f'{type(optimizer).__name__} is not an Optimizer')
self.optimizer = optimizer
base_lrs = self._format_param('base_lr', optimizer, base_lr)
if last_epoch == -1:
for lr, group in zip(base_lrs, optimizer.param_groups):
group['lr'] = lr
self.max_lrs = self._format_param('max_lr', optimizer, max_lr)
step_size_up = float(step_size_up)
step_size_down = float(step_size_down) if step_size_down is not None else step_size_up
self.total_size = step_size_up + step_size_down
self.step_ratio = step_size_up / self.total_size
if mode not in ['triangular', 'triangular2', 'exp_range'] \
and scale_fn is None:
raise ValueError('mode is invalid and scale_fn is None')
self.mode = mode
self.gamma = gamma
self._scale_fn_ref = None
self._scale_fn_custom = scale_fn
self.scale_mode = scale_mode
self._init_scale_fn()
self.cycle_momentum = cycle_momentum
if cycle_momentum:
if 'momentum' not in optimizer.defaults and 'betas' not in optimizer.defaults:
raise ValueError('optimizer must support momentum or beta1 with `cycle_momentum` option enabled')
self.use_beta1 = 'betas' in self.optimizer.defaults
self.base_momentums = self._format_param('base_momentum', optimizer, base_momentum)
self.max_momentums = self._format_param('max_momentum', optimizer, max_momentum)
if last_epoch == -1:
for m_momentum, b_momentum, group in zip(self.max_momentums, self.base_momentums, optimizer.param_groups):
if self.use_beta1:
group['betas'] = (m_momentum, *group['betas'][1:])
else:
group['momentum'] = m_momentum
group['max_momentum'] = m_momentum
group['base_momentum'] = b_momentum
super().__init__(optimizer, last_epoch, verbose)
self.base_lrs = base_lrs
|
(self, optimizer, base_lr, max_lr, step_size_up=2000, step_size_down=None, mode='triangular', gamma=1.0, scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9, last_epoch=-1, verbose='deprecated')
|
716,070 |
torch.optim.lr_scheduler
|
_exp_range_scale_fn
| null |
@staticmethod
def _exp_range_scale_fn(gamma, x):
return gamma ** x
|
(gamma, x)
|
716,071 |
torch.optim.lr_scheduler
|
_format_param
|
Return correctly formatted lr/momentum for each param group.
|
def _format_param(self, name, optimizer, param):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
raise ValueError(f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}")
return param
else:
return [param] * len(optimizer.param_groups)
|
(self, name, optimizer, param)
|
716,072 |
torch.optim.lr_scheduler
|
_init_scale_fn
| null |
def _init_scale_fn(self):
if self._scale_fn_custom is not None:
return
if self.mode == 'triangular':
self._scale_fn_ref = self._triangular_scale_fn
self.scale_mode = 'cycle'
elif self.mode == 'triangular2':
self._scale_fn_ref = self._triangular2_scale_fn
self.scale_mode = 'cycle'
elif self.mode == 'exp_range':
self._scale_fn_ref = partial(self._exp_range_scale_fn, self.gamma)
self.scale_mode = 'iterations'
|
(self)
|
716,074 |
torch.optim.lr_scheduler
|
_triangular2_scale_fn
| null |
@staticmethod
def _triangular2_scale_fn(x):
return 1 / (2. ** (x - 1))
|
(x)
|
716,075 |
torch.optim.lr_scheduler
|
_triangular_scale_fn
| null |
@staticmethod
def _triangular_scale_fn(x):
return 1.
|
(x)
|
716,077 |
torch.optim.lr_scheduler
|
get_lr
|
Calculates the learning rate at batch index. This function treats
`self.last_epoch` as the last batch index.
If `self.cycle_momentum` is ``True``, this function has a side effect of
updating the optimizer's momentum.
|
def get_lr(self):
"""Calculates the learning rate at batch index. This function treats
`self.last_epoch` as the last batch index.
If `self.cycle_momentum` is ``True``, this function has a side effect of
updating the optimizer's momentum.
"""
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
cycle = math.floor(1 + self.last_epoch / self.total_size)
x = 1. + self.last_epoch / self.total_size - cycle
if x <= self.step_ratio:
scale_factor = x / self.step_ratio
else:
scale_factor = (x - 1) / (self.step_ratio - 1)
lrs = []
for base_lr, max_lr in zip(self.base_lrs, self.max_lrs):
base_height = (max_lr - base_lr) * scale_factor
if self.scale_mode == 'cycle':
lr = base_lr + base_height * self.scale_fn(cycle)
else:
lr = base_lr + base_height * self.scale_fn(self.last_epoch)
lrs.append(lr)
if self.cycle_momentum:
momentums = []
for base_momentum, max_momentum in zip(self.base_momentums, self.max_momentums):
base_height = (max_momentum - base_momentum) * scale_factor
if self.scale_mode == 'cycle':
momentum = max_momentum - base_height * self.scale_fn(cycle)
else:
momentum = max_momentum - base_height * self.scale_fn(self.last_epoch)
momentums.append(momentum)
for param_group, momentum in zip(self.optimizer.param_groups, momentums):
if self.use_beta1:
param_group['betas'] = (momentum, *param_group['betas'][1:])
else:
param_group['momentum'] = momentum
return lrs
|
(self)
|
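A standalone sketch (values are illustrative) of the cycle/scale-factor arithmetic that `get_lr` performs in the default `triangular` mode, where `scale_fn(cycle)` is 1:

import math

base_lr, max_lr = 1e-4, 1e-2
step_size_up = step_size_down = 2000
total_size = step_size_up + step_size_down
step_ratio = step_size_up / total_size

for last_epoch in (0, 1000, 2000, 3000, 3999):
    cycle = math.floor(1 + last_epoch / total_size)
    x = 1.0 + last_epoch / total_size - cycle
    scale_factor = x / step_ratio if x <= step_ratio else (x - 1) / (step_ratio - 1)
    lr = base_lr + (max_lr - base_lr) * scale_factor  # rises to max_lr, then falls back toward base_lr
    print(last_epoch, round(lr, 6))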
716,078 |
torch.optim.lr_scheduler
|
load_state_dict
| null |
def load_state_dict(self, state_dict):
fn = state_dict.pop('_scale_fn_custom')
super().load_state_dict(state_dict)
if fn is not None:
self._scale_fn_custom.__dict__.update(fn)
self._init_scale_fn()
|
(self, state_dict)
|
716,080 |
torch.optim.lr_scheduler
|
scale_fn
| null |
def scale_fn(self, x):
if self._scale_fn_custom is not None:
return self._scale_fn_custom(x)
else:
return self._scale_fn_ref(x) # static method
|
(self, x)
|
716,081 |
torch.optim.lr_scheduler
|
state_dict
| null |
def state_dict(self):
state = super().state_dict()
# We are dropping the `_scale_fn_ref` attribute because it is a
# `weakref.WeakMethod` and can't be pickled.
state.pop('_scale_fn_ref')
fn = state.pop('_scale_fn_custom')
state['_scale_fn_custom'] = None
if fn is not None and not isinstance(fn, types.FunctionType):
# The _scale_fn_custom will only be saved if it is a callable object
# and not if it is a function or lambda.
state['_scale_fn_custom'] = fn.__dict__.copy()
return state
|
(self)
|
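A small round-trip sketch for the two entries above; the model and hyper-parameters are placeholders.

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import CyclicLR

model = torch.nn.Linear(4, 2)
opt = SGD(model.parameters(), lr=0.1, momentum=0.9)
sched = CyclicLR(opt, base_lr=1e-4, max_lr=1e-2)

state = sched.state_dict()              # `_scale_fn_ref` is dropped because it cannot be pickled
restored = CyclicLR(opt, base_lr=1e-4, max_lr=1e-2)
restored.load_state_dict(state)         # rebuilds the scale function via `_init_scale_fn()`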
716,083 |
pytorch_optimizer.optimizer.dadapt
|
DAdaptAdaGrad
|
AdaGrad with D-Adaptation. Leave LR set to 1 unless you encounter instability.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param growth_rate: float. prevent the D estimate from growing faster than this multiplicative rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
|
class DAdaptAdaGrad(Optimizer, BaseOptimizer):
r"""AdaGrad with D-Adaptation. Leave LR set to 1 unless you encounter instability.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param growth_rate: float. prevent the D estimate from growing faster than this multiplicative rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
momentum: float = 0.0,
d0: float = 1e-6,
growth_rate: float = float('inf'),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
eps: float = 0.0,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0, range_type='[)')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'd': d0,
'growth_rate': growth_rate,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'k': 0,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'DAdaptAdaGrad'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['alpha_k'] = torch.full_like(p, fill_value=1e-6)
state['sk'] = torch.zeros_like(p)
state['x0'] = torch.clone(p)
if p.grad.is_sparse:
state['weighted_sk'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
group = self.param_groups[0]
device = group['params'][0].device
d, lr = group['d'], group['lr']
d_lr: float = d * lr
g_sq = torch.tensor([0.0], device=device)
sk_sq_weighted_change = torch.tensor([0.0], device=device)
sk_l1_change = torch.tensor([0.0], device=device)
if 'gsq_weighted' not in group:
group['gsq_weighted'] = torch.tensor([0.0], device=device)
if 'sk_sq_weighted' not in group:
group['sk_sq_weighted'] = torch.tensor([0.0], device=device)
if 'sk_l1' not in group:
group['sk_l1'] = torch.tensor([0.0], device=device)
gsq_weighted = group['gsq_weighted']
sk_sq_weighted = group['sk_sq_weighted']
sk_l1 = group['sk_l1']
for group in self.param_groups:
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
if 'alpha_k' not in state:
state['alpha_k'] = torch.full_like(p, fill_value=1e-6)
state['sk'] = torch.zeros_like(p)
state['x0'] = torch.clone(p)
if grad.is_sparse:
state['weighted_sk'] = torch.zeros_like(p)
sk, alpha_k = state['sk'], state['alpha_k']
if grad.is_sparse:
weighted_sk = state['weighted_sk']
grad = grad.coalesce()
vk = grad._values().pow(2)
sk_masked = sk.sparse_mask(grad).coalesce()
old_sk_l1_masked = sk_masked._values().abs().sum()
sk.add_(grad, alpha=d_lr)
sk_masked = sk.sparse_mask(grad).coalesce()
alpha_k_masked = alpha_k.sparse_mask(grad).coalesce()
weighted_sk_masked = weighted_sk.sparse_mask(grad).coalesce()
# update alpha before step
alpha_k_p1_masked = alpha_k_masked._values() + vk
alpha_k_delta_masked = alpha_k_p1_masked - alpha_k_masked._values()
alpha_k_delta = torch.sparse_coo_tensor(grad.indices(), alpha_k_delta_masked, grad.shape)
alpha_k.add_(alpha_k_delta)
de_nom = torch.sqrt(alpha_k_p1_masked + eps)
grad_sq = vk.div(de_nom).sum()
g_sq.add_(grad_sq)
# update weighted sk sq tracking
weighted_sk_p1_masked = sk_masked._values().pow(2).div(de_nom)
sk_sq_weighted_change.add_(weighted_sk_p1_masked.sum() - weighted_sk_masked._values().sum())
weighted_sk_p1_delta_masked = weighted_sk_p1_masked - weighted_sk_masked._values()
weighted_sk_p1_delta = torch.sparse_coo_tensor(
grad.indices(), weighted_sk_p1_delta_masked, grad.shape
)
weighted_sk.add_(weighted_sk_p1_delta)
sk_l1_masked = sk_masked._values().abs().sum()
sk_l1_change.add_(sk_l1_masked - old_sk_l1_masked)
else:
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
old_sk_sq_weighted_param = sk.pow(2).div(torch.sqrt(alpha_k) + eps).sum()
old_sk_l1_param = sk.abs().sum()
alpha_k.add_(grad.pow(2))
grad_sq = grad.pow(2).div(torch.sqrt(alpha_k) + eps).sum()
g_sq.add_(grad_sq)
sk.add_(grad, alpha=d_lr)
sk_sq_weighted_param = sk.pow(2).div(torch.sqrt(alpha_k) + eps).sum()
sk_l1_param = sk.abs().sum()
sk_sq_weighted_change.add_(sk_sq_weighted_param - old_sk_sq_weighted_param)
sk_l1_change.add_(sk_l1_param - old_sk_l1_param)
sk_sq_weighted.add_(sk_sq_weighted_change)
gsq_weighted.add_(g_sq, alpha=d_lr ** 2) # fmt: skip
sk_l1.add_(sk_l1_change)
if sk_l1 == 0:
return loss
if lr > 0.0:
d_hat = (sk_sq_weighted - gsq_weighted) / sk_l1
d = group['d'] = max(d, min(d_hat.item(), d * group['growth_rate']))
for group in self.param_groups:
group['gsq_weighted'] = gsq_weighted
group['sk_sq_weighted'] = sk_sq_weighted
group['sk_l1'] = sk_l1
group['d'] = d
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
alpha_k, sk, x0 = state['alpha_k'], state['sk'], state['x0']
if grad.is_sparse:
grad = grad.coalesce()
sk_masked = sk.sparse_mask(grad).coalesce()._values()
alpha_k_masked = alpha_k.sparse_mask(grad).coalesce()._values()
x0_masked = x0.sparse_mask(grad).coalesce()._values()
p_masked = p.sparse_mask(grad).coalesce()._values()
loc_masked = x0_masked - sk_masked.div(torch.sqrt(alpha_k_masked + group['eps']))
loc_delta_masked = loc_masked - p_masked
loc_delta = torch.sparse_coo_tensor(grad.indices(), loc_delta_masked, grad.shape)
p.add_(loc_delta)
else:
z = x0 - sk.div(alpha_k.sqrt().add_(group['eps']))
if group['momentum'] > 0.0:
p.mul_(group['momentum']).add_(z, alpha=1.0 - group['momentum'])
else:
p.copy_(z)
group['k'] += 1
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, momentum: float = 0.0, d0: float = 1e-06, growth_rate: float = inf, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, eps: float = 0.0)
|
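A minimal usage sketch for the class documented above, assuming it is importable from the `pytorch_optimizer.optimizer.dadapt` module named in the package column; the toy model and data are illustrative.

import torch
from pytorch_optimizer.optimizer.dadapt import DAdaptAdaGrad

model = torch.nn.Linear(10, 1)
optimizer = DAdaptAdaGrad(model.parameters(), lr=1.0)  # keep lr at 1; D-Adaptation estimates the scale

for _ in range(3):
    optimizer.zero_grad()
    loss = model(torch.randn(8, 10)).pow(2).mean()
    loss.backward()
    optimizer.step()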
716,085 |
pytorch_optimizer.optimizer.dadapt
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
momentum: float = 0.0,
d0: float = 1e-6,
growth_rate: float = float('inf'),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
eps: float = 0.0,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0, range_type='[)')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'd': d0,
'growth_rate': growth_rate,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'k': 0,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, momentum: float = 0.0, d0: float = 1e-06, growth_rate: float = inf, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, eps: float = 0.0)
|
716,088 |
pytorch_optimizer.optimizer.dadapt
|
__str__
| null |
def __str__(self) -> str:
return 'DAdaptAdaGrad'
|
(self) -> str
|
716,109 |
pytorch_optimizer.optimizer.dadapt
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            state = self.state[p]
            state['alpha_k'] = torch.full_like(p, fill_value=1e-6)
            state['sk'] = torch.zeros_like(p)
            state['x0'] = torch.clone(p)
            if p.grad.is_sparse:
                state['weighted_sk'] = torch.zeros_like(p)
|
(self)
|
716,124 |
pytorch_optimizer.optimizer.dadapt
|
DAdaptAdam
|
Adam with D-Adaptation. Leave LR set to 1 unless you encounter instability. This implementation is based on V3.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. betas.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param growth_rate: float. prevent the D estimate from growing faster than this multiplicative rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. use AdamW style weight decay.
:param fixed_decay: bool. fix weight decay.
:param bias_correction: bool. Turn on Adam's bias correction.
:param eps: float. term added to the denominator to improve numerical stability.
|
class DAdaptAdam(Optimizer, BaseOptimizer):
r"""Adam with D-Adaptation. Leave LR set to 1 unless you encounter instability. This implementation is based on V3.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. betas.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param growth_rate: float. prevent the D estimate from growing faster than this multiplicative rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. use AdamW style weight decay.
:param fixed_decay: bool. fix weight decay.
:param bias_correction: bool. Turn on Adam's bias correction.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
betas: BETAS = (0.9, 0.999),
d0: float = 1e-6,
growth_rate: float = float('inf'),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
bias_correction: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'd': d0,
'growth_rate': growth_rate,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'bias_correction': bias_correction,
'step': 0,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'DAdaptAdam'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['s'] = torch.zeros_like(p)
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
group = self.param_groups[0]
device = group['params'][0].device
beta1, beta2 = group['betas']
beta2_sq: float = math.sqrt(beta2)
d: float = group['d']
lr: float = group['lr']
bias_correction1: float = 1.0 - beta1 ** (group['step'] + 1)
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** (group['step'] + 1))
bias_correction: float = bias_correction1 / bias_correction2_sq
# it's not Adam Debias
d_lr: float = self.apply_adam_debias(
not group['bias_correction'], step_size=d * lr, bias_correction1=bias_correction
)
sk_l1 = torch.tensor([0.0], device=device)
numerator_acc = torch.tensor([0.0], device=device)
if 'numerator_weighted' not in group:
group['numerator_weighted'] = torch.tensor([0.0], device=device)
numerator_weighted = group['numerator_weighted']
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if 'step' not in state:
state['s'] = torch.zeros_like(p)
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq, s = state['exp_avg'], state['exp_avg_sq'], state['s']
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
numerator_acc.add_(torch.dot(grad.flatten(), s.div(de_nom).flatten()), alpha=d_lr)
exp_avg.mul_(beta1).add_(grad, alpha=d_lr * (1.0 - beta1))
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
s.mul_(beta2_sq).add_(grad, alpha=d_lr * (1.0 - beta2_sq))
sk_l1.add_(s.abs().sum())
if sk_l1 == 0:
return loss
numerator_weighted.mul_(beta2_sq).add_(numerator_acc, alpha=1.0 - beta2_sq) # fmt: skip
if lr > 0.0:
            d_hat = numerator_weighted / ((1.0 - beta2_sq) * sk_l1)
d = max(d, min(d_hat.item(), d * group['growth_rate']))
for group in self.param_groups:
group['numerator_weighted'] = numerator_weighted
group['d'] = d
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
self.apply_weight_decay(
p=p,
grad=None,
lr=d_lr,
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
p.addcdiv_(exp_avg, de_nom, value=-1.0)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), d0: float = 1e-06, growth_rate: float = inf, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, bias_correction: bool = False, eps: float = 1e-08)
|
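A usage sketch for the entry above (import path taken from the package column; the closure and toy loss are illustrative). Because `step` accepts an optional closure, the optimizer can also be driven in the closure style shown here.

import torch
from pytorch_optimizer.optimizer.dadapt import DAdaptAdam

model = torch.nn.Linear(10, 1)
optimizer = DAdaptAdam(
    model.parameters(),
    lr=1.0,                # leave at 1 unless training is unstable
    weight_decay=1e-2,
    weight_decouple=True,  # AdamW-style decoupled decay
    bias_correction=True,
)

def closure():
    optimizer.zero_grad()
    loss = model(torch.randn(8, 10)).pow(2).mean()
    loss.backward()
    return loss

optimizer.step(closure)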
716,126 |
pytorch_optimizer.optimizer.dadapt
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
betas: BETAS = (0.9, 0.999),
d0: float = 1e-6,
growth_rate: float = float('inf'),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
bias_correction: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'd': d0,
'growth_rate': growth_rate,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'bias_correction': bias_correction,
'step': 0,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), d0: float = 1e-06, growth_rate: float = inf, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, bias_correction: bool = False, eps: float = 1e-08)
|
716,129 |
pytorch_optimizer.optimizer.dadapt
|
__str__
| null |
def __str__(self) -> str:
return 'DAdaptAdam'
|
(self) -> str
|
716,165 |
pytorch_optimizer.optimizer.dadapt
|
DAdaptAdan
|
Adan with D-Adaptation. Leave LR set to 1 unless you encounter instability.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. decoupled weight decay.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param growth_rate: float. prevent the D estimate from growing faster than this multiplicative rate.
Default is inf, for unrestricted.
:param eps: float. term added to the denominator to improve numerical stability.
|
class DAdaptAdan(Optimizer, BaseOptimizer):
r"""Adan with D-Adaptation. Leave LR set to 1 unless you encounter instability.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. decoupled weight decay.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param growth_rate: float. prevent the D estimate from growing faster than this multiplicative rate.
Default is inf, for unrestricted.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
betas: BETAS = (0.98, 0.92, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = False,
d0: float = 1e-6,
growth_rate: float = float('inf'),
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'd': d0,
'growth_rate': growth_rate,
'k': 0,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'DAdaptAdan'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['step'] = 0
state['s'] = torch.zeros_like(p)
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['exp_avg_diff'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
group = self.param_groups[0]
beta1, beta2, beta3 = group['betas']
growth_rate = group['growth_rate']
d, lr = group['d'], group['lr']
d_lr = float(d * lr)
g_sq = torch.tensor([0.0], device=group['params'][0].device)
sk_sq_weighted = torch.tensor([0.0], device=group['params'][0].device)
sk_l1 = torch.tensor([0.0], device=group['params'][0].device)
if 'gsq_weighted' not in group:
group['gsq_weighted'] = torch.tensor([0.0], device=group['params'][0].device)
gsq_weighted = group['gsq_weighted']
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if 'step' not in state:
state['step'] = 0
state['s'] = torch.zeros_like(p)
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['exp_avg_diff'] = torch.zeros_like(p)
state['previous_grad'] = -grad.clone()
grad_diff = state['previous_grad']
grad_diff.add_(grad)
exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_diff']
exp_avg.mul_(beta1).add_(grad, alpha=d_lr * (1.0 - beta1))
exp_avg_diff.mul_(beta2).add_(grad_diff, alpha=d_lr * (1.0 - beta2))
grad_diff.mul_(beta2).add_(grad)
                grad_diff = to_real(grad_diff * grad_diff.conj())
                exp_avg_sq.mul_(beta3).add_(grad_diff, alpha=1.0 - beta3)
grad_power = to_real(grad * grad.conj())
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
g_sq.add_(grad_power.div_(de_nom).sum())
s = state['s']
s.mul_(beta3).add_(grad, alpha=d_lr * (1.0 - beta3))
sk_sq_weighted.add_(to_real(s * s.conj()).div_(de_nom).sum())
sk_l1.add_(s.abs().sum())
state['previous_grad'].copy_(-grad)
if sk_l1 == 0:
return loss
gsq_weighted.mul_(beta3).add_(g_sq, alpha=(d_lr ** 2) * (1.0 - beta3)) # fmt: skip
if lr > 0.0:
d_hat = (sk_sq_weighted / (1.0 - beta3) - gsq_weighted) / sk_l1
d = max(d, min(d_hat, d * growth_rate))
for group in self.param_groups:
group['gsq_weighted'] = gsq_weighted
group['d'] = d
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['step'] += 1
exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_diff']
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
if group['weight_decouple']:
p.mul_(1.0 - d_lr * group['weight_decay'])
p.addcdiv_(exp_avg, de_nom, value=-1.0)
p.addcdiv_(exp_avg_diff, de_nom, value=-beta2)
if not group['weight_decouple']:
p.div_(1.0 + d_lr * group['weight_decay'])
group['k'] += 1
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.98, 0.92, 0.99), weight_decay: float = 0.0, weight_decouple: bool = False, d0: float = 1e-06, growth_rate: float = inf, eps: float = 1e-08)
|
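A usage sketch for the entry above (import path from the package column; model and loss are placeholders). The current D estimate can be read back from the param group.

import torch
from pytorch_optimizer.optimizer.dadapt import DAdaptAdan

model = torch.nn.Linear(10, 1)
optimizer = DAdaptAdan(model.parameters(), lr=1.0, weight_decay=1e-2, weight_decouple=True)

loss = model(torch.randn(8, 10)).pow(2).mean()
loss.backward()
optimizer.step()
print(optimizer.param_groups[0]['d'])  # D estimate; grows from d0 as training progresses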
716,167 |
pytorch_optimizer.optimizer.dadapt
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
betas: BETAS = (0.98, 0.92, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = False,
d0: float = 1e-6,
growth_rate: float = float('inf'),
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'd': d0,
'growth_rate': growth_rate,
'k': 0,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.98, 0.92, 0.99), weight_decay: float = 0.0, weight_decouple: bool = False, d0: float = 1e-06, growth_rate: float = inf, eps: float = 1e-08)
|
716,170 |
pytorch_optimizer.optimizer.dadapt
|
__str__
| null |
def __str__(self) -> str:
return 'DAdaptAdan'
|
(self) -> str
|
716,206 |
pytorch_optimizer.optimizer.dadapt
|
DAdaptLion
|
Lion with D-Adaptation. Leave LR set to 1 unless you encounter instability. This implementation is based on V3.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
|
class DAdaptLion(Optimizer, BaseOptimizer):
r"""Lion with D-Adaptation. Leave LR set to 1 unless you encounter instability. This implementation is based on V3.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
betas: BETAS = (0.9, 0.999),
d0: float = 1e-6,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'd': d0,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'step': 0,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'DAdaptLion'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
group = self.param_groups[0]
device = group['params'][0].device
if 'numerator_weighted' not in group:
group['numerator_weighted'] = torch.tensor([0.0], device=device)
numerator_weighted = group['numerator_weighted']
sk_l1 = torch.tensor([0.0], device=device)
numerator_accumulator = torch.tensor([0.0], device=device)
beta1, beta2 = group['betas']
beta2_sq = math.sqrt(beta2)
d, lr = group['d'], group['lr']
d_lr: float = d * lr
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=d_lr,
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
exp_avg, s = state['exp_avg'], state['s']
update = exp_avg.clone().mul_(beta1).add_(grad, alpha=1.0 - beta1).sign_()
p.add_(update, alpha=-d_lr)
exp_avg.mul_(beta2).add_(grad, alpha=(1.0 - beta2) * d_lr)
numerator_accumulator.add_(torch.dot(update.flatten(), s.flatten()), alpha=d_lr)
s.mul_(beta2_sq).add_(update, alpha=(1.0 - beta2_sq) * d_lr)
sk_l1.add_(s.abs().sum())
numerator_weighted.mul_(beta2_sq).add_(numerator_accumulator, alpha=1.0 - beta2_sq)
if sk_l1 == 0:
return loss
if lr > 0.0:
d_hat: float = (numerator_weighted / ((1.0 - beta2_sq) * sk_l1)).item()
d = max(d, d_hat)
for group in self.param_groups:
group['step'] += 1
group['numerator_weighted'] = numerator_weighted
group['d'] = d
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), d0: float = 1e-06, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False)
|
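A usage sketch for the entry above (import path from the package column). The split into decay/no-decay groups is only an example of passing parameter-group dicts; it is not required by the optimizer.

import torch
from pytorch_optimizer.optimizer.dadapt import DAdaptLion

model = torch.nn.Linear(10, 1)
param_groups = [
    {'params': [model.weight], 'weight_decay': 1e-2},
    {'params': [model.bias], 'weight_decay': 0.0},
]
optimizer = DAdaptLion(param_groups, lr=1.0, weight_decouple=True)

loss = model(torch.randn(8, 10)).pow(2).mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()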
716,208 |
pytorch_optimizer.optimizer.dadapt
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
betas: BETAS = (0.9, 0.999),
d0: float = 1e-6,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'd': d0,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'step': 0,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), d0: float = 1e-06, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False)
|
716,211 |
pytorch_optimizer.optimizer.dadapt
|
__str__
| null |
def __str__(self) -> str:
return 'DAdaptLion'
|
(self) -> str
|
716,247 |
pytorch_optimizer.optimizer.dadapt
|
DAdaptSGD
|
SGD with D-Adaptation. Leave LR set to 1 unless you encounter instability. This implementation is based on V3.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param growth_rate: float. prevent the D estimate from growing faster than this multiplicative rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
|
class DAdaptSGD(Optimizer, BaseOptimizer):
r"""SGD with D-Adaptation. Leave LR set to 1 unless you encounter instability. This implementation is based on V3.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param growth_rate: float. prevent the D estimate from growing faster than this multiplicative rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
momentum: float = 0.9,
d0: float = 1e-6,
growth_rate: float = float('inf'),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0, range_type='[)')
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'd': d0,
'growth_rate': growth_rate,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'step': 0,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'DAdaptSGD'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['z'] = p.clone()
state['s'] = torch.zeros_like(p)
state['x0'] = p.clone()
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
group = self.param_groups[0]
device = group['params'][0].device
sk_sq = torch.tensor([0.0], device=device)
if 'numerator_weighted' not in group:
group['numerator_weighted'] = torch.tensor([0.0], device=device)
numerator_weighted = group['numerator_weighted']
if group['step'] == 0:
group['g0_norm'] = get_global_gradient_norm(self.param_groups, device).sqrt_().item()
g0_norm = group['g0_norm']
if g0_norm == 0:
return loss
d, lr = group['d'], group['lr']
d_lr: float = d * lr / g0_norm
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['z'] = p.clone()
state['s'] = torch.zeros_like(p)
state['x0'] = p.clone()
self.apply_weight_decay(
p=p,
grad=None,
lr=d_lr,
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
s = state['s']
numerator_weighted.add_(torch.dot(grad.flatten(), s.flatten()), alpha=d_lr)
s.add_(grad, alpha=d_lr)
sk_sq.add_(s.pow(2).sum())
if lr > 0.0:
d_hat = 2.0 * numerator_weighted / sk_sq.sqrt()
d = max(d, min(d_hat.item(), d * group['growth_rate']))
for group in self.param_groups:
group['step'] += 1
group['numerator_weighted'] = numerator_weighted
group['d'] = d
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
z = state['z']
z.copy_(state['x0'] - state['s'])
p.mul_(group['momentum']).add_(z, alpha=1.0 - group['momentum'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, momentum: float = 0.9, d0: float = 1e-06, growth_rate: float = inf, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False)
|
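A usage sketch for the entry above (import path from the package column; model and data are illustrative).

import torch
from pytorch_optimizer.optimizer.dadapt import DAdaptSGD

model = torch.nn.Linear(10, 1)
optimizer = DAdaptSGD(model.parameters(), lr=1.0, momentum=0.9)

for _ in range(3):
    optimizer.zero_grad()
    loss = model(torch.randn(8, 10)).pow(2).mean()
    loss.backward()
    optimizer.step()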
716,249 |
pytorch_optimizer.optimizer.dadapt
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
momentum: float = 0.9,
d0: float = 1e-6,
growth_rate: float = float('inf'),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0, range_type='[)')
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'd': d0,
'growth_rate': growth_rate,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'step': 0,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, momentum: float = 0.9, d0: float = 1e-06, growth_rate: float = inf, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False)
|
716,252 |
pytorch_optimizer.optimizer.dadapt
|
__str__
| null |
def __str__(self) -> str:
return 'DAdaptSGD'
|
(self) -> str
|
716,288 |
pytorch_optimizer.loss.dice
|
DiceLoss
|
Dice loss for image segmentation task. It supports binary, multiclass and multilabel cases.
Reference : https://github.com/BloodAxe/pytorch-toolbelt
:param mode: CLASS_MODE. loss mode 'binary', 'multiclass', or 'multilabel'.
:param classes: Optional[List[int]]. List of classes that contribute to the loss computation. By default,
all channels are included.
:param log_loss: bool. If True, loss computed as `-log(dice_coeff)`, otherwise `1 - dice_coeff`.
:param from_logits: bool. If True, assumes input is raw logits.
:param label_smooth: float. Smoothness constant for dice coefficient (a).
:param ignore_index: Optional[int]. Label that indicates ignored pixels (does not contribute to loss).
:param eps: float. epsilon.
|
class DiceLoss(_Loss):
r"""Dice loss for image segmentation task. It supports binary, multiclass and multilabel cases.
Reference : https://github.com/BloodAxe/pytorch-toolbelt
    :param mode: CLASS_MODE. loss mode 'binary', 'multiclass', or 'multilabel'.
    :param classes: Optional[List[int]]. List of classes that contribute to the loss computation. By default,
all channels are included.
:param log_loss: bool. If True, loss computed as `-log(dice_coeff)`, otherwise `1 - dice_coeff`.
:param from_logits: bool. If True, assumes input is raw logits.
:param label_smooth: float. Smoothness constant for dice coefficient (a).
:param ignore_index: Optional[int]. Label that indicates ignored pixels (does not contribute to loss).
:param eps: float. epsilon.
"""
def __init__(
self,
mode: CLASS_MODE = 'binary',
classes: Optional[List[int]] = None,
log_loss: bool = False,
from_logits: bool = True,
label_smooth: float = 0.0,
ignore_index: Optional[int] = None,
eps: float = 1e-6,
):
super().__init__()
if classes is not None:
if mode == 'binary':
raise ValueError('[-] Masking classes is not supported with mode=binary')
classes = torch.LongTensor(classes)
self.mode = mode
self.classes = classes
self.from_logits = from_logits
self.label_smooth = label_smooth
self.eps = eps
self.log_loss = log_loss
self.ignore_index = ignore_index
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
if self.from_logits:
# Apply activations to get [0..1] class probabilities
# Using Log-Exp as this gives more numerically stable result and does not cause vanishing gradient on
# extreme values 0 and 1
y_pred = y_pred.log_softmax(dim=1).exp() if self.mode == 'multiclass' else logsigmoid(y_pred).exp()
bs: int = y_true.size(0)
num_classes: int = y_pred.size(1)
dims: Tuple[int, ...] = (0, 2)
if self.mode == 'binary':
y_true = y_true.view(bs, 1, -1)
y_pred = y_pred.view(bs, 1, -1)
if self.ignore_index is not None:
mask = y_true != self.ignore_index
y_pred = y_pred * mask
y_true = y_true * mask
if self.mode == 'multiclass':
y_true = y_true.view(bs, -1)
y_pred = y_pred.view(bs, num_classes, -1)
if self.ignore_index is not None:
mask = y_true != self.ignore_index
y_pred = y_pred * mask.unsqueeze(1)
y_true = one_hot((y_true * mask).to(torch.long), num_classes)
y_true = y_true.permute(0, 2, 1) * mask.unsqueeze(1)
else:
y_true = one_hot(y_true, num_classes)
y_true = y_true.permute(0, 2, 1)
if self.mode == 'multilabel':
y_true = y_true.view(bs, num_classes, -1)
y_pred = y_pred.view(bs, num_classes, -1)
if self.ignore_index is not None:
mask = y_true != self.ignore_index
y_pred = y_pred * mask
y_true = y_true * mask
scores = self.compute_score(
y_pred, y_true.type_as(y_pred), label_smooth=self.label_smooth, eps=self.eps, dims=dims
)
loss = -torch.log(scores.clamp_min(self.eps)) if self.log_loss else 1.0 - scores
        # Dice loss is undefined for empty classes (no true pixels),
        # so we zero the contribution of channels that have no true pixels
# NOTE: A better workaround would be to use loss term `mean(y_pred)`
# for this case, however it will be a modified jaccard loss
mask = y_true.sum(dims) > 0
loss *= mask.to(loss.dtype)
if self.classes is not None:
loss = loss[self.classes]
return self.aggregate_loss(loss)
@staticmethod
def aggregate_loss(loss: torch.Tensor) -> torch.Tensor:
return loss.mean()
@staticmethod
def compute_score(
output: torch.Tensor,
target: torch.Tensor,
label_smooth: float = 0.0,
eps: float = 1e-6,
dims: Optional[Tuple[int, ...]] = None,
) -> torch.Tensor:
return soft_dice_score(output, target, label_smooth, eps, dims)
|
(mode: Literal['binary', 'multiclass', 'multilabel'] = 'binary', classes: Optional[List[int]] = None, log_loss: bool = False, from_logits: bool = True, label_smooth: float = 0.0, ignore_index: Optional[int] = None, eps: float = 1e-06)
|
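A usage sketch for the entry above (import path from the package column; shapes and class count are illustrative). For `mode='multiclass'` the prediction carries a class dimension while the target holds integer labels.

import torch
from pytorch_optimizer.loss.dice import DiceLoss

criterion = DiceLoss(mode='multiclass', from_logits=True)

logits = torch.randn(2, 3, 16, 16, requires_grad=True)  # (batch, num_classes, H, W) raw logits
target = torch.randint(0, 3, (2, 16, 16))                # (batch, H, W) integer class labels

loss = criterion(logits, target)
loss.backward()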
716,294 |
pytorch_optimizer.loss.dice
|
__init__
| null |
def __init__(
self,
mode: CLASS_MODE = 'binary',
classes: Optional[List[int]] = None,
log_loss: bool = False,
from_logits: bool = True,
label_smooth: float = 0.0,
ignore_index: Optional[int] = None,
eps: float = 1e-6,
):
super().__init__()
if classes is not None:
if mode == 'binary':
raise ValueError('[-] Masking classes is not supported with mode=binary')
classes = torch.LongTensor(classes)
self.mode = mode
self.classes = classes
self.from_logits = from_logits
self.label_smooth = label_smooth
self.eps = eps
self.log_loss = log_loss
self.ignore_index = ignore_index
|
(self, mode: Literal['binary', 'multiclass', 'multilabel'] = 'binary', classes: Optional[List[int]] = None, log_loss: bool = False, from_logits: bool = True, label_smooth: float = 0.0, ignore_index: Optional[int] = None, eps: float = 1e-06)
|
716,313 |
pytorch_optimizer.loss.dice
|
aggregate_loss
| null |
@staticmethod
def aggregate_loss(loss: torch.Tensor) -> torch.Tensor:
return loss.mean()
|
(loss: torch.Tensor) -> torch.Tensor
|
716,319 |
pytorch_optimizer.loss.dice
|
compute_score
| null |
@staticmethod
def compute_score(
output: torch.Tensor,
target: torch.Tensor,
label_smooth: float = 0.0,
eps: float = 1e-6,
dims: Optional[Tuple[int, ...]] = None,
) -> torch.Tensor:
return soft_dice_score(output, target, label_smooth, eps, dims)
|
(output: torch.Tensor, target: torch.Tensor, label_smooth: float = 0.0, eps: float = 1e-06, dims: Optional[Tuple[int, ...]] = None) -> torch.Tensor
|