index (int64, 0–731k) | package (string, 2–98 chars) ⌀ | name (string, 1–76 chars) | docstring (string, 0–281k chars) ⌀ | code (string, 4–1.07M chars) ⌀ | signature (string, 2–42.8k chars) ⌀ |
---|---|---|---|---|---|
716,326 |
pytorch_optimizer.loss.dice
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
if self.from_logits:
# Apply activations to get [0..1] class probabilities
# Using Log-Exp as this gives more numerically stable result and does not cause vanishing gradient on
# extreme values 0 and 1
y_pred = y_pred.log_softmax(dim=1).exp() if self.mode == 'multiclass' else logsigmoid(y_pred).exp()
bs: int = y_true.size(0)
num_classes: int = y_pred.size(1)
dims: Tuple[int, ...] = (0, 2)
if self.mode == 'binary':
y_true = y_true.view(bs, 1, -1)
y_pred = y_pred.view(bs, 1, -1)
if self.ignore_index is not None:
mask = y_true != self.ignore_index
y_pred = y_pred * mask
y_true = y_true * mask
if self.mode == 'multiclass':
y_true = y_true.view(bs, -1)
y_pred = y_pred.view(bs, num_classes, -1)
if self.ignore_index is not None:
mask = y_true != self.ignore_index
y_pred = y_pred * mask.unsqueeze(1)
y_true = one_hot((y_true * mask).to(torch.long), num_classes)
y_true = y_true.permute(0, 2, 1) * mask.unsqueeze(1)
else:
y_true = one_hot(y_true, num_classes)
y_true = y_true.permute(0, 2, 1)
if self.mode == 'multilabel':
y_true = y_true.view(bs, num_classes, -1)
y_pred = y_pred.view(bs, num_classes, -1)
if self.ignore_index is not None:
mask = y_true != self.ignore_index
y_pred = y_pred * mask
y_true = y_true * mask
scores = self.compute_score(
y_pred, y_true.type_as(y_pred), label_smooth=self.label_smooth, eps=self.eps, dims=dims
)
loss = -torch.log(scores.clamp_min(self.eps)) if self.log_loss else 1.0 - scores
# Dice loss is undefined for empty classes,
# so we zero the contribution of channels that have no true pixels.
# NOTE: A better workaround would be to use the loss term `mean(y_pred)`
# for this case; however, that would yield a modified dice loss.
mask = y_true.sum(dims) > 0
loss *= mask.to(loss.dtype)
if self.classes is not None:
loss = loss[self.classes]
return self.aggregate_loss(loss)
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
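A minimal sketch of the tensor shapes this forward method expects (the enclosing class and its score helper are not shown in this row, so only the reshaping visible above is reproduced): in 'multiclass' mode the targets are integer class indices of shape (B, ...) while the predictions are raw logits of shape (B, C, ...), and the method one-hot encodes the flattened targets before scoring.

import torch
from torch.nn.functional import one_hot

bs, num_classes = 4, 3
y_pred = torch.randn(bs, num_classes, 32, 32)         # raw logits, (B, C, H, W)
y_true = torch.randint(0, num_classes, (bs, 32, 32))  # class indices, (B, H, W)

# the same reshaping the forward pass performs in 'multiclass' mode (without ignore_index)
probs = y_pred.log_softmax(dim=1).exp().view(bs, num_classes, -1)     # (B, C, H*W)
target = one_hot(y_true.view(bs, -1), num_classes).permute(0, 2, 1)   # (B, C, H*W)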
716,360 |
pytorch_optimizer.optimizer.diffgrad
|
DiffGrad
|
An Optimization Method for Convolutional Neural Networks.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param rectify: bool. perform the rectified update similar to RAdam.
:param n_sma_threshold: int. number of SMA threshold (recommended value: 5).
:param degenerated_to_sgd: bool. degenerated to SGD.
:param ams_bound: bool. whether to use the AMSBound variant.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class DiffGrad(Optimizer, BaseOptimizer):
r"""An Optimization Method for Convolutional Neural Networks.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param rectify: bool. perform the rectified update similar to RAdam.
:param n_sma_threshold: int. number of SMA threshold (recommended value: 5).
:param degenerated_to_sgd: bool. degenerated to SGD.
:param ams_bound: bool. whether to use the AMSBound variant.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
rectify: bool = False,
n_sma_threshold: int = 5,
degenerated_to_sgd: bool = True,
ams_bound: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.n_sma_threshold = n_sma_threshold
self.degenerated_to_sgd = degenerated_to_sgd
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'rectify': rectify,
'ams_bound': ams_bound,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'diffGrad'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['previous_grad'] = torch.zeros_like(p)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
step_size, n_sma = self.get_rectify_step_size(
is_rectify=group['rectify'],
step=group['step'],
lr=group['lr'],
beta2=beta2,
n_sma_threshold=self.n_sma_threshold,
degenerated_to_sgd=self.degenerated_to_sgd,
)
step_size = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=step_size,
bias_correction1=bias_correction1,
)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['previous_grad'] = torch.zeros_like(p)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
de_nom = self.apply_ams_bound(
ams_bound=group['ams_bound'],
exp_avg_sq=exp_avg_sq,
max_exp_avg_sq=state.get('max_exp_avg_sq', None),
eps=group['eps'],
)
# compute diffGrad coefficient (dfc)
dfc = state['previous_grad'].clone()
dfc.sub_(grad).abs_().sigmoid_().mul_(exp_avg)
state['previous_grad'].copy_(grad)
if not group['rectify']:
p.addcdiv_(exp_avg, de_nom, value=-step_size)
continue
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
if n_sma >= self.n_sma_threshold:
p.addcdiv_(dfc, de_nom, value=-step_size)
elif step_size > 0:
p.add_(exp_avg, alpha=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, rectify: bool = False, n_sma_threshold: int = 5, degenerated_to_sgd: bool = True, ams_bound: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
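A minimal usage sketch for the optimizer above, importable from the module path shown in this row (the tiny linear model and random batch are placeholders for a real training setup):

import torch
from pytorch_optimizer.optimizer.diffgrad import DiffGrad

model = torch.nn.Linear(10, 2)
optimizer = DiffGrad(model.parameters(), lr=1e-3, rectify=False, ams_bound=False)

x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
loss = torch.nn.functional.cross_entropy(model(x), y)

optimizer.zero_grad()
loss.backward()
optimizer.step()  # applies the diffGrad friction coefficient to an Adam-style update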
716,362 |
pytorch_optimizer.optimizer.diffgrad
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
rectify: bool = False,
n_sma_threshold: int = 5,
degenerated_to_sgd: bool = True,
ams_bound: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.n_sma_threshold = n_sma_threshold
self.degenerated_to_sgd = degenerated_to_sgd
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'rectify': rectify,
'ams_bound': ams_bound,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, rectify: bool = False, n_sma_threshold: int = 5, degenerated_to_sgd: bool = True, ams_bound: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
716,365 |
pytorch_optimizer.optimizer.diffgrad
|
__str__
| null |
def __str__(self) -> str:
return 'diffGrad'
|
(self) -> str
|
716,386 |
pytorch_optimizer.optimizer.diffgrad
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_avg_sq'] = torch.zeros_like(p)
            state['previous_grad'] = torch.zeros_like(p)
            if group['ams_bound']:
                state['max_exp_avg_sq'] = torch.zeros_like(p)
            if group['adanorm']:
                state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
|
(self)
|
716,401 |
pytorch_optimizer.optimizer.fp16
|
DynamicLossScaler
|
Dynamically adjusts the loss scaling factor.
Dynamic loss scalers are important in mixed-precision training.
They help us avoid underflows and overflows in low-precision gradients.
See here for information:
<https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html#lossscaling>
Shamelessly stolen and adapted from FairSeq.
<https://github.com/pytorch/fairseq/blob/main/fairseq/optim/fp16_optimizer.py>
Reference : 'https://github.com/facebookresearch/ParlAI/blob/main/parlai/utils/fp16.py'
:param init_scale: Initial loss scale.
:param scale_factor: Factor by which to increase or decrease loss scale.
:param scale_window: If we do not experience overflow in scale_window iterations,
loss scale will increase by scale_factor.
:param tolerance: Pct of iterations that have overflowed after which we must decrease the loss scale.
:param threshold: If not None, the loss scale will not decrease below this threshold.
|
class DynamicLossScaler:
r"""Dynamically adjusts the loss scaling factor.
Dynamic loss scalers are important in mixed-precision training.
They help us avoid underflows and overflows in low-precision gradients.
See here for information:
<https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html#lossscaling>
Shamelessly stolen and adapted from FairSeq.
<https://github.com/pytorch/fairseq/blob/main/fairseq/optim/fp16_optimizer.py>
Reference : 'https://github.com/facebookresearch/ParlAI/blob/main/parlai/utils/fp16.py'
:param init_scale: Initial loss scale.
:param scale_factor: Factor by which to increase or decrease loss scale.
:param scale_window: If we do not experience overflow in scale_window iterations,
loss scale will increase by scale_factor.
:param tolerance: Pct of iterations that have overflowed after which we must decrease the loss scale.
:param threshold: If not None, the loss scale will not decrease below this threshold.
"""
def __init__(
self,
init_scale: float = 2.0 ** 15,
scale_factor: float = 2.0,
scale_window: int = 2000,
tolerance: float = 0.00,
threshold: Optional[float] = None,
): # fmt: skip
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self.iter: int = 0
self.last_overflow_iter: int = -1
self.last_rescale_iter: int = -1
self.overflows_since_rescale: int = 0
self.has_overflow_serial: bool = False
def update_scale(self, overflow: bool):
r"""Update the loss scale.
If overflow exceeds our tolerance, we decrease the loss scale.
If the number of iterations since the last overflow exceeds the scale window, we increase the loss scale.
:param overflow: bool. adjust scales to prevent overflow.
"""
iter_since_rescale: int = self.iter - self.last_rescale_iter
if overflow:
# calculate how often we overflowed already
self.last_overflow_iter = self.iter
self.overflows_since_rescale += 1
pct_overflow: float = self.overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
# decrease loss scale by the scale factor
self.decrease_loss_scale()
# reset iterations
self.last_rescale_iter = self.iter
self.overflows_since_rescale = 0
elif (self.iter - self.last_overflow_iter) % self.scale_window == 0:
# increase the loss scale by scale factor
self.loss_scale *= self.scale_factor
self.last_rescale_iter = self.iter
self.iter += 1
def decrease_loss_scale(self):
r"""Decrease the loss scale by self.scale_factor.
NOTE: the loss_scale will not go below `self.threshold`.
"""
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
|
(init_scale: float = 32768.0, scale_factor: float = 2.0, scale_window: int = 2000, tolerance: float = 0.0, threshold: Optional[float] = None)
|
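A hedged sketch of how the scaler above is typically driven; the surrounding fp16 training loop, parameter list, and overflow check are placeholders, not part of this module:

import torch
from pytorch_optimizer.optimizer.fp16 import DynamicLossScaler

scaler = DynamicLossScaler(init_scale=2.0 ** 15, scale_window=2000, tolerance=0.0)

def backward_with_scaling(loss, parameters):
    # scale the loss before backward so fp16 gradients stay representable
    (loss * scaler.loss_scale).backward()
    grads = [p.grad for p in parameters if p.grad is not None]
    overflow = any(not torch.isfinite(g).all() for g in grads)
    scaler.update_scale(overflow)
    if not overflow:
        for g in grads:
            g.div_(scaler.loss_scale)  # unscale before the optimizer step
    return overflow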
716,402 |
pytorch_optimizer.optimizer.fp16
|
__init__
| null |
def __init__(
self,
init_scale: float = 2.0 ** 15,
scale_factor: float = 2.0,
scale_window: int = 2000,
tolerance: float = 0.00,
threshold: Optional[float] = None,
): # fmt: skip
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self.iter: int = 0
self.last_overflow_iter: int = -1
self.last_rescale_iter: int = -1
self.overflows_since_rescale: int = 0
self.has_overflow_serial: bool = False
|
(self, init_scale: float = 32768.0, scale_factor: float = 2.0, scale_window: int = 2000, tolerance: float = 0.0, threshold: Optional[float] = None)
|
716,403 |
pytorch_optimizer.optimizer.fp16
|
decrease_loss_scale
|
Decrease the loss scale by self.scale_factor.
NOTE: the loss_scale will not go below `self.threshold`.
|
def decrease_loss_scale(self):
r"""Decrease the loss scale by self.scale_factor.
NOTE: the loss_scale will not go below `self.threshold`.
"""
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
|
(self)
|
716,404 |
pytorch_optimizer.optimizer.fp16
|
update_scale
|
Update the loss scale.
If overflow exceeds our tolerance, we decrease the loss scale.
If the number of iterations since the last overflow exceeds the scale window, we increase the loss scale.
:param overflow: bool. adjust scales to prevent overflow.
|
def update_scale(self, overflow: bool):
r"""Update the loss scale.
If overflow exceeds our tolerance, we decrease the loss scale.
If the number of iterations since the last overflow exceeds the scale window, we increase the loss scale.
:param overflow: bool. adjust scales to prevent overflow.
"""
iter_since_rescale: int = self.iter - self.last_rescale_iter
if overflow:
# calculate how often we overflowed already
self.last_overflow_iter = self.iter
self.overflows_since_rescale += 1
pct_overflow: float = self.overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
# decrease loss scale by the scale factor
self.decrease_loss_scale()
# reset iterations
self.last_rescale_iter = self.iter
self.overflows_since_rescale = 0
elif (self.iter - self.last_overflow_iter) % self.scale_window == 0:
# increase the loss scale by scale factor
self.loss_scale *= self.scale_factor
self.last_rescale_iter = self.iter
self.iter += 1
|
(self, overflow: bool)
|
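Concretely, with the defaults shown in the signature above (scale_factor=2.0, scale_window=2000, tolerance=0.0): any overflow immediately divides the loss scale by the scale factor, while scale_window consecutive overflow-free iterations multiply it back, so the scale keeps probing upward between overflows. A tiny check of that behaviour with hypothetical, smaller constants:

from pytorch_optimizer.optimizer.fp16 import DynamicLossScaler

scaler = DynamicLossScaler(init_scale=8.0, scale_factor=2.0, scale_window=4, tolerance=0.0)
scaler.update_scale(overflow=True)    # 8.0 -> 4.0
for _ in range(4):
    scaler.update_scale(overflow=False)
print(scaler.loss_scale)              # back to 8.0 after 4 clean iterations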
716,405 |
pytorch_optimizer.loss.focal
|
FocalCosineLoss
|
Focal Cosine loss function w/ logit input.
:param alpha: float. alpha.
:param gamma: float. gamma.
:param focal_weight: float. weight of focal loss.
:param reduction: str. type of reduction.
|
class FocalCosineLoss(nn.Module):
r"""Focal Cosine loss function w/ logit input.
:param alpha: float. alpha.
:param gamma: float. gamma.
:param focal_weight: float. weight of focal loss.
:param reduction: str. type of reduction.
"""
def __init__(self, alpha: float = 1.0, gamma: float = 2.0, focal_weight: float = 0.1, reduction: str = 'mean'):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.focal_weight = focal_weight
self.reduction = reduction
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
cosine_loss = cosine_embedding_loss(
y_pred,
one_hot(y_true, num_classes=y_pred.size(-1)),
torch.tensor([1], device=y_true.device),
reduction=self.reduction,
)
ce_loss = cross_entropy(normalize(y_pred), y_true, reduction='none')
pt = torch.exp(-ce_loss)
focal_loss = (self.alpha * (1 - pt) ** self.gamma * ce_loss).mean()
return cosine_loss + self.focal_weight * focal_loss
|
(alpha: float = 1.0, gamma: float = 2.0, focal_weight: float = 0.1, reduction: str = 'mean')
|
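A minimal usage sketch for the loss above, with hypothetical raw logits and integer class targets (the module path is the one shown in this row):

import torch
from pytorch_optimizer.loss.focal import FocalCosineLoss

criterion = FocalCosineLoss(alpha=1.0, gamma=2.0, focal_weight=0.1)

logits = torch.randn(8, 10)            # raw logits, (batch, num_classes)
targets = torch.randint(0, 10, (8,))   # integer class indices
loss = criterion(logits, targets)      # cosine term + weighted focal cross-entropy term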
716,411 |
pytorch_optimizer.loss.focal
|
__init__
| null |
def __init__(self, alpha: float = 1.0, gamma: float = 2.0, focal_weight: float = 0.1, reduction: str = 'mean'):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.focal_weight = focal_weight
self.reduction = reduction
|
(self, alpha: float = 1.0, gamma: float = 2.0, focal_weight: float = 0.1, reduction: str = 'mean')
|
716,441 |
pytorch_optimizer.loss.focal
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
cosine_loss = cosine_embedding_loss(
y_pred,
one_hot(y_true, num_classes=y_pred.size(-1)),
torch.tensor([1], device=y_true.device),
reduction=self.reduction,
)
ce_loss = cross_entropy(normalize(y_pred), y_true, reduction='none')
pt = torch.exp(-ce_loss)
focal_loss = (self.alpha * (1 - pt) ** self.gamma * ce_loss).mean()
return cosine_loss + self.focal_weight * focal_loss
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
716,475 |
pytorch_optimizer.loss.focal
|
FocalLoss
|
Focal loss function w/ logit input.
:param alpha: float. alpha.
:param gamma: float. gamma.
|
class FocalLoss(nn.Module):
r"""Focal loss function w/ logit input.
:param alpha: float. alpha.
:param gamma: float. gamma.
"""
def __init__(self, alpha: float = 1.0, gamma: float = 2.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
bce_loss = binary_cross_entropy_with_logits(y_pred, y_true, reduction='none')
pt = torch.exp(-bce_loss)
focal_loss = self.alpha * (1 - pt) ** self.gamma * bce_loss
return focal_loss.mean()
|
(alpha: float = 1.0, gamma: float = 2.0)
|
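A minimal usage sketch for the binary focal loss above; targets must be floats with the same shape as the logits because the loss is built on binary_cross_entropy_with_logits:

import torch
from pytorch_optimizer.loss.focal import FocalLoss

criterion = FocalLoss(alpha=1.0, gamma=2.0)

logits = torch.randn(8, 1)                     # raw logits
targets = torch.randint(0, 2, (8, 1)).float()  # binary targets in {0, 1}
loss = criterion(logits, targets)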
716,481 |
pytorch_optimizer.loss.focal
|
__init__
| null |
def __init__(self, alpha: float = 1.0, gamma: float = 2.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
|
(self, alpha: float = 1.0, gamma: float = 2.0)
|
716,511 |
pytorch_optimizer.loss.focal
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
bce_loss = binary_cross_entropy_with_logits(y_pred, y_true, reduction='none')
pt = torch.exp(-bce_loss)
focal_loss = self.alpha * (1 - pt) ** self.gamma * bce_loss
return focal_loss.mean()
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
716,545 |
pytorch_optimizer.loss.focal
|
FocalTverskyLoss
|
Focal Tversky Loss w/ logits input.
:param alpha: float. alpha.
:param beta: float. beta.
:param gamma: float. gamma.
:param smooth: float. smooth factor.
|
class FocalTverskyLoss(nn.Module):
r"""Focal Tversky Loss w/ logits input.
:param alpha: float. alpha.
:param beta: float. beta.
:param gamma: float. gamma.
:param smooth: float. smooth factor.
"""
def __init__(self, alpha: float = 0.5, beta: float = 0.5, gamma: float = 1.0, smooth: float = 1e-6):
super().__init__()
self.gamma = gamma
self.tversky = TverskyLoss(alpha, beta, smooth)
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
return self.tversky(y_pred, y_true) ** self.gamma
|
(alpha: float = 0.5, beta: float = 0.5, gamma: float = 1.0, smooth: float = 1e-06)
|
716,551 |
pytorch_optimizer.loss.focal
|
__init__
| null |
def __init__(self, alpha: float = 0.5, beta: float = 0.5, gamma: float = 1.0, smooth: float = 1e-6):
super().__init__()
self.gamma = gamma
self.tversky = TverskyLoss(alpha, beta, smooth)
|
(self, alpha: float = 0.5, beta: float = 0.5, gamma: float = 1.0, smooth: float = 1e-06)
|
716,581 |
pytorch_optimizer.loss.focal
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
return self.tversky(y_pred, y_true) ** self.gamma
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
716,615 |
pytorch_optimizer.optimizer.fromage
|
Fromage
|
On the distance between two neural networks and the stability of learning.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param p_bound: Optional[float]. Restricts the optimisation to a bounded set. A value of 2.0 restricts parameter
norms to lie within 2x their initial norms. This regularises the model class.
|
class Fromage(Optimizer, BaseOptimizer):
r"""On the distance between two neural networks and the stability of learning.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param p_bound: Optional[float]. Restricts the optimisation to a bounded set. A value of 2.0 restricts parameter
norms to lie within 2x their initial norms. This regularises the model class.
"""
def __init__(self, params: PARAMETERS, lr: float = 1e-2, p_bound: Optional[float] = None):
self.validate_learning_rate(lr)
self.p_bound = p_bound
defaults: DEFAULTS = {'lr': lr}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Fromage'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if self.p_bound is not None:
state['max'] = p.norm().mul_(self.p_bound)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
pre_factor: float = math.sqrt(1 + group['lr'] ** 2)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0 and self.p_bound is not None:
state['max'] = p.norm().mul_(self.p_bound)
p_norm, g_norm = p.norm(), grad.norm()
if p_norm > 0.0 and g_norm > 0.0:
p.add_(grad * (p_norm / g_norm), alpha=-group['lr'])
else:
p.add_(grad, alpha=-group['lr'])
p.div_(pre_factor)
if self.p_bound is not None:
p_norm = p.norm()
if p_norm > state['max']:
p.mul_(state['max']).div_(p_norm)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, p_bound: Optional[float] = None)
|
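A minimal usage sketch for the optimizer above; p_bound=2.0 caps each parameter tensor's norm at twice its initial norm, as the docstring notes (the model and batch are placeholders):

import torch
from pytorch_optimizer.optimizer.fromage import Fromage

model = torch.nn.Linear(10, 2)
optimizer = Fromage(model.parameters(), lr=1e-2, p_bound=2.0)

x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
loss = torch.nn.functional.cross_entropy(model(x), y)

optimizer.zero_grad()
loss.backward()
optimizer.step()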
716,617 |
pytorch_optimizer.optimizer.fromage
|
__init__
| null |
def __init__(self, params: PARAMETERS, lr: float = 1e-2, p_bound: Optional[float] = None):
self.validate_learning_rate(lr)
self.p_bound = p_bound
defaults: DEFAULTS = {'lr': lr}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, p_bound: Optional[float] = None)
|
716,620 |
pytorch_optimizer.optimizer.fromage
|
__str__
| null |
def __str__(self) -> str:
return 'Fromage'
|
(self) -> str
|
716,656 |
pytorch_optimizer.optimizer.gsam
|
GSAM
|
Surrogate Gap Guided Sharpness-Aware Minimization.
Example:
-------
Here's an example::
model = YourModel()
base_optimizer = AdamP(model.parameters())
lr_scheduler = LinearScheduler(base_optimizer, t_max=num_total_steps)
rho_scheduler = ProportionScheduler(lr_scheduler, max_lr=max_lr)
optimizer = GSAM(model.parameters(), base_optimizer, model, rho_scheduler)
def loss_fn(predictions, targets):
return F.cross_entropy(predictions, targets)
for inputs, targets in data:
optimizer.set_closure(loss_fn, inputs, targets)
predictions, loss = optimizer.step()
lr_scheduler.step()
optimizer.update_rho_t()
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param base_optimizer: Optimizer. base optimizer.
:param model: nn.Module. model.
:param alpha: float. rho alpha.
:param rho_scheduler: rho scheduler.
:param adaptive: bool. element-wise Adaptive SAM.
:param perturb_eps: float. epsilon for perturbation.
:param kwargs: Dict. parameters for optimizer.
|
class GSAM(Optimizer, BaseOptimizer):
r"""Surrogate Gap Guided Sharpness-Aware Minimization.
Example:
-------
Here's an example::
model = YourModel()
base_optimizer = AdamP(model.parameters())
lr_scheduler = LinearScheduler(base_optimizer, t_max=num_total_steps)
rho_scheduler = ProportionScheduler(lr_scheduler, max_lr=max_lr)
optimizer = GSAM(model.parameters(), base_optimizer, model, rho_scheduler)
def loss_fn(predictions, targets):
return F.cross_entropy(predictions, targets)
for inputs, targets in data:
optimizer.set_closure(loss_fn, inputs, targets)
predictions, loss = optimizer.step()
lr_scheduler.step()
optimizer.update_rho_t()
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param base_optimizer: Optimizer. base optimizer.
:param model: nn.Module. model.
:param alpha: float. rho alpha.
:param rho_scheduler: rho scheduler.
:param adaptive: bool. element-wise Adaptive SAM.
:param perturb_eps: float. epsilon for perturbation.
:param kwargs: Dict. parameters for optimizer.
"""
def __init__(
self,
params: PARAMETERS,
base_optimizer: OPTIMIZER,
model: nn.Module,
rho_scheduler,
alpha: float = 0.4,
adaptive: bool = False,
perturb_eps: float = 1e-12,
**kwargs,
):
self.validate_range(alpha, 'alpha', 0.0, 1.0)
self.model = model
self.rho_scheduler = rho_scheduler
self.alpha = alpha
self.adaptive = adaptive
self.perturb_eps = perturb_eps
self.rho_t: float = 0.0
self.forward_backward_func: Optional[Callable] = None
if hasattr(ReduceOp, 'AVG'):
self.grad_reduce = ReduceOp.AVG
self.manual_average: bool = False
else: # PyTorch <= 1.11.0 does not have AVG, need to manually average across processes
self.grad_reduce = ReduceOp.SUM
self.manual_average: bool = True
self.base_optimizer = base_optimizer
self.param_groups = self.base_optimizer.param_groups
defaults: DEFAULTS = {'adaptive': adaptive}
defaults.update(kwargs)
super().__init__(params, defaults)
self.update_rho_t()
def __str__(self) -> str:
return 'GSAM'
@torch.no_grad()
def reset(self):
pass
@torch.no_grad()
def update_rho_t(self) -> float:
self.rho_t = self.rho_scheduler.step()
return self.rho_t
@torch.no_grad()
def perturb_weights(self, rho: float):
grad_norm = self.grad_norm(weight_adaptive=self.adaptive)
for group in self.param_groups:
scale = rho / (grad_norm + self.perturb_eps)
for p in group['params']:
if p.grad is None:
continue
self.state[p]['old_g'] = p.grad.clone()
e_w = (torch.pow(p, 2) if self.adaptive else 1.0) * p.grad * scale.to(p)
p.add_(e_w) # climb to the local maximum "w + e(w)"
self.state[p]['e_w'] = e_w
@torch.no_grad()
def un_perturb(self):
for group in self.param_groups:
for p in group['params']:
if 'e_w' in self.state[p]:
p.sub_(self.state[p]['e_w'])
@torch.no_grad()
def gradient_decompose(self, alpha: float = 0.0):
inner_prod = 0.0
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
inner_prod += torch.sum(self.state[p]['old_g'] * p.grad)
new_grad_norm = self.grad_norm(by=None)
old_grad_norm = self.grad_norm(by='old_g')
cosine = inner_prod / (new_grad_norm * old_grad_norm + self.perturb_eps)
# gradient decomposition
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
vertical = self.state[p]['old_g'] - cosine * old_grad_norm * p.grad / (
new_grad_norm + self.perturb_eps
)
p.grad.add_(vertical, alpha=-alpha)
@torch.no_grad()
def sync_grad(self):
if is_initialized():
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
all_reduce(p.grad, op=self.grad_reduce)
if self.manual_average:
p.grad.div_(float(get_world_size()))
@torch.no_grad()
def grad_norm(self, by: Optional[str] = None, weight_adaptive: bool = False) -> torch.Tensor:
return torch.norm(
torch.stack(
[
((torch.abs(p) if weight_adaptive else 1.0) * (p.grad if not by else self.state[p][by])).norm(p=2)
for group in self.param_groups
for p in group['params']
if p.grad is not None
]
),
p=2,
)
def maybe_no_sync(self):
return self.model.no_sync() if is_initialized() else ExitStack()
@torch.no_grad()
def set_closure(self, loss_fn: nn.Module, inputs: torch.Tensor, targets: torch.Tensor, **kwargs):
r"""Set closure.
Create `self.forward_backward_func`, which is a function such that `self.forward_backward_func()`
automatically performs forward and backward passes. This function does not take any arguments,
and the inputs and targets data should be pre-set in the definition of partial-function.
:param loss_fn: nn.Module. loss function.
:param inputs: torch.Tensor. inputs.
:param targets: torch.Tensor. targets.
"""
def get_grad():
self.base_optimizer.zero_grad()
with torch.enable_grad():
outputs = self.model(inputs)
loss = loss_fn(outputs, targets, **kwargs)
loss.backward()
return outputs, loss.detach()
self.forward_backward_func = get_grad
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> Tuple[torch.Tensor, float]:
get_grad = closure if closure else self.forward_backward_func
with self.maybe_no_sync():
outputs, loss = get_grad()
self.perturb_weights(rho=self.rho_t)
disable_running_stats(self.model)
get_grad()
self.gradient_decompose(self.alpha)
self.un_perturb()
self.sync_grad()
self.base_optimizer.step()
enable_running_stats(self.model)
return outputs, loss
def load_state_dict(self, state_dict: Dict):
super().load_state_dict(state_dict)
self.base_optimizer.param_groups = self.param_groups
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], base_optimizer: Type[torch.optim.optimizer.Optimizer], model: torch.nn.modules.module.Module, rho_scheduler, alpha: float = 0.4, adaptive: bool = False, perturb_eps: float = 1e-12, **kwargs)
|
716,658 |
pytorch_optimizer.optimizer.gsam
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
base_optimizer: OPTIMIZER,
model: nn.Module,
rho_scheduler,
alpha: float = 0.4,
adaptive: bool = False,
perturb_eps: float = 1e-12,
**kwargs,
):
self.validate_range(alpha, 'alpha', 0.0, 1.0)
self.model = model
self.rho_scheduler = rho_scheduler
self.alpha = alpha
self.adaptive = adaptive
self.perturb_eps = perturb_eps
self.rho_t: float = 0.0
self.forward_backward_func: Optional[Callable] = None
if hasattr(ReduceOp, 'AVG'):
self.grad_reduce = ReduceOp.AVG
self.manual_average: bool = False
else: # PyTorch <= 1.11.0 does not have AVG, need to manually average across processes
self.grad_reduce = ReduceOp.SUM
self.manual_average: bool = True
self.base_optimizer = base_optimizer
self.param_groups = self.base_optimizer.param_groups
defaults: DEFAULTS = {'adaptive': adaptive}
defaults.update(kwargs)
super().__init__(params, defaults)
self.update_rho_t()
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], base_optimizer: Type[torch.optim.optimizer.Optimizer], model: torch.nn.modules.module.Module, rho_scheduler, alpha: float = 0.4, adaptive: bool = False, perturb_eps: float = 1e-12, **kwargs)
|
716,661 |
pytorch_optimizer.optimizer.gsam
|
__str__
| null |
def __str__(self) -> str:
return 'GSAM'
|
(self) -> str
|
716,674 |
pytorch_optimizer.optimizer.gsam
|
grad_norm
| null |
@torch.no_grad()
def grad_norm(self, by: Optional[str] = None, weight_adaptive: bool = False) -> torch.Tensor:
    return torch.norm(
        torch.stack(
            [
                ((torch.abs(p) if weight_adaptive else 1.0) * (p.grad if not by else self.state[p][by])).norm(p=2)
                for group in self.param_groups
                for p in group['params']
                if p.grad is not None
            ]
        ),
        p=2,
    )
|
(self, by: Optional[str] = None, weight_adaptive: bool = False) -> torch.Tensor
|
716,676 |
pytorch_optimizer.optimizer.gsam
|
load_state_dict
| null |
def load_state_dict(self, state_dict: Dict):
super().load_state_dict(state_dict)
self.base_optimizer.param_groups = self.param_groups
|
(self, state_dict: Dict)
|
716,677 |
pytorch_optimizer.optimizer.gsam
|
maybe_no_sync
| null |
def maybe_no_sync(self):
return self.model.no_sync() if is_initialized() else ExitStack()
|
(self)
|
716,687 |
pytorch_optimizer.optimizer.gsam
|
set_closure
|
Set closure.
Create `self.forward_backward_func`, which is a function such that `self.forward_backward_func()`
automatically performs forward and backward passes. This function does not take any arguments,
and the inputs and targets data should be pre-set in the definition of partial-function.
:param loss_fn: nn.Module. loss function.
:param inputs: torch.Tensor. inputs.
:param targets: torch.Tensor. targets.
|
@torch.no_grad()
def set_closure(self, loss_fn: nn.Module, inputs: torch.Tensor, targets: torch.Tensor, **kwargs):
    r"""Set closure.
    Create `self.forward_backward_func`, which is a function such that `self.forward_backward_func()`
    automatically performs forward and backward passes. This function does not take any arguments,
    and the inputs and targets data should be pre-set in the definition of partial-function.
    :param loss_fn: nn.Module. loss function.
    :param inputs: torch.Tensor. inputs.
    :param targets: torch.Tensor. targets.
    """
    def get_grad():
        self.base_optimizer.zero_grad()
        with torch.enable_grad():
            outputs = self.model(inputs)
            loss = loss_fn(outputs, targets, **kwargs)
        loss.backward()
        return outputs, loss.detach()
    self.forward_backward_func = get_grad
|
(self, loss_fn: torch.nn.modules.module.Module, inputs: torch.Tensor, targets: torch.Tensor, **kwargs)
|
716,705 |
pytorch_optimizer.optimizer.shampoo_utils
|
Graft
|
Base class to perform grafting onto Shampoo. This class does no grafting.
|
class Graft:
r"""Base class to perform grafting onto Shampoo. This class does no grafting."""
def __init__(self, *args):
pass
def add_statistics(self, grad: torch.Tensor, unused_beta2: float):
r"""Add the statistics."""
pass
def precondition_gradient(self, grad: torch.Tensor) -> torch.Tensor:
r"""Get preconditioned gradient."""
return grad
def update_momentum(self, update: torch.Tensor, unused_beta1: float) -> torch.Tensor: # noqa: ARG002
r"""Update momentum."""
return update
|
(*args)
|
716,706 |
pytorch_optimizer.optimizer.shampoo_utils
|
__init__
| null |
def __init__(self, *args):
pass
|
(self, *args)
|
716,707 |
pytorch_optimizer.optimizer.shampoo_utils
|
add_statistics
|
Add the statistics.
|
def add_statistics(self, grad: torch.Tensor, unused_beta2: float):
r"""Add the statistics."""
pass
|
(self, grad: torch.Tensor, unused_beta2: float)
|
716,708 |
pytorch_optimizer.optimizer.shampoo_utils
|
precondition_gradient
|
Get preconditioned gradient.
|
def precondition_gradient(self, grad: torch.Tensor) -> torch.Tensor:
r"""Get preconditioned gradient."""
return grad
|
(self, grad: torch.Tensor) -> torch.Tensor
|
716,709 |
pytorch_optimizer.optimizer.shampoo_utils
|
update_momentum
|
Update momentum.
|
def update_momentum(self, update: torch.Tensor, unused_beta1: float) -> torch.Tensor: # noqa: ARG002
r"""Update momentum."""
return update
|
(self, update: torch.Tensor, unused_beta1: float) -> torch.Tensor
|
716,710 |
pytorch_optimizer.optimizer.gravity
|
Gravity
|
a Kinematic Approach on Optimization in Deep Learning.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param alpha: float. alpha controls the V initialization.
:param beta: float. beta will be used to compute running average of V.
|
class Gravity(Optimizer, BaseOptimizer):
r"""a Kinematic Approach on Optimization in Deep Learning.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param alpha: float. alpha controls the V initialization.
:param beta: float. beta will be used to compute running average of V.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-2,
alpha: float = 0.01,
beta: float = 0.9,
):
self.validate_learning_rate(lr)
self.validate_range(alpha, 'alpha', 0.0, 1.0)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
defaults: DEFAULTS = {'lr': lr, 'alpha': alpha, 'beta': beta}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Gravity'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['v'] = torch.empty_like(p).normal_(mean=0.0, std=group['alpha'] / group['lr'])
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta_t: float = (group['beta'] * group['step'] + 1) / (group['step'] + 2)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['v'] = torch.empty_like(p).normal_(mean=0.0, std=group['alpha'] / group['lr'])
v = state['v']
m = 1.0 / grad.abs().max()
zeta = grad / (1.0 + (grad / m) ** 2)
v.mul_(beta_t).add_(zeta, alpha=1.0 - beta_t)
p.add_(v, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, alpha: float = 0.01, beta: float = 0.9)
|
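A minimal usage sketch for the optimizer above; alpha controls the scale of the random initialisation of the per-parameter velocity buffer v (the model and batch are placeholders):

import torch
from pytorch_optimizer.optimizer.gravity import Gravity

model = torch.nn.Linear(10, 2)
optimizer = Gravity(model.parameters(), lr=1e-2, alpha=0.01, beta=0.9)

x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
loss = torch.nn.functional.cross_entropy(model(x), y)

optimizer.zero_grad()
loss.backward()
optimizer.step()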
716,712 |
pytorch_optimizer.optimizer.gravity
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-2,
alpha: float = 0.01,
beta: float = 0.9,
):
self.validate_learning_rate(lr)
self.validate_range(alpha, 'alpha', 0.0, 1.0)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
defaults: DEFAULTS = {'lr': lr, 'alpha': alpha, 'beta': beta}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, alpha: float = 0.01, beta: float = 0.9)
|
716,715 |
pytorch_optimizer.optimizer.gravity
|
__str__
| null |
def __str__(self) -> str:
return 'Gravity'
|
(self) -> str
|
716,751 |
pytorch_optimizer.loss.jaccard
|
JaccardLoss
|
Jaccard loss for image segmentation task. It supports binary, multiclass and multilabel cases.
Reference : https://github.com/BloodAxe/pytorch-toolbelt
:param mode: CLASS_MODE. loss mode: 'binary', 'multiclass', or 'multilabel'.
:param classes: Optional[List[int]]. List of classes that contribute to the loss computation. By default,
all channels are included.
:param log_loss: If True, loss computed as `-log(jaccard)`; otherwise `1 - jaccard`
:param from_logits: bool. If True, assumes input is raw logits.
:param label_smooth: float. Smoothness constant for dice coefficient (a).
:param eps: float. epsilon.
|
class JaccardLoss(_Loss):
r"""Jaccard loss for image segmentation task. It supports binary, multiclass and multilabel cases.
Reference : https://github.com/BloodAxe/pytorch-toolbelt
:param mode: CLASS_MODE. loss mode: 'binary', 'multiclass', or 'multilabel'.
:param classes: Optional[List[int]]. List of classes that contribute to the loss computation. By default,
all channels are included.
:param log_loss: If True, loss computed as `-log(jaccard)`; otherwise `1 - jaccard`
:param from_logits: bool. If True, assumes input is raw logits.
:param label_smooth: float. Smoothness constant for dice coefficient (a).
:param eps: float. epsilon.
"""
def __init__(
self,
mode: CLASS_MODE,
classes: List[int] = None,
log_loss: bool = False,
from_logits: bool = True,
label_smooth: float = 0.0,
eps: float = 1e-6,
):
super().__init__()
if classes is not None:
if mode == 'binary':
raise ValueError('[-] Masking classes is not supported with mode=binary')
classes = torch.LongTensor(classes)
self.mode = mode
self.classes = classes
self.log_loss = log_loss
self.from_logits = from_logits
self.label_smooth = label_smooth
self.eps = eps
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
if self.from_logits:
# Apply activations to get [0..1] class probabilities
# Using Log-Exp as this gives more numerically stable result and does not cause vanishing gradient on
# extreme values 0 and 1
y_pred = y_pred.log_softmax(dim=1).exp() if self.mode == 'multiclass' else logsigmoid(y_pred).exp()
bs: int = y_true.size(0)
num_classes: int = y_pred.size(1)
dims: Tuple[int, ...] = (0, 2)
if self.mode == 'binary':
y_true = y_true.view(bs, 1, -1)
y_pred = y_pred.view(bs, 1, -1)
if self.mode == 'multiclass':
y_true = y_true.view(bs, -1)
y_pred = y_pred.view(bs, num_classes, -1)
y_true = one_hot(y_true, num_classes)
y_true = y_true.permute(0, 2, 1)
if self.mode == 'multilabel':
y_true = y_true.view(bs, num_classes, -1)
y_pred = y_pred.view(bs, num_classes, -1)
scores = soft_jaccard_score(
y_pred, y_true.type(y_pred.dtype), label_smooth=self.label_smooth, eps=self.eps, dims=dims
)
loss = -torch.log(scores.clamp_min(self.eps)) if self.log_loss else 1.0 - scores
# IoU loss is defined only for non-empty classes,
# so we zero the contribution of channels that have no true pixels.
# NOTE: A better workaround would be to use the loss term `mean(y_pred)`
# for this case; however, that would yield a modified jaccard loss.
mask = y_true.sum(dims) > 0
loss *= mask.float()
if self.classes is not None:
loss = loss[self.classes]
return loss.mean()
|
(mode: Literal['binary', 'multiclass', 'multilabel'], classes: List[int] = None, log_loss: bool = False, from_logits: bool = True, label_smooth: float = 0.0, eps: float = 1e-06)
|
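A minimal usage sketch for the multiclass case of the loss above; the logits are (B, C, H, W) and the targets are integer class maps of shape (B, H, W), matching the reshaping done in forward:

import torch
from pytorch_optimizer.loss.jaccard import JaccardLoss

criterion = JaccardLoss(mode='multiclass', from_logits=True)

logits = torch.randn(4, 3, 32, 32)           # (B, C, H, W) raw logits
targets = torch.randint(0, 3, (4, 32, 32))   # (B, H, W) class indices
loss = criterion(logits, targets)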
716,757 |
pytorch_optimizer.loss.jaccard
|
__init__
| null |
def __init__(
self,
mode: CLASS_MODE,
classes: List[int] = None,
log_loss: bool = False,
from_logits: bool = True,
label_smooth: float = 0.0,
eps: float = 1e-6,
):
super().__init__()
if classes is not None:
if mode == 'binary':
raise ValueError('[-] Masking classes is not supported with mode=binary')
classes = torch.LongTensor(classes)
self.mode = mode
self.classes = classes
self.log_loss = log_loss
self.from_logits = from_logits
self.label_smooth = label_smooth
self.eps = eps
|
(self, mode: Literal['binary', 'multiclass', 'multilabel'], classes: Optional[List[int]] = None, log_loss: bool = False, from_logits: bool = True, label_smooth: float = 0.0, eps: float = 1e-06)
|
716,787 |
pytorch_optimizer.loss.jaccard
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
if self.from_logits:
# Apply activations to get [0..1] class probabilities
# Using Log-Exp as this gives more numerically stable result and does not cause vanishing gradient on
# extreme values 0 and 1
y_pred = y_pred.log_softmax(dim=1).exp() if self.mode == 'multiclass' else logsigmoid(y_pred).exp()
bs: int = y_true.size(0)
num_classes: int = y_pred.size(1)
dims: Tuple[int, ...] = (0, 2)
if self.mode == 'binary':
y_true = y_true.view(bs, 1, -1)
y_pred = y_pred.view(bs, 1, -1)
if self.mode == 'multiclass':
y_true = y_true.view(bs, -1)
y_pred = y_pred.view(bs, num_classes, -1)
y_true = one_hot(y_true, num_classes)
y_true = y_true.permute(0, 2, 1)
if self.mode == 'multilabel':
y_true = y_true.view(bs, num_classes, -1)
y_pred = y_pred.view(bs, num_classes, -1)
scores = soft_jaccard_score(
y_pred, y_true.type(y_pred.dtype), label_smooth=self.label_smooth, eps=self.eps, dims=dims
)
loss = -torch.log(scores.clamp_min(self.eps)) if self.log_loss else 1.0 - scores
# IoU loss is defined only for non-empty classes,
# so we zero the contribution of channels that have no true pixels.
# NOTE: A better workaround would be to use the loss term `mean(y_pred)`
# for this case; however, that would yield a modified jaccard loss.
mask = y_true.sum(dims) > 0
loss *= mask.float()
if self.classes is not None:
loss = loss[self.classes]
return loss.mean()
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
716,821 |
pytorch_optimizer.optimizer.lars
|
LARS
|
Layer-wise Adaptive Rate Scaling (no rate scaling or weight decay for parameters <= 1D).
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param weight_decay: float. weight decay (L2 penalty).
:param momentum: float. momentum.
:param dampening: float. dampening for momentum.
:param trust_coefficient: float. trust_coefficient.
:param nesterov: bool. enables nesterov momentum.
|
class LARS(Optimizer, BaseOptimizer):
r"""Layer-wise Adaptive Rate Scaling (no rate scaling or weight decay for parameters <= 1D).
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param weight_decay: float. weight decay (L2 penalty).
:param momentum: float. momentum.
:param dampening: float. dampening for momentum.
:param trust_coefficient: float. trust_coefficient.
:param nesterov: bool. enables nesterov momentum.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
weight_decay: float = 0.0,
momentum: float = 0.9,
dampening: float = 0.0,
trust_coefficient: float = 1e-3,
nesterov: bool = False,
):
self.validate_learning_rate(lr)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_range(dampening, 'dampening', 0.0, 1.0)
self.validate_non_negative(trust_coefficient, 'trust_coefficient')
defaults: DEFAULTS = {
'lr': lr,
'weight_decay': weight_decay,
'momentum': momentum,
'dampening': dampening,
'trust_coefficient': trust_coefficient,
'nesterov': nesterov,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Lars'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['mu'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
if p.ndim > 1: # if not normalization gamma/beta or bias
param_norm = torch.linalg.norm(p)
update_norm = torch.linalg.norm(grad)
one = torch.ones_like(param_norm)
trust_ratio = torch.where(
param_norm > 0.0,
torch.where(update_norm > 0.0, (group['trust_coefficient'] * param_norm / update_norm), one),
one,
)
grad.add_(p, alpha=group['weight_decay'])
grad.mul_(trust_ratio)
if group['momentum'] > 0.0:
state = self.state[p]
if 'momentum_buffer' not in state:
state['momentum_buffer'] = grad.clone().detach()
mb = state['momentum_buffer']
mb.mul_(group['momentum']).add_(grad, alpha=1.0 - group['dampening'])
if group['nesterov']:
grad.add_(mb, alpha=group['momentum'])
else:
grad.copy_(mb)
p.add_(grad, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, weight_decay: float = 0.0, momentum: float = 0.9, dampening: float = 0.0, trust_coefficient: float = 0.001, nesterov: bool = False)
|
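A minimal usage sketch for the optimizer above; note that 1-D parameters (biases, norm scales) skip both the trust-ratio scaling and weight decay, as the class docstring states:

import torch
from pytorch_optimizer.optimizer.lars import LARS

model = torch.nn.Sequential(torch.nn.Linear(10, 16), torch.nn.ReLU(), torch.nn.Linear(16, 2))
optimizer = LARS(model.parameters(), lr=1e-3, weight_decay=1e-4, momentum=0.9, nesterov=False)

x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
loss = torch.nn.functional.cross_entropy(model(x), y)

optimizer.zero_grad()
loss.backward()
optimizer.step()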
716,823 |
pytorch_optimizer.optimizer.lars
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
weight_decay: float = 0.0,
momentum: float = 0.9,
dampening: float = 0.0,
trust_coefficient: float = 1e-3,
nesterov: bool = False,
):
self.validate_learning_rate(lr)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_range(dampening, 'dampening', 0.0, 1.0)
self.validate_non_negative(trust_coefficient, 'trust_coefficient')
defaults: DEFAULTS = {
'lr': lr,
'weight_decay': weight_decay,
'momentum': momentum,
'dampening': dampening,
'trust_coefficient': trust_coefficient,
'nesterov': nesterov,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, weight_decay: float = 0.0, momentum: float = 0.9, dampening: float = 0.0, trust_coefficient: float = 0.001, nesterov: bool = False)
|
716,826 |
pytorch_optimizer.optimizer.lars
|
__str__
| null |
def __str__(self) -> str:
return 'Lars'
|
(self) -> str
|
716,862 |
pytorch_optimizer.loss.ldam
|
LDAMLoss
|
LDAM Loss.
:param num_class_list: List[int]. list of number of class.
:param max_m: float. max margin (`C` term in the paper).
:param weight: Optional[torch.Tensor]. class weight.
:param s: float. scaler.
|
class LDAMLoss(nn.Module):
r"""LDAM Loss.
:param num_class_list: List[int]. list of number of class.
:param max_m: float. max margin (`C` term in the paper).
:param weight: Optional[torch.Tensor]. class weight.
:param s: float. scaler.
"""
def __init__(
self, num_class_list: List[int], max_m: float = 0.5, weight: Optional[torch.Tensor] = None, s: float = 30.0
):
super().__init__()
cls_num_list = np.asarray(num_class_list)
m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
m_list *= max_m / np.max(m_list)
self.m_list = torch.FloatTensor(m_list).unsqueeze(0)
self.weight = weight
self.s = s
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
index = torch.zeros_like(y_pred, dtype=torch.bool)
index.scatter_(1, y_true.view(-1, 1), 1)
batch_m = torch.matmul(self.m_list.to(index.device), index.float().transpose(0, 1))
batch_m = batch_m.view((-1, 1))
x_m = y_pred - batch_m
output = torch.where(index, x_m, y_pred)
return cross_entropy(self.s * output, y_true, weight=self.weight)
|
(num_class_list: List[int], max_m: float = 0.5, weight: Optional[torch.Tensor] = None, s: float = 30.0)
|
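A minimal usage sketch for the loss above; num_class_list holds hypothetical per-class sample counts of an imbalanced dataset, which determine the per-class margins:

import torch
from pytorch_optimizer.loss.ldam import LDAMLoss

num_class_list = [500, 100, 20]   # hypothetical class frequencies
criterion = LDAMLoss(num_class_list, max_m=0.5, s=30.0)

logits = torch.randn(8, 3)
targets = torch.randint(0, 3, (8,))
loss = criterion(logits, targets)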
716,868 |
pytorch_optimizer.loss.ldam
|
__init__
| null |
def __init__(
self, num_class_list: List[int], max_m: float = 0.5, weight: Optional[torch.Tensor] = None, s: float = 30.0
):
super().__init__()
cls_num_list = np.asarray(num_class_list)
m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
m_list *= max_m / np.max(m_list)
self.m_list = torch.FloatTensor(m_list).unsqueeze(0)
self.weight = weight
self.s = s
|
(self, num_class_list: List[int], max_m: float = 0.5, weight: Optional[torch.Tensor] = None, s: float = 30.0)
|
716,898 |
pytorch_optimizer.loss.ldam
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
index = torch.zeros_like(y_pred, dtype=torch.bool)
index.scatter_(1, y_true.view(-1, 1), 1)
batch_m = torch.matmul(self.m_list.to(index.device), index.float().transpose(0, 1))
batch_m = batch_m.view((-1, 1))
x_m = y_pred - batch_m
output = torch.where(index, x_m, y_pred)
return cross_entropy(self.s * output, y_true, weight=self.weight)
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
716,932 |
pytorch_optimizer.optimizer.lomo
|
LOMO
|
Full Parameter Fine-tuning for Large Language Models with Limited Resources.
Reference : https://github.com/OpenLMLab/LOMO/blob/main/src/lomo.py
Check the usage from here : https://github.com/OpenLMLab/LOMO/blob/main/src/lomo_trainer.py
:param model: nn.Module. pytorch model.
:param lr: float. learning rate.
:param clip_grad_norm: Optional[float]. clip grad norm.
:param clip_grad_value: Optional[float]. clip grad value.
|
class LOMO(BaseOptimizer, Optimizer):
r"""Full Parameter Fine-tuning for Large Language Models with Limited Resources.
Reference : https://github.com/OpenLMLab/LOMO/blob/main/src/lomo.py
Check the usage from here : https://github.com/OpenLMLab/LOMO/blob/main/src/lomo_trainer.py
:param model: nn.Module. pytorch model.
:param lr: float. learning rate.
:param clip_grad_norm: Optional[float]. clip grad norm.
:param clip_grad_value: Optional[float]. clip grad value.
"""
def __init__(
self,
model: nn.Module,
lr: float = 1e-3,
clip_grad_norm: Optional[float] = None,
clip_grad_value: Optional[float] = None,
):
self.validate_learning_rate(lr)
self.validate_non_negative(clip_grad_norm, 'clip_grad_norm')
self.validate_non_negative(clip_grad_value, 'clip_grad_value')
self.model = model
self.lr = lr
self.clip_grad_norm = clip_grad_norm
self.clip_grad_value = clip_grad_value
self.local_rank: int = int(os.environ.get('LOCAL_RANK', 0))
self.gather_norm: bool = False
self.grad_norms: List[torch.Tensor] = []
self.clip_coef: Optional[float] = None
p0: torch.Tensor = next(iter(self.model.parameters()))
self.grad_func: Callable[[Any], Any] = (
self.fuse_update_zero3() if hasattr(p0, 'ds_tensor') else self.fuse_update()
)
self.loss_scaler: Optional[DynamicLossScaler] = None
if p0.dtype == torch.float16:
if clip_grad_norm is None:
raise ValueError(
'[-] Loss scaling is recommended to be used with grad norm to get better performance.'
)
self.loss_scaler = DynamicLossScaler(init_scale=2 ** 16) # fmt: skip
for _, p in self.model.named_parameters():
if p.requires_grad:
p.register_hook(self.grad_func)
defaults: DEFAULTS = {'lr': lr}
super().__init__(self.model.parameters(), defaults)
def __str__(self) -> str:
return 'LOMO'
@torch.no_grad()
def reset(self):
pass
def fuse_update(self) -> Callable[[Any], Any]:
@torch.no_grad()
def func(x: Any) -> Any:
for _, p in self.model.named_parameters():
if not p.requires_grad or p.grad is None:
continue
if self.loss_scaler and self.loss_scaler.has_overflow_serial or has_overflow(p.grad):
p.grad = None
self.loss_scaler.has_overflow_serial = True
break
grad_fp32 = p.grad.to(torch.float32)
p.grad = None
if self.loss_scaler:
grad_fp32.div_(self.loss_scaler.loss_scale)
if self.gather_norm:
self.grad_norms.append(torch.norm(grad_fp32, 2.0))
else:
if self.clip_grad_value is not None and self.clip_grad_value > 0.0:
grad_fp32.clamp_(min=-self.clip_grad_value, max=self.clip_grad_value)
if self.clip_grad_norm is not None and self.clip_grad_norm > 0.0 and self.clip_coef is not None:
grad_fp32.mul_(self.clip_coef)
p_fp32 = p.to(torch.float32)
p_fp32.add_(grad_fp32, alpha=-self.lr)
p.copy_(p_fp32)
return x
return func
def fuse_update_zero3(self) -> Callable[[Any], Any]: # pragma: no cover
@torch.no_grad()
def func(x: torch.Tensor) -> torch.Tensor:
for _, p in self.model.named_parameters():
if p.grad is None:
continue
all_reduce(p.grad, op=ReduceOp.AVG, async_op=False)
if self.loss_scaler and self.loss_scaler.has_overflow_serial or has_overflow(p.grad):
p.grad = None
self.loss_scaler.has_overflow_serial = True
break
grad_fp32 = p.grad.to(torch.float32)
p.grad = None
param_fp32 = p.ds_tensor.to(torch.float32)
if self.loss_scaler:
grad_fp32.div_(self.loss_scaler.loss_scale)
if self.gather_norm:
self.grad_norms.append(torch.norm(grad_fp32, 2.0))
else:
one_dim_grad_fp32 = grad_fp32.view(-1)
partition_size: int = p.ds_tensor.numel()
start: int = partition_size * self.local_rank
end: int = min(start + partition_size, grad_fp32.numel())
partitioned_grad_fp32 = one_dim_grad_fp32.narrow(0, start, end - start)
if self.clip_grad_value is not None:
partitioned_grad_fp32.clamp_(min=-self.clip_grad_value, max=self.clip_grad_value)
if self.clip_grad_norm is not None and self.clip_grad_norm > 0 and self.clip_coef is not None:
partitioned_grad_fp32.mul_(self.clip_coef)
partitioned_p = param_fp32.narrow(0, 0, end - start)
partitioned_p.add_(partitioned_grad_fp32, alpha=-self.lr)
p.ds_tensor[: end - start] = partitioned_p # fmt: skip
return x
return func
def fused_backward(self, loss, lr: float):
self.lr = lr
if self.clip_grad_norm is not None and self.clip_grad_norm > 0.0 and self.clip_coef is None:
raise ValueError(
'clip_grad_norm is not None, but clip_coef is None. '
'Please call optimizer.grad_norm() before optimizer.fused_backward().'
)
if self.loss_scaler:
loss = loss * self.loss_scaler.loss_scale
loss.backward()
self.grad_func(0)
def grad_norm(self, loss):
self.gather_norm = True
self.grad_norms = []
if self.loss_scaler:
self.loss_scaler.has_overflow_serial = False
loss = loss * self.loss_scaler.loss_scale
loss.backward(retain_graph=True)
self.grad_func(0)
if self.loss_scaler and self.loss_scaler.has_overflow_serial:
self.loss_scaler.update_scale(overflow=True)
with torch.no_grad():
for _, p in self.model.named_parameters():
p.grad = None
return
with torch.no_grad():
self.grad_norms = torch.stack(self.grad_norms)
total_norm = torch.norm(self.grad_norms, 2.0)
self.clip_coef = torch.clamp(float(self.clip_grad_norm) / (total_norm + 1e-6), max=1.0)
self.gather_norm = False
|
(model: torch.nn.modules.module.Module, lr: float = 0.001, clip_grad_norm: Optional[float] = None, clip_grad_value: Optional[float] = None)
|
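As a usage illustration for the LOMO entry above (not part of the extracted rows), here is a minimal sketch of the two-pass training flow, assuming a toy fp32 model and the module path pytorch_optimizer.optimizer.lomo shown in the package column; when clip_grad_norm is set, grad_norm() must run on a first backward pass so that clip_coef exists before fused_backward().
import torch
import torch.nn as nn
from pytorch_optimizer.optimizer.lomo import LOMO

# hypothetical toy setup; LOMO fuses the SGD update into gradient hooks,
# so parameters are updated during backward() rather than in step().
model = nn.Linear(16, 4)
optimizer = LOMO(model, lr=1e-3, clip_grad_norm=1.0)

x, y = torch.randn(8, 16), torch.randn(8, 4)

# pass 1: gather per-parameter gradient norms and compute clip_coef
loss = nn.functional.mse_loss(model(x), y)
optimizer.grad_norm(loss)

# pass 2: recompute the loss, then run the clipped, fused parameter update
loss = nn.functional.mse_loss(model(x), y)
optimizer.fused_backward(loss, lr=1e-3)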
716,934 |
pytorch_optimizer.optimizer.lomo
|
__init__
| null |
def __init__(
self,
model: nn.Module,
lr: float = 1e-3,
clip_grad_norm: Optional[float] = None,
clip_grad_value: Optional[float] = None,
):
self.validate_learning_rate(lr)
self.validate_non_negative(clip_grad_norm, 'clip_grad_norm')
self.validate_non_negative(clip_grad_value, 'clip_grad_value')
self.model = model
self.lr = lr
self.clip_grad_norm = clip_grad_norm
self.clip_grad_value = clip_grad_value
self.local_rank: int = int(os.environ.get('LOCAL_RANK', 0))
self.gather_norm: bool = False
self.grad_norms: List[torch.Tensor] = []
self.clip_coef: Optional[float] = None
p0: torch.Tensor = next(iter(self.model.parameters()))
self.grad_func: Callable[[Any], Any] = (
self.fuse_update_zero3() if hasattr(p0, 'ds_tensor') else self.fuse_update()
)
self.loss_scaler: Optional[DynamicLossScaler] = None
if p0.dtype == torch.float16:
if clip_grad_norm is None:
raise ValueError(
'[-] Loss scaling is recommended to be used with grad norm to get better performance.'
)
self.loss_scaler = DynamicLossScaler(init_scale=2 ** 16) # fmt: skip
for _, p in self.model.named_parameters():
if p.requires_grad:
p.register_hook(self.grad_func)
defaults: DEFAULTS = {'lr': lr}
super().__init__(self.model.parameters(), defaults)
|
(self, model: torch.nn.modules.module.Module, lr: float = 0.001, clip_grad_norm: Optional[float] = None, clip_grad_value: Optional[float] = None)
|
716,937 |
pytorch_optimizer.optimizer.lomo
|
__str__
| null |
def __str__(self) -> str:
return 'LOMO'
|
(self) -> str
|
716,948 |
pytorch_optimizer.optimizer.lomo
|
fuse_update
| null |
def fuse_update(self) -> Callable[[Any], Any]:
@torch.no_grad()
def func(x: Any) -> Any:
for _, p in self.model.named_parameters():
if not p.requires_grad or p.grad is None:
continue
if self.loss_scaler and self.loss_scaler.has_overflow_serial or has_overflow(p.grad):
p.grad = None
self.loss_scaler.has_overflow_serial = True
break
grad_fp32 = p.grad.to(torch.float32)
p.grad = None
if self.loss_scaler:
grad_fp32.div_(self.loss_scaler.loss_scale)
if self.gather_norm:
self.grad_norms.append(torch.norm(grad_fp32, 2.0))
else:
if self.clip_grad_value is not None and self.clip_grad_value > 0.0:
grad_fp32.clamp_(min=-self.clip_grad_value, max=self.clip_grad_value)
if self.clip_grad_norm is not None and self.clip_grad_norm > 0.0 and self.clip_coef is not None:
grad_fp32.mul_(self.clip_coef)
p_fp32 = p.to(torch.float32)
p_fp32.add_(grad_fp32, alpha=-self.lr)
p.copy_(p_fp32)
return x
return func
|
(self) -> Callable[[Any], Any]
|
716,949 |
pytorch_optimizer.optimizer.lomo
|
fuse_update_zero3
| null |
def fuse_update_zero3(self) -> Callable[[Any], Any]: # pragma: no cover
@torch.no_grad()
def func(x: torch.Tensor) -> torch.Tensor:
for _, p in self.model.named_parameters():
if p.grad is None:
continue
all_reduce(p.grad, op=ReduceOp.AVG, async_op=False)
if self.loss_scaler and self.loss_scaler.has_overflow_serial or has_overflow(p.grad):
p.grad = None
self.loss_scaler.has_overflow_serial = True
break
grad_fp32 = p.grad.to(torch.float32)
p.grad = None
param_fp32 = p.ds_tensor.to(torch.float32)
if self.loss_scaler:
grad_fp32.div_(self.loss_scaler.loss_scale)
if self.gather_norm:
self.grad_norms.append(torch.norm(grad_fp32, 2.0))
else:
one_dim_grad_fp32 = grad_fp32.view(-1)
partition_size: int = p.ds_tensor.numel()
start: int = partition_size * self.local_rank
end: int = min(start + partition_size, grad_fp32.numel())
partitioned_grad_fp32 = one_dim_grad_fp32.narrow(0, start, end - start)
if self.clip_grad_value is not None:
partitioned_grad_fp32.clamp_(min=-self.clip_grad_value, max=self.clip_grad_value)
if self.clip_grad_norm is not None and self.clip_grad_norm > 0 and self.clip_coef is not None:
partitioned_grad_fp32.mul_(self.clip_coef)
partitioned_p = param_fp32.narrow(0, 0, end - start)
partitioned_p.add_(partitioned_grad_fp32, alpha=-self.lr)
p.ds_tensor[: end - start] = partitioned_p # fmt: skip
return x
return func
|
(self) -> Callable[[Any], Any]
|
716,950 |
pytorch_optimizer.optimizer.lomo
|
fused_backward
| null |
def fused_backward(self, loss, lr: float):
self.lr = lr
if self.clip_grad_norm is not None and self.clip_grad_norm > 0.0 and self.clip_coef is None:
raise ValueError(
'clip_grad_norm is not None, but clip_coef is None. '
'Please call optimizer.grad_norm() before optimizer.fused_backward().'
)
if self.loss_scaler:
loss = loss * self.loss_scaler.loss_scale
loss.backward()
self.grad_func(0)
|
(self, loss, lr: float)
|
716,953 |
pytorch_optimizer.optimizer.lomo
|
grad_norm
| null |
def grad_norm(self, loss):
self.gather_norm = True
self.grad_norms = []
if self.loss_scaler:
self.loss_scaler.has_overflow_serial = False
loss = loss * self.loss_scaler.loss_scale
loss.backward(retain_graph=True)
self.grad_func(0)
if self.loss_scaler and self.loss_scaler.has_overflow_serial:
self.loss_scaler.update_scale(overflow=True)
with torch.no_grad():
for _, p in self.model.named_parameters():
p.grad = None
return
with torch.no_grad():
self.grad_norms = torch.stack(self.grad_norms)
total_norm = torch.norm(self.grad_norms, 2.0)
self.clip_coef = torch.clamp(float(self.clip_grad_norm) / (total_norm + 1e-6), max=1.0)
self.gather_norm = False
|
(self, loss)
|
716,977 |
pytorch_optimizer.optimizer.lamb
|
Lamb
|
Large Batch Optimization for Deep Learning.
This Lamb implementation is based on the paper v3, which does not use de-biasing.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param rectify: bool. perform the rectified update similar to RAdam.
:param degenerated_to_sgd: bool. degenerated to SGD.
:param n_sma_threshold: int. (recommended is 5).
:param grad_averaging: bool. whether to apply (1 - beta1) to the gradient when calculating running averages of the gradient.
:param max_grad_norm: float. max gradient norm to clip.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param adam: bool. always use trust ratio = 1, which turns this into Adam. Useful for comparison purposes.
:param pre_norm: bool. perform pre-normalization of all gradients.
:param eps: float. term added to the denominator to improve numerical stability.
|
class Lamb(Optimizer, BaseOptimizer):
r"""Large Batch Optimization for Deep Learning.
This Lamb implementation is based on the paper v3, which does not use de-biasing.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param rectify: bool. perform the rectified update similar to RAdam.
:param degenerated_to_sgd: bool. degenerated to SGD.
:param n_sma_threshold: int. (recommended is 5).
    :param grad_averaging: bool. whether to apply (1 - beta1) to the gradient when calculating running averages of the gradient.
:param max_grad_norm: float. max gradient norm to clip.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param adam: bool. always use trust ratio = 1, which turns this into Adam. Useful for comparison purposes.
:param pre_norm: bool. perform pre-normalization of all gradients.
:param eps: float. term added to the denominator to improve numerical stability.
"""
clamp: float = 10.0
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
rectify: bool = False,
degenerated_to_sgd: bool = False,
n_sma_threshold: int = 5,
grad_averaging: bool = True,
max_grad_norm: float = 1.0,
adam: bool = False,
pre_norm: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(max_grad_norm, 'max_grad_norm')
self.validate_non_negative(eps, 'eps')
self.degenerated_to_sgd = degenerated_to_sgd
self.n_sma_threshold = n_sma_threshold
self.pre_norm = pre_norm
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'rectify': rectify,
'grad_averaging': grad_averaging,
'max_grad_norm': max_grad_norm,
'adam': adam,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Lamb'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def get_global_gradient_norm(self) -> Union[torch.Tensor, float]:
if self.defaults['max_grad_norm'] == 0.0:
return 1.0
global_grad_norm = get_global_gradient_norm(self.param_groups, self.param_groups[0]['params'][0].device)
global_grad_norm.sqrt_().add_(self.defaults['eps'])
return torch.clamp(self.defaults['max_grad_norm'] / global_grad_norm, max=1.0)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
grad_norm = 1.0
if self.pre_norm:
grad_norm = self.get_global_gradient_norm()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
beta3: float = 1.0 - beta1 if group['grad_averaging'] else 1.0
bias_correction1: float = 1.0 - beta1 ** group['step']
step_size, n_sma = self.get_rectify_step_size(
is_rectify=group['rectify'],
step=group['step'],
lr=group['lr'],
beta2=beta2,
n_sma_threshold=self.n_sma_threshold,
degenerated_to_sgd=self.degenerated_to_sgd,
)
step_size = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=step_size,
bias_correction1=bias_correction1,
)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
if self.pre_norm:
grad.div_(grad_norm)
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=beta3)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
if group['rectify']:
update = p.clone()
if n_sma >= self.n_sma_threshold:
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
update.addcdiv_(exp_avg, de_nom, value=-step_size)
else:
update.add_(exp_avg, alpha=-step_size)
else:
update = exp_avg / exp_avg_sq.sqrt().add_(group['eps'])
weight_norm = torch.linalg.norm(p).clamp_(min=0, max=self.clamp)
p_norm = torch.linalg.norm(update)
trust_ratio: float = 1.0 if weight_norm == 0 or p_norm == 0 else weight_norm / (p_norm + group['eps'])
state['weight_norm'] = weight_norm
state['adam_norm'] = p_norm
state['trust_ratio'] = trust_ratio
if group['adam']:
trust_ratio = 1.0
if group['rectify']:
if n_sma >= self.n_sma_threshold:
p.addcdiv_(exp_avg, de_nom, value=-step_size * trust_ratio)
else:
p.add_(exp_avg, alpha=-step_size * trust_ratio)
else:
p.add_(update, alpha=-step_size * trust_ratio)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, rectify: bool = False, degenerated_to_sgd: bool = False, n_sma_threshold: int = 5, grad_averaging: bool = True, max_grad_norm: float = 1.0, adam: bool = False, pre_norm: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-06)
|
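As a usage sketch for the Lamb entry above (the toy model and hyper-parameters are hypothetical), showing that the per-layer trust ratio recorded in the optimizer state can be inspected after a step:
import torch
import torch.nn as nn
from pytorch_optimizer.optimizer.lamb import Lamb

# Lamb scales each layer's Adam-style update by a trust ratio = ||w|| / ||update||,
# which is the part of the method aimed at very large batch sizes.
model = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 10))
optimizer = Lamb(model.parameters(), lr=1e-3, weight_decay=1e-2, max_grad_norm=1.0)

x, y = torch.randn(128, 32), torch.randint(0, 10, (128,))
loss = nn.functional.cross_entropy(model(x), y)

optimizer.zero_grad()
loss.backward()
optimizer.step()

# each parameter's state stores the trust ratio used for its last update
first_param = next(model.parameters())
print(optimizer.state[first_param]['trust_ratio'])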
716,979 |
pytorch_optimizer.optimizer.lamb
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
rectify: bool = False,
degenerated_to_sgd: bool = False,
n_sma_threshold: int = 5,
grad_averaging: bool = True,
max_grad_norm: float = 1.0,
adam: bool = False,
pre_norm: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(max_grad_norm, 'max_grad_norm')
self.validate_non_negative(eps, 'eps')
self.degenerated_to_sgd = degenerated_to_sgd
self.n_sma_threshold = n_sma_threshold
self.pre_norm = pre_norm
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'rectify': rectify,
'grad_averaging': grad_averaging,
'max_grad_norm': max_grad_norm,
'adam': adam,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, rectify: bool = False, degenerated_to_sgd: bool = False, n_sma_threshold: int = 5, grad_averaging: bool = True, max_grad_norm: float = 1.0, adam: bool = False, pre_norm: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-06)
|
716,982 |
pytorch_optimizer.optimizer.lamb
|
__str__
| null |
def __str__(self) -> str:
return 'Lamb'
|
(self) -> str
|
716,994 |
pytorch_optimizer.optimizer.lamb
|
get_global_gradient_norm
| null |
@torch.no_grad()
def get_global_gradient_norm(self) -> Union[torch.Tensor, float]:
    if self.defaults['max_grad_norm'] == 0.0:
        return 1.0
    global_grad_norm = get_global_gradient_norm(self.param_groups, self.param_groups[0]['params'][0].device)
    global_grad_norm.sqrt_().add_(self.defaults['eps'])
    return torch.clamp(self.defaults['max_grad_norm'] / global_grad_norm, max=1.0)
|
(self) -> Union[torch.Tensor, float]
|
717,019 |
pytorch_optimizer.optimizer.shampoo_utils
|
LayerWiseGrafting
|
Layer-wise grafting.
Grafting is a technique to fix the layer-wise scale of the Shampoo optimizer.
https://arxiv.org/pdf/2002.11803.pdf studies this in detail. It
allows us to plug the Shampoo optimizer into settings where SGD/AdaGrad
is already well tuned. Grafting onto Shampoo means taking the Shampoo direction
but using the step magnitude from the grafted optimizer, such as AdaGrad or SGD.
|
class LayerWiseGrafting(IntEnum):
r"""Layer-wise grafting.
    Grafting is a technique to fix the layer-wise scale of the Shampoo optimizer.
    https://arxiv.org/pdf/2002.11803.pdf studies this in detail. It
    allows us to plug the Shampoo optimizer into settings where SGD/AdaGrad
    is already well tuned. Grafting onto Shampoo means taking the Shampoo direction
    but using the step magnitude from the grafted optimizer, such as AdaGrad or SGD.
"""
NONE = 0
SGD = 1
ADAGRAD = 2
RMSPROP = 3
SQRTN = 4
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
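A small, self-contained sketch of how the enum behaves (illustration only; the Shampoo configuration that consumes the value is not shown here):
from pytorch_optimizer.optimizer.shampoo_utils import LayerWiseGrafting

# IntEnum members compare equal to plain integers, so a graft type can be
# passed around either as the enum member or as its integer value.
graft = LayerWiseGrafting.SGD

print(graft, int(graft))                     # LayerWiseGrafting.SGD 1
print(graft == 1)                            # True
print(LayerWiseGrafting(2).name)             # 'ADAGRAD'
print([g.name for g in LayerWiseGrafting])   # ['NONE', 'SGD', 'ADAGRAD', 'RMSPROP', 'SQRTN']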
717,020 |
pytorch_optimizer.lr_scheduler.linear_warmup
|
LinearScheduler
|
Linear LR Scheduler w/ linear warmup.
|
class LinearScheduler(BaseLinearWarmupScheduler):
r"""Linear LR Scheduler w/ linear warmup."""
def _step(self) -> float:
return self.max_lr + (self.min_lr - self.max_lr) * (self.step_t - self.warmup_steps) / (
self.total_steps - self.warmup_steps
)
|
(optimizer: Type[torch.optim.optimizer.Optimizer], t_max: int, max_lr: float, min_lr: float = 0.0, init_lr: float = 0.0, warmup_steps: int = 0)
|
717,023 |
pytorch_optimizer.lr_scheduler.linear_warmup
|
_step
| null |
def _step(self) -> float:
return self.max_lr + (self.min_lr - self.max_lr) * (self.step_t - self.warmup_steps) / (
self.total_steps - self.warmup_steps
)
|
(self) -> float
|
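To make the interpolation in _step concrete, a standalone re-statement of the same formula evaluated at a few hypothetical steps (max_lr=1e-3, min_lr=0.0, 10 warmup steps, 100 total steps):
def linear_lr(step_t: int, max_lr: float, min_lr: float, warmup_steps: int, total_steps: int) -> float:
    # same closed form as LinearScheduler._step: anneal linearly from max_lr
    # at the end of warmup down to min_lr at total_steps.
    return max_lr + (min_lr - max_lr) * (step_t - warmup_steps) / (total_steps - warmup_steps)

print(linear_lr(10, 1e-3, 0.0, 10, 100))   # 0.001  (start of the decay phase)
print(linear_lr(55, 1e-3, 0.0, 10, 100))   # 0.0005 (halfway through the decay)
print(linear_lr(100, 1e-3, 0.0, 10, 100))  # 0.0    (end of the schedule)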
717,027 |
pytorch_optimizer.optimizer.lion
|
Lion
|
Symbolic Discovery of Optimization Algorithms.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param use_gc: bool. use gradient centralization.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
|
class Lion(Optimizer, BaseOptimizer):
r"""Symbolic Discovery of Optimization Algorithms.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param use_gc: bool. use gradient centralization.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-4,
betas: BETAS = (0.9, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
use_gc: bool = False,
r: float = 0.95,
adanorm: bool = False,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.use_gc = use_gc
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adanorm': adanorm,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Lion'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
if self.use_gc:
centralize_gradient(grad, gc_conv_only=False)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg = state['exp_avg']
update = exp_avg.clone()
update.mul_(beta1).add_(grad, alpha=1.0 - beta1).sign_()
exp_avg.mul_(beta2).add_(s_grad, alpha=1.0 - beta2)
p.add_(update, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.0001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.99), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, use_gc: bool = False, r: float = 0.95, adanorm: bool = False)
|
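To show what the sign-based update in Lion.step reduces to, a small out-of-place sketch of the per-parameter math (local names only; assumes adanorm, weight decay and gradient centralization are disabled):
import torch

def lion_update(p, grad, exp_avg, lr=1e-4, beta1=0.9, beta2=0.99):
    # direction: sign of the interpolation between the momentum buffer and the new gradient
    update = (exp_avg * beta1 + grad * (1.0 - beta1)).sign()
    p = p - lr * update
    # the momentum buffer itself is updated with the raw gradient, using beta2
    exp_avg = exp_avg * beta2 + grad * (1.0 - beta2)
    return p, exp_avg

p, grad, exp_avg = torch.randn(4), torch.randn(4), torch.zeros(4)
p, exp_avg = lion_update(p, grad, exp_avg)
print(p, exp_avg)
Because every coordinate moves by exactly lr regardless of gradient magnitude, the default learning rate here (1e-4) is an order of magnitude below the 1e-3 defaults used by the Adam-style optimizers elsewhere in this package.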
717,029 |
pytorch_optimizer.optimizer.lion
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-4,
betas: BETAS = (0.9, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
use_gc: bool = False,
r: float = 0.95,
adanorm: bool = False,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.use_gc = use_gc
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adanorm': adanorm,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.0001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.99), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, use_gc: bool = False, r: float = 0.95, adanorm: bool = False)
|
717,032 |
pytorch_optimizer.optimizer.lion
|
__str__
| null |
def __str__(self) -> str:
return 'Lion'
|
(self) -> str
|
717,053 |
pytorch_optimizer.optimizer.lion
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        for p in group['params']:
            state = self.state[p]
            state['exp_avg'] = torch.zeros_like(p)
            if group['adanorm']:
                state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
|
(self)
|
717,068 |
pytorch_optimizer.optimizer.lookahead
|
Lookahead
|
k steps forward, 1 step back.
:param optimizer: OPTIMIZER. base optimizer.
:param k: int. number of lookahead steps.
:param alpha: float. linear interpolation factor.
:param pullback_momentum: str. how to adjust the inner optimizer's momentum buffer on the interpolation update; one of 'none', 'reset', or 'pullback'.
|
class Lookahead(Optimizer, BaseOptimizer):
r"""k steps forward, 1 step back.
:param optimizer: OPTIMIZER. base optimizer.
:param k: int. number of lookahead steps.
:param alpha: float. linear interpolation factor.
    :param pullback_momentum: str. how to adjust the inner optimizer's momentum buffer on the interpolation update; one of 'none', 'reset', or 'pullback'.
"""
def __init__(
self,
optimizer: OPTIMIZER,
k: int = 5,
alpha: float = 0.5,
pullback_momentum: str = 'none',
):
self.validate_positive(k, 'k')
self.validate_range(alpha, 'alpha', 0.0, 1.0)
self.validate_options(pullback_momentum, 'pullback_momentum', ['none', 'reset', 'pullback'])
self._optimizer_step_pre_hooks: Dict[int, Callable] = {}
self._optimizer_step_post_hooks: Dict[int, Callable] = {}
self.alpha = alpha
self.k = k
self.pullback_momentum = pullback_momentum
self.optimizer = optimizer
self.param_groups = self.optimizer.param_groups
self.state: STATE = defaultdict(dict)
for group in self.param_groups:
if 'counter' not in group:
group['counter'] = 0
for p in group['params']:
state = self.state[p]
state['slow_params'] = torch.empty_like(p)
state['slow_params'].copy_(p)
if self.pullback_momentum == 'pullback':
state['slow_momentum'] = torch.zeros_like(p)
self.defaults: DEFAULTS = {
'lookahead_alpha': alpha,
'lookahead_k': k,
'lookahead_pullback_momentum': pullback_momentum,
**optimizer.defaults,
}
def __getstate__(self):
return {
'state': self.state,
'optimizer': self.optimizer,
'alpha': self.alpha,
'k': self.k,
'pullback_momentum': self.pullback_momentum,
}
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['counter'] = 0
def backup_and_load_cache(self):
r"""Backup cache parameters."""
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['backup_params'] = torch.empty_like(p)
state['backup_params'].copy_(p)
p.data.copy_(state['slow_params'])
def clear_and_load_backup(self):
r"""Load backup parameters."""
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
p.data.copy_(state['backup_params'])
del state['backup_params']
def state_dict(self) -> STATE:
return self.optimizer.state_dict()
def load_state_dict(self, state: STATE):
r"""Load state."""
self.optimizer.load_state_dict(state)
@torch.no_grad()
def zero_grad(self):
self.optimizer.zero_grad(set_to_none=True)
@torch.no_grad()
def update(self, group: Dict):
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
slow = state['slow_params']
p.mul_(self.alpha).add_(slow, alpha=1.0 - self.alpha)
slow.copy_(p)
if 'momentum_buffer' not in self.optimizer.state[p]:
self.optimizer.state[p]['momentum_buffer'] = torch.zeros_like(p)
if self.pullback_momentum == 'pullback':
internal_momentum = self.optimizer.state[p]['momentum_buffer']
self.optimizer.state[p]['momentum_buffer'] = internal_momentum.mul_(self.alpha).add_(
state['slow_momentum'], alpha=1.0 - self.alpha
)
state['slow_momentum'] = self.optimizer.state[p]['momentum_buffer']
elif self.pullback_momentum == 'reset':
self.optimizer.state[p]['momentum_buffer'] = torch.zeros_like(p)
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = self.optimizer.step(closure)
for group in self.param_groups:
group['counter'] += 1
if group['counter'] >= self.k:
group['counter'] = 0
self.update(group)
return loss
|
(optimizer: Type[torch.optim.optimizer.Optimizer], k: int = 5, alpha: float = 0.5, pullback_momentum: str = 'none')
|
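A usage sketch for the Lookahead entry above, wrapping a plain torch.optim.SGD as the inner optimizer (toy model and hyper-parameters are hypothetical):
import torch
import torch.nn as nn
from pytorch_optimizer.optimizer.lookahead import Lookahead

model = nn.Linear(10, 2)
base_optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)

# every k inner steps, the fast weights are interpolated with the slow weights
# by a factor alpha and the slow weights are synced to the result.
optimizer = Lookahead(base_optimizer, k=5, alpha=0.5, pullback_momentum='pullback')

for _ in range(10):
    loss = model(torch.randn(4, 10)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()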
717,069 |
pytorch_optimizer.optimizer.lookahead
|
__getstate__
| null |
def __getstate__(self):
return {
'state': self.state,
'optimizer': self.optimizer,
'alpha': self.alpha,
'k': self.k,
'pullback_momentum': self.pullback_momentum,
}
|
(self)
|
717,070 |
pytorch_optimizer.optimizer.lookahead
|
__init__
| null |
def __init__(
self,
optimizer: OPTIMIZER,
k: int = 5,
alpha: float = 0.5,
pullback_momentum: str = 'none',
):
self.validate_positive(k, 'k')
self.validate_range(alpha, 'alpha', 0.0, 1.0)
self.validate_options(pullback_momentum, 'pullback_momentum', ['none', 'reset', 'pullback'])
self._optimizer_step_pre_hooks: Dict[int, Callable] = {}
self._optimizer_step_post_hooks: Dict[int, Callable] = {}
self.alpha = alpha
self.k = k
self.pullback_momentum = pullback_momentum
self.optimizer = optimizer
self.param_groups = self.optimizer.param_groups
self.state: STATE = defaultdict(dict)
for group in self.param_groups:
if 'counter' not in group:
group['counter'] = 0
for p in group['params']:
state = self.state[p]
state['slow_params'] = torch.empty_like(p)
state['slow_params'].copy_(p)
if self.pullback_momentum == 'pullback':
state['slow_momentum'] = torch.zeros_like(p)
self.defaults: DEFAULTS = {
'lookahead_alpha': alpha,
'lookahead_k': k,
'lookahead_pullback_momentum': pullback_momentum,
**optimizer.defaults,
}
|
(self, optimizer: Type[torch.optim.optimizer.Optimizer], k: int = 5, alpha: float = 0.5, pullback_momentum: str = 'none')
|
717,082 |
pytorch_optimizer.optimizer.lookahead
|
backup_and_load_cache
|
Backup cache parameters.
|
def backup_and_load_cache(self):
r"""Backup cache parameters."""
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['backup_params'] = torch.empty_like(p)
state['backup_params'].copy_(p)
p.data.copy_(state['slow_params'])
|
(self)
|
717,083 |
pytorch_optimizer.optimizer.lookahead
|
clear_and_load_backup
|
Load backup parameters.
|
def clear_and_load_backup(self):
r"""Load backup parameters."""
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
p.data.copy_(state['backup_params'])
del state['backup_params']
|
(self)
|
717,087 |
pytorch_optimizer.optimizer.lookahead
|
load_state_dict
|
Load state.
|
def load_state_dict(self, state: STATE):
r"""Load state."""
self.optimizer.load_state_dict(state)
|
(self, state: Dict)
|
717,095 |
pytorch_optimizer.optimizer.lookahead
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['counter'] = 0
|
(self)
|
717,097 |
pytorch_optimizer.optimizer.lookahead
|
state_dict
| null |
def state_dict(self) -> STATE:
return self.optimizer.state_dict()
|
(self) -> Dict
|
717,098 |
pytorch_optimizer.optimizer.lookahead
|
step
| null |
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = self.optimizer.step(closure)
for group in self.param_groups:
group['counter'] += 1
if group['counter'] >= self.k:
group['counter'] = 0
self.update(group)
return loss
|
(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]
|
717,111 |
pytorch_optimizer.loss.lovasz
|
LovaszHingeLoss
|
Binary Lovasz hinge loss.
:param per_image: bool. compute the loss per image instead of per batch.
|
class LovaszHingeLoss(nn.Module):
r"""Binary Lovasz hinge loss.
:param per_image: bool. compute the loss per image instead of per batch.
"""
def __init__(self, per_image: bool = True):
super().__init__()
self.per_image = per_image
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
if not self.per_image:
return lovasz_hinge_flat(y_pred, y_true)
return sum(lovasz_hinge_flat(y_p, y_t) for y_p, y_t in zip(y_pred, y_true)) / y_pred.size()[0]
|
(per_image: bool = True)
|
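A usage sketch with hypothetical shapes; the loss expects raw logits and binary {0, 1} targets, here already flattened to one vector per image, and averages the per-image hinge losses when per_image=True:
import torch
from pytorch_optimizer.loss.lovasz import LovaszHingeLoss

criterion = LovaszHingeLoss(per_image=True)

# hypothetical batch of 4 images, each flattened to 1024 pixels
y_pred = torch.randn(4, 1024, requires_grad=True)   # raw logits
y_true = (torch.rand(4, 1024) > 0.5).float()         # binary ground-truth masks

loss = criterion(y_pred, y_true)
loss.backward()
print(loss.item())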
717,117 |
pytorch_optimizer.loss.lovasz
|
__init__
| null |
def __init__(self, per_image: bool = True):
super().__init__()
self.per_image = per_image
|
(self, per_image: bool = True)
|
717,147 |
pytorch_optimizer.loss.lovasz
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
if not self.per_image:
return lovasz_hinge_flat(y_pred, y_true)
return sum(lovasz_hinge_flat(y_p, y_t) for y_p, y_t in zip(y_pred, y_true)) / y_pred.size()[0]
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
717,181 |
pytorch_optimizer.optimizer.madgrad
|
MADGRAD
|
A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic Optimization (slightly modified).
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum value in the range [0, 1).
:param eps: float. term added to the denominator to improve numerical stability.
:param weight_decay: float. weight decay (L2 penalty).
MADGRAD optimizer requires less weight decay than other methods, often as little as zero.
On sparse problems both weight_decay and momentum should be set to 0.
:param weight_decouple: bool. Apply AdamW style decoupled weight decay.
|
class MADGRAD(Optimizer, BaseOptimizer):
r"""A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic (slightly modified).
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param eps: float. term added to the denominator to improve numerical stability.
:param weight_decay: float. weight decay (L2 penalty).
MADGRAD optimizer requires less weight decay than other methods, often as little as zero.
On sparse problems both weight_decay and momentum should be set to 0.
:param weight_decouple: float. Apply AdamW style decoupled weight decay.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.9,
weight_decay: float = 0.0,
weight_decouple: bool = False,
eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'momentum': momentum,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'MADGRAD'
@torch.no_grad()
def reset(self):
self.state['k'] = torch.tensor([0], dtype=torch.long, requires_grad=False)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['grad_sum_sq'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
if group['momentum'] > 0.0:
state['x0'] = torch.clone(p).detach()
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# step counter must be stored in state to ensure correct behavior under optimizer sharding
if 'k' not in self.state:
self.state['k'] = torch.tensor([0], dtype=torch.long, requires_grad=False)
for group in self.param_groups:
weight_decay, momentum, eps = group['weight_decay'], group['momentum'], group['eps']
lr = group['lr'] + eps
_lambda = lr * math.pow(self.state['k'] + 1, 0.5)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
if 'grad_sum_sq' not in state:
state['grad_sum_sq'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
if momentum > 0.0:
state['x0'] = torch.clone(p).detach()
if momentum > 0.0 and grad.is_sparse:
raise NoSparseGradientError(str(self), note='momentum > 0.0')
grad_sum_sq, s = state['grad_sum_sq'], state['s']
if weight_decay > 0.0 and not group['weight_decouple']:
if grad.is_sparse:
raise NoSparseGradientError(str(self), note='weight_decay')
# original implementation. not AdamW style
grad.add_(p, alpha=weight_decay)
if grad.is_sparse:
grad = grad.coalesce()
p_masked = p.sparse_mask(grad)
grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)
s_masked = s.sparse_mask(grad)
rms_masked_values = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)
x0_masked_values = p_masked._values().addcdiv(s_masked._values(), rms_masked_values, value=1)
grad_sq = grad * grad
grad_sum_sq.add_(grad_sq, alpha=_lambda)
grad_sum_sq_masked.add_(grad_sq, alpha=_lambda)
rms_masked_values = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)
if eps == 0.0:
rms_masked_values[rms_masked_values == 0] = float('inf')
s.add_(grad, alpha=_lambda)
s_masked._values().add_(grad._values(), alpha=_lambda)
# update masked copy of p
p_kp1_masked_values = x0_masked_values.addcdiv(s_masked._values(), rms_masked_values, value=-1)
# Copy updated masked p to dense p using an add operation
p_masked._values().add_(p_kp1_masked_values, alpha=-1)
p.data.add_(p_masked, alpha=-1)
else:
if momentum == 0.0:
# Compute x_0 from other known quantities
rms = grad_sum_sq.pow(1 / 3).add_(eps)
x0 = p.addcdiv(s, rms, value=1)
else:
x0 = state['x0']
# Accumulate second moments
grad_sum_sq.addcmul_(grad, grad, value=_lambda)
rms = grad_sum_sq.pow(1 / 3).add_(eps)
if eps == 0.0:
rms[rms == 0] = float('inf')
s.add_(grad, alpha=_lambda)
if weight_decay > 0.0 and group['weight_decouple']:
p_old = p.clone()
if momentum == 0.0:
p.copy_(x0.addcdiv(s, rms, value=-1))
else:
z = x0.addcdiv(s, rms, value=-1)
p.mul_(momentum).add_(z, alpha=1.0 - momentum)
if weight_decay > 0.0 and group['weight_decouple']:
p.add_(p_old, alpha=-lr * weight_decay)
self.state['k'] += 1
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.9, weight_decay: float = 0.0, weight_decouple: bool = False, eps: float = 1e-06)
|
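A minimal usage sketch for the MADGRAD entry above (toy model, hypothetical hyper-parameters); as the docstring notes, weight decay can often be left at zero:
import torch
import torch.nn as nn
from pytorch_optimizer.optimizer.madgrad import MADGRAD

model = nn.Linear(20, 1)
optimizer = MADGRAD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=0.0)

for _ in range(5):
    loss = model(torch.randn(16, 20)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()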
717,183 |
pytorch_optimizer.optimizer.madgrad
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.9,
weight_decay: float = 0.0,
weight_decouple: bool = False,
eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'momentum': momentum,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.9, weight_decay: float = 0.0, weight_decouple: bool = False, eps: float = 1e-06)
|
717,186 |
pytorch_optimizer.optimizer.madgrad
|
__str__
| null |
def __str__(self) -> str:
return 'MADGRAD'
|
(self) -> str
|
717,207 |
pytorch_optimizer.optimizer.madgrad
|
reset
| null |
@torch.no_grad()
def reset(self):
    self.state['k'] = torch.tensor([0], dtype=torch.long, requires_grad=False)
    for group in self.param_groups:
        for p in group['params']:
            state = self.state[p]
            state['grad_sum_sq'] = torch.zeros_like(p)
            state['s'] = torch.zeros_like(p)
            if group['momentum'] > 0.0:
                state['x0'] = torch.clone(p).detach()
|
(self)
|
717,222 |
pytorch_optimizer.optimizer.msvag
|
MSVAG
|
Dissecting Adam: The Sign, Magnitude and Variance of Stochastic Gradients.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param beta: float. Moving average (momentum) constant (scalar tensor or float value).
|
class MSVAG(Optimizer, BaseOptimizer):
r"""Dissecting Adam: The Sign, Magnitude and Variance of Stochastic Gradients.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param beta: float. Moving average (momentum) constant (scalar tensor or float value).
"""
def __init__(self, params: PARAMETERS, lr: float = 1e-2, beta: float = 0.9):
self.validate_learning_rate(lr)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
defaults: DEFAULTS = {'lr': lr, 'beta': beta}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'MSVAG'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
@staticmethod
def get_rho(beta_power: float, beta: float) -> float:
r"""Get rho."""
rho: float = (1.0 - beta_power ** 2) * (1.0 - beta) ** 2 # fmt: skip
        rho /= (1.0 - beta ** 2) * (1.0 - beta_power) ** 2  # fmt: skip
return min(rho, 0.9999)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta: float = group['beta']
beta_power: float = beta ** group['step']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta).add_(grad, alpha=1.0 - beta)
exp_avg_sq.mul_(beta).addcmul_(grad, grad, value=1.0 - beta)
m = exp_avg.div(beta_power)
v = exp_avg_sq.div(beta_power)
rho: float = self.get_rho(beta_power, beta)
m_p2 = m.pow(2)
s = (v - m_p2).div_(1.0 - rho)
factor = m_p2.div(m_p2 + rho * s)
torch.nan_to_num(factor, nan=0.0, out=factor)
factor.clamp_(0.0, 1.0)
p.add_(m * factor, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, beta: float = 0.9)
|
717,224 |
pytorch_optimizer.optimizer.msvag
|
__init__
| null |
def __init__(self, params: PARAMETERS, lr: float = 1e-2, beta: float = 0.9):
self.validate_learning_rate(lr)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
defaults: DEFAULTS = {'lr': lr, 'beta': beta}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, beta: float = 0.9)
|
717,227 |
pytorch_optimizer.optimizer.msvag
|
__str__
| null |
def __str__(self) -> str:
return 'MSVAG'
|
(self) -> str
|
717,240 |
pytorch_optimizer.optimizer.msvag
|
get_rho
|
Get rho.
|
@staticmethod
def get_rho(beta_power: float, beta: float) -> float:
r"""Get rho."""
rho: float = (1.0 - beta_power ** 2) * (1.0 - beta) ** 2 # fmt: skip
    rho /= (1.0 - beta ** 2) * (1.0 - beta_power) ** 2  # fmt: skip
return min(rho, 0.9999)
|
(beta_power: float, beta: float) -> float
|
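Since get_rho is a static method, the correction factor can be inspected directly; a small sketch evaluating it over a few steps with beta=0.9 (the printed values in the comments are approximate and assume the corrected denominator above):
from pytorch_optimizer.optimizer.msvag import MSVAG

beta = 0.9
for step in (1, 10, 1000):
    rho = MSVAG.get_rho(beta ** step, beta)
    print(step, round(rho, 4))
# step 1    -> 0.9999 (capped: a single sample carries no variance information)
# step 10   -> ~0.109
# step 1000 -> ~0.0526, approaching (1 - beta) / (1 + beta)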
717,264 |
pytorch_optimizer.optimizer.nero
|
Nero
|
Learning by Turning: Neural Architecture Aware Optimisation.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param beta: float. coefficient used for computing the running average of the squared per-neuron gradient norm.
:param constraints: bool. whether to project parameters back onto the per-neuron zero-mean, unit-norm constraint set.
:param eps: float. term added to the denominator to improve numerical stability.
|
class Nero(Optimizer, BaseOptimizer):
"""Learning by Turning: Neural Architecture Aware Optimisation.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
    :param beta: float. coefficient used for computing the running average of the squared per-neuron gradient norm.
    :param constraints: bool. whether to project parameters back onto the per-neuron zero-mean, unit-norm constraint set.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self, params: PARAMETERS, lr: float = 0.01, beta: float = 0.999, constraints: bool = True, eps: float = 1e-8
):
self.validate_learning_rate(lr)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {'lr': lr, 'beta': beta, 'constraints': constraints, 'eps': eps}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Nero'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
if group['constraints'] and p.dim() > 1:
p.sub_(neuron_mean(p))
p.div_(neuron_norm(p) + group['eps'])
state = self.state[p]
state['step'] = 0
state['exp_avg_sq'] = torch.zeros_like(neuron_norm(p))
state['scale'] = neuron_norm(p).mean()
if state['scale'] == 0.0:
state['scale'] = 0.01
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
if group['constraints'] and p.dim() > 1:
p.sub_(neuron_mean(p))
p.div_(neuron_norm(p) + group['eps'])
state['step'] = 0
state['exp_avg_sq'] = torch.zeros_like(neuron_norm(p))
state['scale'] = neuron_norm(p).mean()
if state['scale'] == 0.0:
state['scale'] = 0.01
state['step'] += 1
grad_norm = neuron_norm(grad)
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(group['beta']).addcmul_(grad_norm, grad_norm, value=1.0 - group['beta'])
bias_correction: float = 1.0 - group['beta'] ** state['step']
grad_normed = grad / ((exp_avg_sq / bias_correction).sqrt_().add_(group['eps']))
torch.nan_to_num(grad_normed, nan=0.0, out=grad_normed)
p.sub_(grad_normed, alpha=group['lr'] * state['scale'])
if group['constraints'] and p.dim() > 1:
p.sub_(neuron_mean(p))
p.div_(neuron_norm(p) + group['eps'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, beta: float = 0.999, constraints: bool = True, eps: float = 1e-08)
|
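A usage sketch for the Nero entry above (toy model, hypothetical sizes); with constraints=True each output neuron's weight row is re-centred and re-normalised after every update:
import torch
import torch.nn as nn
from pytorch_optimizer.optimizer.nero import Nero

model = nn.Linear(8, 3)
optimizer = Nero(model.parameters(), lr=0.01, constraints=True)

loss = model(torch.randn(32, 8)).pow(2).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()

# after a step, each row (neuron) of the weight matrix is centred and normalised
print(model.weight.mean(dim=1))   # ~0 per neuron
print(model.weight.norm(dim=1))   # ~1 per neuron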
717,266 |
pytorch_optimizer.optimizer.nero
|
__init__
| null |
def __init__(
self, params: PARAMETERS, lr: float = 0.01, beta: float = 0.999, constraints: bool = True, eps: float = 1e-8
):
self.validate_learning_rate(lr)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {'lr': lr, 'beta': beta, 'constraints': constraints, 'eps': eps}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, beta: float = 0.999, constraints: bool = True, eps: float = 1e-08)
|
717,269 |
pytorch_optimizer.optimizer.nero
|
__str__
| null |
def __str__(self) -> str:
return 'Nero'
|
(self) -> str
|
717,305 |
pytorch_optimizer.optimizer.novograd
|
NovoGrad
|
Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param grad_averaging: bool. whether to multiply the gradient by (1 - beta1) before accumulating momentum.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class NovoGrad(Optimizer, BaseOptimizer):
r"""Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
    :param grad_averaging: bool. whether to multiply the gradient by (1 - beta1) before accumulating momentum.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.95, 0.98),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
grad_averaging: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'grad_averaging': grad_averaging,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'NovoGrad'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
grad = p.grad
g_2 = grad.pow(2).sum() # fmt: skip
state['moments'] = grad.div(g_2.sqrt() + group['eps']) + group['weight_decay'] * p
state['grads_ema'] = g_2
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
step_size: float = group['lr'] * bias_correction2_sq
if not group['adam_debias']:
step_size /= bias_correction1
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
grad_p2 = grad.pow(2).sum()
if len(state) == 0:
state['moments'] = grad.div(grad_p2.sqrt() + group['eps']) + group['weight_decay'] * p
state['grads_ema'] = grad_p2
grads_ema = state['grads_ema']
grads_ema.mul_(beta2).add_(grad_p2, alpha=1.0 - beta2)
de_nom = grads_ema.sqrt().add_(group['eps'])
grad.div_(de_nom)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
if group['grad_averaging']:
grad.mul_(1.0 - beta1)
moments = state['moments']
moments.mul_(beta1).add_(grad)
p.add_(moments, alpha=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.95, 0.98), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, grad_averaging: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
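A minimal usage sketch for the NovoGrad entry above (toy model, hypothetical hyper-parameters); note from the code that grads_ema is a single scalar per parameter tensor, so the optimizer-state overhead is close to that of SGD with momentum:
import torch
import torch.nn as nn
from pytorch_optimizer.optimizer.novograd import NovoGrad

model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 1))
optimizer = NovoGrad(model.parameters(), lr=1e-3, betas=(0.95, 0.98), grad_averaging=True)

for _ in range(3):
    loss = model(torch.randn(8, 16)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()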
717,307 |
pytorch_optimizer.optimizer.novograd
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.95, 0.98),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
grad_averaging: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'grad_averaging': grad_averaging,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.95, 0.98), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, grad_averaging: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
717,310 |
pytorch_optimizer.optimizer.novograd
|
__str__
| null |
def __str__(self) -> str:
return 'NovoGrad'
|
(self) -> str
|
717,331 |
pytorch_optimizer.optimizer.novograd
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
step_size: float = group['lr'] * bias_correction2_sq
if not group['adam_debias']:
step_size /= bias_correction1
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
grad_p2 = grad.pow(2).sum()
if len(state) == 0:
state['moments'] = grad.div(grad_p2.sqrt() + group['eps']) + group['weight_decay'] * p
state['grads_ema'] = grad_p2
grads_ema = state['grads_ema']
grads_ema.mul_(beta2).add_(grad_p2, alpha=1.0 - beta2)
de_nom = grads_ema.sqrt().add_(group['eps'])
grad.div_(de_nom)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
if group['grad_averaging']:
grad.mul_(1.0 - beta1)
moments = state['moments']
moments.mul_(beta1).add_(grad)
p.add_(moments, alpha=-step_size)
return loss
|
(self)
|
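To make the per-layer arithmetic in the step code above easier to follow, here is a minimal single-tensor sketch of one later update (decoupled weight decay, adam_debias, and grad_averaging omitted). The state values are illustrative only; the point is that the second moment is a single scalar per layer, the EMA of the summed squared gradient.

import math
import torch

lr, beta1, beta2, eps, step = 1e-3, 0.95, 0.98, 1e-8, 10

p = torch.randn(4, 4)
grad = torch.randn(4, 4)

# Running state carried over from previous steps (illustrative values).
moments = torch.zeros_like(p)
grads_ema = torch.tensor(1.0)

# Scalar second moment: EMA of the layer-wise summed squared gradient.
grads_ema = beta2 * grads_ema + (1.0 - beta2) * grad.pow(2).sum()

# Normalise the whole layer gradient by its own norm estimate.
grad = grad / (grads_ema.sqrt() + eps)

# Momentum on the normalised gradient, then the parameter update.
moments = beta1 * moments + grad
step_size = lr * math.sqrt(1.0 - beta2 ** step) / (1.0 - beta1 ** step)
p = p - step_size * moments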
717,346 |
torch.optim.lr_scheduler
|
OneCycleLR
|
Sets the learning rate of each parameter group according to the
1cycle learning rate policy. The 1cycle policy anneals the learning
rate from an initial learning rate to some maximum learning rate and then
from that maximum learning rate to some minimum learning rate much lower
than the initial learning rate.
This policy was initially described in the paper `Super-Convergence:
Very Fast Training of Neural Networks Using Large Learning Rates`_.
The 1cycle learning rate policy changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
This scheduler is not chainable.
Note also that the total number of steps in the cycle can be determined in one
of two ways (listed in order of precedence):
#. A value for total_steps is explicitly provided.
#. A number of epochs (epochs) and a number of steps per epoch
(steps_per_epoch) are provided.
In this case, the number of total steps is inferred by
total_steps = epochs * steps_per_epoch
You must either provide a value for total_steps or provide a value for both
epochs and steps_per_epoch.
The default behaviour of this scheduler follows the fastai implementation of 1cycle, which
claims that "unpublished work has shown even better results by using only two phases". To
mimic the behaviour of the original paper instead, set ``three_phase=True``.
Args:
optimizer (Optimizer): Wrapped optimizer.
max_lr (float or list): Upper learning rate boundaries in the cycle
for each parameter group.
total_steps (int): The total number of steps in the cycle. Note that
if a value is not provided here, then it must be inferred by providing
a value for epochs and steps_per_epoch.
Default: None
epochs (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle
if a value for total_steps is not provided.
Default: None
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the
cycle if a value for total_steps is not provided.
Default: None
pct_start (float): The percentage of the cycle (in number of steps) spent
increasing the learning rate.
Default: 0.3
anneal_strategy (str): {'cos', 'linear'}
Specifies the annealing strategy: "cos" for cosine annealing, "linear" for
linear annealing.
Default: 'cos'
cycle_momentum (bool): If ``True``, momentum is cycled inversely
to learning rate between 'base_momentum' and 'max_momentum'.
Default: True
base_momentum (float or list): Lower momentum boundaries in the cycle
for each parameter group. Note that momentum is cycled inversely
to learning rate; at the peak of a cycle, momentum is
'base_momentum' and learning rate is 'max_lr'.
Default: 0.85
max_momentum (float or list): Upper momentum boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_momentum - base_momentum).
Note that momentum is cycled inversely
to learning rate; at the start of a cycle, momentum is 'max_momentum'
and learning rate is 'base_lr'
Default: 0.95
div_factor (float): Determines the initial learning rate via
initial_lr = max_lr/div_factor
Default: 25
final_div_factor (float): Determines the minimum learning rate via
min_lr = initial_lr/final_div_factor
Default: 1e4
three_phase (bool): If ``True``, use a third phase of the schedule to annihilate the
learning rate according to 'final_div_factor' instead of modifying the second
phase (the first two phases will be symmetrical about the step indicated by
'pct_start').
last_epoch (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_epoch=-1, the schedule is started from the beginning.
Default: -1
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
.. deprecated:: 2.2
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
learning rate.
Example:
>>> # xdoctest: +SKIP
>>> data_loader = torch.utils.data.DataLoader(...)
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(data_loader), epochs=10)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> optimizer.step()
>>> scheduler.step()
.. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates:
https://arxiv.org/abs/1708.07120
|
class OneCycleLR(LRScheduler):
r"""Sets the learning rate of each parameter group according to the
1cycle learning rate policy. The 1cycle policy anneals the learning
rate from an initial learning rate to some maximum learning rate and then
from that maximum learning rate to some minimum learning rate much lower
than the initial learning rate.
This policy was initially described in the paper `Super-Convergence:
Very Fast Training of Neural Networks Using Large Learning Rates`_.
The 1cycle learning rate policy changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
This scheduler is not chainable.
Note also that the total number of steps in the cycle can be determined in one
of two ways (listed in order of precedence):
#. A value for total_steps is explicitly provided.
#. A number of epochs (epochs) and a number of steps per epoch
(steps_per_epoch) are provided.
In this case, the number of total steps is inferred by
total_steps = epochs * steps_per_epoch
You must either provide a value for total_steps or provide a value for both
epochs and steps_per_epoch.
The default behaviour of this scheduler follows the fastai implementation of 1cycle, which
claims that "unpublished work has shown even better results by using only two phases". To
mimic the behaviour of the original paper instead, set ``three_phase=True``.
Args:
optimizer (Optimizer): Wrapped optimizer.
max_lr (float or list): Upper learning rate boundaries in the cycle
for each parameter group.
total_steps (int): The total number of steps in the cycle. Note that
if a value is not provided here, then it must be inferred by providing
a value for epochs and steps_per_epoch.
Default: None
epochs (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle
if a value for total_steps is not provided.
Default: None
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the
cycle if a value for total_steps is not provided.
Default: None
pct_start (float): The percentage of the cycle (in number of steps) spent
increasing the learning rate.
Default: 0.3
anneal_strategy (str): {'cos', 'linear'}
Specifies the annealing strategy: "cos" for cosine annealing, "linear" for
linear annealing.
Default: 'cos'
cycle_momentum (bool): If ``True``, momentum is cycled inversely
to learning rate between 'base_momentum' and 'max_momentum'.
Default: True
base_momentum (float or list): Lower momentum boundaries in the cycle
for each parameter group. Note that momentum is cycled inversely
to learning rate; at the peak of a cycle, momentum is
'base_momentum' and learning rate is 'max_lr'.
Default: 0.85
max_momentum (float or list): Upper momentum boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_momentum - base_momentum).
Note that momentum is cycled inversely
to learning rate; at the start of a cycle, momentum is 'max_momentum'
and learning rate is 'base_lr'
Default: 0.95
div_factor (float): Determines the initial learning rate via
initial_lr = max_lr/div_factor
Default: 25
final_div_factor (float): Determines the minimum learning rate via
min_lr = initial_lr/final_div_factor
Default: 1e4
three_phase (bool): If ``True``, use a third phase of the schedule to annihilate the
learning rate according to 'final_div_factor' instead of modifying the second
phase (the first two phases will be symmetrical about the step indicated by
'pct_start').
last_epoch (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_epoch=-1, the schedule is started from the beginning.
Default: -1
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
.. deprecated:: 2.2
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
learning rate.
Example:
>>> # xdoctest: +SKIP
>>> data_loader = torch.utils.data.DataLoader(...)
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(data_loader), epochs=10)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> optimizer.step()
>>> scheduler.step()
.. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates:
https://arxiv.org/abs/1708.07120
"""
def __init__(self,
optimizer,
max_lr,
total_steps=None,
epochs=None,
steps_per_epoch=None,
pct_start=0.3,
anneal_strategy='cos',
cycle_momentum=True,
base_momentum=0.85,
max_momentum=0.95,
div_factor=25.,
final_div_factor=1e4,
three_phase=False,
last_epoch=-1,
verbose="deprecated"):
# Validate optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError(f'{type(optimizer).__name__} is not an Optimizer')
self.optimizer = optimizer
# Validate total_steps
if total_steps is None and epochs is None and steps_per_epoch is None:
raise ValueError("You must define either total_steps OR (epochs AND steps_per_epoch)")
elif total_steps is not None:
if total_steps <= 0 or not isinstance(total_steps, int):
raise ValueError(f"Expected positive integer total_steps, but got {total_steps}")
self.total_steps = total_steps
else:
if epochs <= 0 or not isinstance(epochs, int):
raise ValueError(f"Expected positive integer epochs, but got {epochs}")
if steps_per_epoch <= 0 or not isinstance(steps_per_epoch, int):
raise ValueError(f"Expected positive integer steps_per_epoch, but got {steps_per_epoch}")
self.total_steps = epochs * steps_per_epoch
if three_phase:
self._schedule_phases = [
{
'end_step': float(pct_start * self.total_steps) - 1,
'start_lr': 'initial_lr',
'end_lr': 'max_lr',
'start_momentum': 'max_momentum',
'end_momentum': 'base_momentum',
},
{
'end_step': float(2 * pct_start * self.total_steps) - 2,
'start_lr': 'max_lr',
'end_lr': 'initial_lr',
'start_momentum': 'base_momentum',
'end_momentum': 'max_momentum',
},
{
'end_step': self.total_steps - 1,
'start_lr': 'initial_lr',
'end_lr': 'min_lr',
'start_momentum': 'max_momentum',
'end_momentum': 'max_momentum',
},
]
else:
self._schedule_phases = [
{
'end_step': float(pct_start * self.total_steps) - 1,
'start_lr': 'initial_lr',
'end_lr': 'max_lr',
'start_momentum': 'max_momentum',
'end_momentum': 'base_momentum',
},
{
'end_step': self.total_steps - 1,
'start_lr': 'max_lr',
'end_lr': 'min_lr',
'start_momentum': 'base_momentum',
'end_momentum': 'max_momentum',
},
]
# Validate pct_start
if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
raise ValueError(f"Expected float between 0 and 1 pct_start, but got {pct_start}")
# Validate anneal_strategy
if anneal_strategy not in ['cos', 'linear']:
raise ValueError(f"anneal_strategy must by one of 'cos' or 'linear', instead got {anneal_strategy}")
elif anneal_strategy == 'cos':
self.anneal_func = self._annealing_cos
elif anneal_strategy == 'linear':
self.anneal_func = self._annealing_linear
# Initialize learning rate variables
max_lrs = self._format_param('max_lr', self.optimizer, max_lr)
if last_epoch == -1:
for idx, group in enumerate(self.optimizer.param_groups):
group['initial_lr'] = max_lrs[idx] / div_factor
group['max_lr'] = max_lrs[idx]
group['min_lr'] = group['initial_lr'] / final_div_factor
# Initialize momentum variables
self.cycle_momentum = cycle_momentum
if self.cycle_momentum:
if 'momentum' not in self.optimizer.defaults and 'betas' not in self.optimizer.defaults:
raise ValueError('optimizer must support momentum or beta1 with `cycle_momentum` option enabled')
self.use_beta1 = 'betas' in self.optimizer.defaults
max_momentums = self._format_param('max_momentum', optimizer, max_momentum)
base_momentums = self._format_param('base_momentum', optimizer, base_momentum)
if last_epoch == -1:
for m_momentum, b_momentum, group in zip(max_momentums, base_momentums, optimizer.param_groups):
if self.use_beta1:
group['betas'] = (m_momentum, *group['betas'][1:])
else:
group['momentum'] = m_momentum
group['max_momentum'] = m_momentum
group['base_momentum'] = b_momentum
super().__init__(optimizer, last_epoch, verbose)
def _format_param(self, name, optimizer, param):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
raise ValueError(f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}")
return param
else:
return [param] * len(optimizer.param_groups)
@staticmethod
def _annealing_cos(start, end, pct):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = math.cos(math.pi * pct) + 1
return end + (start - end) / 2.0 * cos_out
@staticmethod
def _annealing_linear(start, end, pct):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return (end - start) * pct + start
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
lrs = []
step_num = self.last_epoch
if step_num > self.total_steps:
raise ValueError("Tried to step {} times. The specified number of total steps is {}"
.format(step_num, self.total_steps))
for group in self.optimizer.param_groups:
start_step = 0
for i, phase in enumerate(self._schedule_phases):
end_step = phase['end_step']
if step_num <= end_step or i == len(self._schedule_phases) - 1:
pct = (step_num - start_step) / (end_step - start_step)
computed_lr = self.anneal_func(group[phase['start_lr']], group[phase['end_lr']], pct)
if self.cycle_momentum:
computed_momentum = self.anneal_func(group[phase['start_momentum']], group[phase['end_momentum']], pct)
break
start_step = phase['end_step']
lrs.append(computed_lr)
if self.cycle_momentum:
if self.use_beta1:
group['betas'] = (computed_momentum, *group['betas'][1:])
else:
group['momentum'] = computed_momentum
return lrs
|
(optimizer, max_lr, total_steps=None, epochs=None, steps_per_epoch=None, pct_start=0.3, anneal_strategy='cos', cycle_momentum=True, base_momentum=0.85, max_momentum=0.95, div_factor=25.0, final_div_factor=10000.0, three_phase=False, last_epoch=-1, verbose='deprecated')
|
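The learning-rate boundaries of the schedule follow directly from the div_factor and final_div_factor formulas in the docstring above; a small sketch with the default factors and max_lr=0.01:

max_lr = 0.01
div_factor = 25.0
final_div_factor = 1e4

initial_lr = max_lr / div_factor        # 4e-4: where the warm-up phase starts
min_lr = initial_lr / final_div_factor  # 4e-8: where the schedule finishes

print(initial_lr, min_lr)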
717,347 |
torch.optim.lr_scheduler
|
__init__
| null |
def __init__(self,
optimizer,
max_lr,
total_steps=None,
epochs=None,
steps_per_epoch=None,
pct_start=0.3,
anneal_strategy='cos',
cycle_momentum=True,
base_momentum=0.85,
max_momentum=0.95,
div_factor=25.,
final_div_factor=1e4,
three_phase=False,
last_epoch=-1,
verbose="deprecated"):
# Validate optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError(f'{type(optimizer).__name__} is not an Optimizer')
self.optimizer = optimizer
# Validate total_steps
if total_steps is None and epochs is None and steps_per_epoch is None:
raise ValueError("You must define either total_steps OR (epochs AND steps_per_epoch)")
elif total_steps is not None:
if total_steps <= 0 or not isinstance(total_steps, int):
raise ValueError(f"Expected positive integer total_steps, but got {total_steps}")
self.total_steps = total_steps
else:
if epochs <= 0 or not isinstance(epochs, int):
raise ValueError(f"Expected positive integer epochs, but got {epochs}")
if steps_per_epoch <= 0 or not isinstance(steps_per_epoch, int):
raise ValueError(f"Expected positive integer steps_per_epoch, but got {steps_per_epoch}")
self.total_steps = epochs * steps_per_epoch
if three_phase:
self._schedule_phases = [
{
'end_step': float(pct_start * self.total_steps) - 1,
'start_lr': 'initial_lr',
'end_lr': 'max_lr',
'start_momentum': 'max_momentum',
'end_momentum': 'base_momentum',
},
{
'end_step': float(2 * pct_start * self.total_steps) - 2,
'start_lr': 'max_lr',
'end_lr': 'initial_lr',
'start_momentum': 'base_momentum',
'end_momentum': 'max_momentum',
},
{
'end_step': self.total_steps - 1,
'start_lr': 'initial_lr',
'end_lr': 'min_lr',
'start_momentum': 'max_momentum',
'end_momentum': 'max_momentum',
},
]
else:
self._schedule_phases = [
{
'end_step': float(pct_start * self.total_steps) - 1,
'start_lr': 'initial_lr',
'end_lr': 'max_lr',
'start_momentum': 'max_momentum',
'end_momentum': 'base_momentum',
},
{
'end_step': self.total_steps - 1,
'start_lr': 'max_lr',
'end_lr': 'min_lr',
'start_momentum': 'base_momentum',
'end_momentum': 'max_momentum',
},
]
# Validate pct_start
if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
raise ValueError(f"Expected float between 0 and 1 pct_start, but got {pct_start}")
# Validate anneal_strategy
if anneal_strategy not in ['cos', 'linear']:
raise ValueError(f"anneal_strategy must by one of 'cos' or 'linear', instead got {anneal_strategy}")
elif anneal_strategy == 'cos':
self.anneal_func = self._annealing_cos
elif anneal_strategy == 'linear':
self.anneal_func = self._annealing_linear
# Initialize learning rate variables
max_lrs = self._format_param('max_lr', self.optimizer, max_lr)
if last_epoch == -1:
for idx, group in enumerate(self.optimizer.param_groups):
group['initial_lr'] = max_lrs[idx] / div_factor
group['max_lr'] = max_lrs[idx]
group['min_lr'] = group['initial_lr'] / final_div_factor
# Initialize momentum variables
self.cycle_momentum = cycle_momentum
if self.cycle_momentum:
if 'momentum' not in self.optimizer.defaults and 'betas' not in self.optimizer.defaults:
raise ValueError('optimizer must support momentum or beta1 with `cycle_momentum` option enabled')
self.use_beta1 = 'betas' in self.optimizer.defaults
max_momentums = self._format_param('max_momentum', optimizer, max_momentum)
base_momentums = self._format_param('base_momentum', optimizer, base_momentum)
if last_epoch == -1:
for m_momentum, b_momentum, group in zip(max_momentums, base_momentums, optimizer.param_groups):
if self.use_beta1:
group['betas'] = (m_momentum, *group['betas'][1:])
else:
group['momentum'] = m_momentum
group['max_momentum'] = m_momentum
group['base_momentum'] = b_momentum
super().__init__(optimizer, last_epoch, verbose)
|
(self, optimizer, max_lr, total_steps=None, epochs=None, steps_per_epoch=None, pct_start=0.3, anneal_strategy='cos', cycle_momentum=True, base_momentum=0.85, max_momentum=0.95, div_factor=25.0, final_div_factor=10000.0, three_phase=False, last_epoch=-1, verbose='deprecated')
|
717,348 |
torch.optim.lr_scheduler
|
_annealing_cos
|
Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0.
|
@staticmethod
def _annealing_cos(start, end, pct):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = math.cos(math.pi * pct) + 1
return end + (start - end) / 2.0 * cos_out
|
(start, end, pct)
|
717,349 |
torch.optim.lr_scheduler
|
_annealing_linear
|
Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0.
|
@staticmethod
def _annealing_linear(start, end, pct):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return (end - start) * pct + start
|
(start, end, pct)
|
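A quick numeric comparison of the two annealing helpers above, evaluated at a few points of a phase; both interpolate between the same endpoints, but the cosine curve is flatter near the ends.

import math

def annealing_cos(start, end, pct):
    cos_out = math.cos(math.pi * pct) + 1
    return end + (start - end) / 2.0 * cos_out

def annealing_linear(start, end, pct):
    return (end - start) * pct + start

for pct in (0.0, 0.25, 0.5, 1.0):
    print(pct, annealing_cos(0.01, 0.001, pct), annealing_linear(0.01, 0.001, pct))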
717,353 |
torch.optim.lr_scheduler
|
get_lr
| null |
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
lrs = []
step_num = self.last_epoch
if step_num > self.total_steps:
raise ValueError("Tried to step {} times. The specified number of total steps is {}"
.format(step_num, self.total_steps))
for group in self.optimizer.param_groups:
start_step = 0
for i, phase in enumerate(self._schedule_phases):
end_step = phase['end_step']
if step_num <= end_step or i == len(self._schedule_phases) - 1:
pct = (step_num - start_step) / (end_step - start_step)
computed_lr = self.anneal_func(group[phase['start_lr']], group[phase['end_lr']], pct)
if self.cycle_momentum:
computed_momentum = self.anneal_func(group[phase['start_momentum']], group[phase['end_momentum']], pct)
break
start_step = phase['end_step']
lrs.append(computed_lr)
if self.cycle_momentum:
if self.use_beta1:
group['betas'] = (computed_momentum, *group['betas'][1:])
else:
group['momentum'] = computed_momentum
return lrs
|
(self)
|
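To see how get_lr walks the phase list, here is a small standalone sketch that reproduces the phase boundaries of the default two-phase schedule (total_steps=100, pct_start=0.3, values chosen only for illustration), using the same end_step arithmetic as __init__ and the same phase-selection loop as get_lr:

total_steps = 100
pct_start = 0.3

phases = [
    {'end_step': float(pct_start * total_steps) - 1},  # warm-up ends at step 29
    {'end_step': total_steps - 1},                      # anneal ends at step 99
]

def phase_of(step_num):
    start_step = 0
    for i, phase in enumerate(phases):
        end_step = phase['end_step']
        if step_num <= end_step or i == len(phases) - 1:
            pct = (step_num - start_step) / (end_step - start_step)
            return i, pct
        start_step = end_step

for step_num in (0, 29, 30, 99):
    print(step_num, phase_of(step_num))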
717,358 |
pytorch_optimizer.optimizer.padam
|
PAdam
|
Closing the Generalization Gap of Adaptive Gradient Methods in Training Deep Neural Networks.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param partial: float. partially adaptive parameter.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
|
class PAdam(Optimizer, BaseOptimizer):
"""Closing the Generalization Gap of Adaptive Gradient Methods in Training Deep Neural Networks.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param partial: float. partially adaptive parameter.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-1,
betas: BETAS = (0.9, 0.999),
partial: float = 0.25,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_range(partial, 'partial', 0.0, 1.0, range_type='(]')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'partial': partial,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'PAdam'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
self.apply_weight_decay(
p,
grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
step_size: float = group['lr'] * bias_correction2_sq / bias_correction1
p.addcdiv_(exp_avg, de_nom ** (group['partial'] * 2), value=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.1, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), partial: float = 0.25, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, eps: float = 1e-08)
|
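A minimal usage sketch of PAdam; the toy model is a hypothetical placeholder and the import path is inferred from the module name shown in this record (pytorch_optimizer.optimizer.padam). Note from the update above that the denominator is raised to partial * 2, so partial=0.5 recovers an Adam-style step while smaller values push the behaviour toward SGD with momentum.

import torch
from torch import nn
from pytorch_optimizer.optimizer.padam import PAdam

model = nn.Linear(16, 4)  # hypothetical toy model

# lr=1e-1 and partial=0.25 are the defaults from the signature above.
optimizer = PAdam(model.parameters(), lr=1e-1, partial=0.25, weight_decay=1e-4)

x, y = torch.randn(8, 16), torch.randn(8, 4)
loss = nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()
optimizer.zero_grad()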
717,360 |
pytorch_optimizer.optimizer.padam
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-1,
betas: BETAS = (0.9, 0.999),
partial: float = 0.25,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_range(partial, 'partial', 0.0, 1.0, range_type='(]')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'partial': partial,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.1, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), partial: float = 0.25, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, eps: float = 1e-08)
|
717,363 |
pytorch_optimizer.optimizer.padam
|
__str__
| null |
def __str__(self) -> str:
return 'PAdam'
|
(self) -> str
|
717,384 |
pytorch_optimizer.optimizer.padam
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
self.apply_weight_decay(
p,
grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
step_size: float = group['lr'] * bias_correction2_sq / bias_correction1
p.addcdiv_(exp_avg, de_nom ** (group['partial'] * 2), value=-step_size)
return loss
|
(self)
|
717,399 |
pytorch_optimizer.optimizer.pcgrad
|
PCGrad
|
Gradient Surgery for Multi-Task Learning.
:param optimizer: OPTIMIZER: optimizer instance.
:param reduction: str. reduction method.
|
class PCGrad(BaseOptimizer):
r"""Gradient Surgery for Multi-Task Learning.
:param optimizer: OPTIMIZER: optimizer instance.
:param reduction: str. reduction method.
"""
def __init__(self, optimizer: OPTIMIZER, reduction: str = 'mean'):
self.validate_options(reduction, 'reduction', ['mean', 'sum'])
self.optimizer = optimizer
self.reduction = reduction
@torch.no_grad()
def reset(self):
self.zero_grad()
def zero_grad(self):
return self.optimizer.zero_grad(set_to_none=True)
def step(self):
return self.optimizer.step()
def set_grad(self, grads: List[torch.Tensor]):
idx: int = 0
for group in self.optimizer.param_groups:
for p in group['params']:
p.grad = grads[idx]
idx += 1
def retrieve_grad(self) -> Tuple[List[torch.Tensor], List[int], List[torch.Tensor]]:
r"""Get the gradient of the parameters of the network with specific objective."""
grad, shape, has_grad = [], [], []
for group in self.optimizer.param_groups:
for p in group['params']:
if p.grad is None:
shape.append(p.shape)
grad.append(torch.zeros_like(p, device=p.device))
has_grad.append(torch.zeros_like(p, device=p.device))
continue
shape.append(p.grad.shape)
grad.append(p.grad.clone())
has_grad.append(torch.ones_like(p, device=p.device))
return grad, shape, has_grad
def pack_grad(self, objectives: Iterable) -> Tuple[List[torch.Tensor], List[List[int]], List[torch.Tensor]]:
r"""Pack the gradient of the parameters of the network for each objective.
:param objectives: Iterable[nn.Module]. a list of objectives.
:return: torch.Tensor. packed gradients.
"""
grads, shapes, has_grads = [], [], []
for objective in objectives:
self.optimizer.zero_grad(set_to_none=True)
objective.backward(retain_graph=True)
grad, shape, has_grad = self.retrieve_grad()
grads.append(flatten_grad(grad))
has_grads.append(flatten_grad(has_grad))
shapes.append(shape)
return grads, shapes, has_grads
def project_conflicting(self, grads: List[torch.Tensor], has_grads: List[torch.Tensor]) -> torch.Tensor:
r"""Project conflicting.
:param grads: a list of the gradients of the parameters.
:param has_grads: a list of masks indicating whether each parameter has a gradient.
:return: torch.Tensor. merged gradients.
"""
shared: torch.Tensor = torch.stack(has_grads).prod(0).bool()
pc_grad: List[torch.Tensor] = deepcopy(grads)
for i, g_i in enumerate(pc_grad):
random.shuffle(grads)
for g_j in grads:
g_i_g_j: torch.Tensor = torch.dot(g_i, g_j)
if g_i_g_j < 0:
pc_grad[i] -= g_i_g_j * g_j / (g_j.norm() ** 2)
merged_grad: torch.Tensor = torch.zeros_like(grads[0])
shared_pc_gradients: torch.Tensor = torch.stack([g[shared] for g in pc_grad])
if self.reduction == 'mean':
merged_grad[shared] = shared_pc_gradients.mean(dim=0)
else:
merged_grad[shared] = shared_pc_gradients.sum(dim=0)
merged_grad[~shared] = torch.stack([g[~shared] for g in pc_grad]).sum(dim=0)
return merged_grad
def pc_backward(self, objectives: Iterable[nn.Module]):
r"""Calculate the gradient of the parameters.
:param objectives: Iterable[nn.Module]. a list of objectives.
"""
grads, shapes, has_grads = self.pack_grad(objectives)
pc_grad = self.project_conflicting(grads, has_grads)
pc_grad = un_flatten_grad(pc_grad, shapes[0])
self.set_grad(pc_grad)
|
(optimizer: Type[torch.optim.optimizer.Optimizer], reduction: str = 'mean')
|
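A minimal two-task sketch of how PCGrad is meant to be driven, based only on the methods shown above (pc_backward merges the per-task gradients, step delegates to the wrapped optimizer). The shared model, losses, and import path (inferred from the module name pytorch_optimizer.optimizer.pcgrad) are placeholders for illustration.

import torch
from torch import nn
from pytorch_optimizer.optimizer.pcgrad import PCGrad

model = nn.Linear(16, 2)  # hypothetical shared backbone
base_optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
optimizer = PCGrad(base_optimizer, reduction='mean')

x = torch.randn(8, 16)
y1, y2 = torch.randn(8, 2), torch.randn(8, 2)

out = model(x)
loss_task_1 = nn.functional.mse_loss(out, y1)
loss_task_2 = nn.functional.l1_loss(out, y2)

# pc_backward projects away conflicting gradient components across tasks,
# then writes the merged gradient back onto the parameters.
optimizer.pc_backward([loss_task_1, loss_task_2])
optimizer.step()
optimizer.zero_grad()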