index | package | name | docstring | code | signature
---|---|---|---|---|---|
718,106 |
pytorch_optimizer.optimizer.swats
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2: float = 1.0 - beta2 ** group['step']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['exp_avg2'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
if group['phase'] == 'sgd':
if 'momentum_buffer' not in state:
state['momentum_buffer'] = torch.zeros_like(grad)
buf = state['momentum_buffer']
buf.mul_(beta1).add_(grad)
update = buf.clone()
update.mul_(1.0 - beta1)
if group['nesterov']:
update.add_(buf, alpha=beta1)
p.add_(update, alpha=-group['lr'])
continue
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
de_nom = self.apply_ams_bound(
ams_bound=group['ams_bound'],
exp_avg_sq=exp_avg_sq,
max_exp_avg_sq=state.get('max_exp_avg_sq', None),
eps=group['eps'],
)
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['lr'] * math.sqrt(bias_correction2),
bias_correction1=bias_correction1,
)
perturb = exp_avg.clone()
perturb.div_(de_nom).mul_(-step_size)
p.add_(perturb)
perturb_view = perturb.view(-1)
pg = perturb_view.dot(grad.view(-1))
if pg != 0:
scaling = perturb_view.dot(perturb_view).div_(-pg)
exp_avg2 = state['exp_avg2']
exp_avg2.mul_(beta2).add_(scaling, alpha=1.0 - beta2)
corrected_exp_avg = exp_avg2 / bias_correction2
if (
group['step'] > 1
and corrected_exp_avg > 0.0
and corrected_exp_avg.allclose(scaling, rtol=group['eps'])
):
group['phase'] = 'sgd'
group['lr'] = corrected_exp_avg.item()
return loss
|
(self)
|
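A minimal usage sketch for the optimizer this `step` method belongs to. It assumes the class defined in `pytorch_optimizer.optimizer.swats` is named `SWATS` and accepts the usual `params` / `lr` arguments; only the module path and the `phase` / `lr` group keys come from the code above. Once the projection-based criterion is met, the parameter group switches to plain SGD with the estimated learning rate.

```python
# Hedged sketch: the class name and constructor arguments are assumptions.
import torch
from pytorch_optimizer.optimizer.swats import SWATS

model = torch.nn.Linear(10, 1)
optimizer = SWATS(model.parameters(), lr=1e-3)

for _ in range(200):
    optimizer.zero_grad()
    loss = model(torch.randn(32, 10)).pow(2).mean()
    loss.backward()
    optimizer.step()

# after the switch, the group carries the estimated SGD learning rate
print(optimizer.param_groups[0].get('phase'), optimizer.param_groups[0]['lr'])
```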
718,121 |
pytorch_optimizer.optimizer.fp16
|
SafeFP16Optimizer
|
Safe FP16 Optimizer.
:param optimizer: OPTIMIZER. the wrapped optimizer, which updates the fp32 master parameters.
:param aggregate_g_norms: bool. whether to aggregate gradient norms when clipping (passed as `sync` to `clip_grad_norm`).
:param min_loss_scale: float. minimum allowed loss scale; clipping raises a FloatingPointError once the dynamic scale falls to or below this value.
|
class SafeFP16Optimizer(Optimizer): # pragma: no cover
r"""Safe FP16 Optimizer.
:param optimizer: OPTIMIZER. the wrapped optimizer, which updates the fp32 master parameters.
:param aggregate_g_norms: bool. whether to aggregate gradient norms when clipping (passed as `sync` to `clip_grad_norm`).
:param min_loss_scale: float. minimum allowed loss scale; clipping raises a FloatingPointError once the dynamic scale falls to or below this value.
"""
def __init__(
self,
optimizer: OPTIMIZER,
aggregate_g_norms: bool = False,
min_loss_scale: float = 2 ** -5,
): # fmt: skip
self.optimizer = optimizer
self.aggregate_g_norms = aggregate_g_norms
self.min_loss_scale = min_loss_scale
self.fp16_params = self.get_parameters(optimizer)
self.fp32_params = self.build_fp32_params(self.fp16_params, flatten=False)
# we want the optimizer to be tracking the fp32 parameters
if len(optimizer.param_groups) != 1:
# future implementers: this should hopefully be a matter of just
# iterating through the param groups and keeping track of the pointer
# through the fp32_params
raise NotImplementedError('[-] Need to implement the parameter group transfer.')
optimizer.param_groups[0]['params'] = self.fp32_params
self.scaler: DynamicLossScaler = DynamicLossScaler(2.0 ** 15) # fmt: skip
self.needs_sync: bool = True
@classmethod
def get_parameters(cls, optimizer: OPTIMIZER):
params: List = []
for pg in optimizer.param_groups:
params += list(pg['params'])
return params
@classmethod
def build_fp32_params(
cls, parameters: PARAMETERS, flatten: bool = True
) -> Union[torch.Tensor, List[torch.Tensor]]:
# create FP32 copy of parameters and grads
if flatten:
total_param_size: int = sum(p.numel() for p in parameters)
fp32_params = torch.zeros(total_param_size, dtype=torch.float, device=parameters[0].device)
offset: int = 0
for p in parameters:
p_num_el = p.numel()
fp32_params[offset : offset + p_num_el].copy_(p.view(-1))
offset += p_num_el
fp32_params = nn.Parameter(fp32_params)
fp32_params.grad = fp32_params.new(total_param_size)
return fp32_params
fp32_params: List = []
for p in parameters:
p32 = nn.Parameter(p.float())
p32.grad = torch.zeros_like(p32)
fp32_params.append(p32)
return fp32_params
def state_dict(self) -> Dict:
r"""Return the optimizer state dict."""
state_dict = self.optimizer.state_dict()
if self.scaler is not None:
state_dict['loss_scaler'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict: Dict):
r"""Load an optimizer state dict.
In general, we should prefer the configuration of the existing optimizer instance
(e.g., learning rate) over that found in the state_dict. This allows us to
resume training from a checkpoint using a new set of optimizer args.
:param state_dict: Dict. state_dict.
"""
if 'loss_scaler' in state_dict and self.scaler is not None and isinstance(state_dict['loss_scaler'], float):
self.scaler.loss_scale = state_dict['loss_scaler']
self.optimizer.load_state_dict(state_dict)
def backward(self, loss, update_main_grads: bool = False):
r"""Compute the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this function
additionally dynamically scales the loss to avoid gradient underflow.
:param loss: torch.Tensor. loss tensor to scale and backpropagate.
:param update_main_grads: bool. update main gradient.
"""
if self.scaler is not None:
loss = loss * self.scaler.loss_scale
loss.backward()
self.needs_sync = True
if update_main_grads:
self.update_main_grads()
def sync_fp16_grads_to_fp32(self, multiply_grads: float = 1.0):
r"""Sync fp16 to fp32 gradients."""
if self.needs_sync:
if self.scaler is not None:
# correct for dynamic loss scaler
multiply_grads /= self.scaler.loss_scale
# copy FP16 grads to FP32
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
if p.grad is not None:
p32.grad.copy_(p.grad)
p32.grad.mul_(multiply_grads)
else:
p32.grad = torch.zeros_like(p, dtype=torch.float)
self.needs_sync = False
def multiply_grads(self, c: float):
r"""Multiply grads by a constant c."""
if self.needs_sync:
self.sync_fp16_grads_to_fp32(c)
else:
for p32 in self.fp32_params:
p32.grad.mul_(c)
def update_main_grads(self):
self.sync_fp16_grads_to_fp32()
def clip_main_grads(self, max_norm: float):
r"""Clip gradient norm and updates dynamic loss scaler."""
self.sync_fp16_grads_to_fp32()
grad_norm = clip_grad_norm(self.fp32_params, max_norm, sync=self.aggregate_g_norms)
# detect overflow and adjust loss scale
if self.scaler is not None:
overflow: bool = has_overflow(grad_norm)
prev_scale: float = self.scaler.loss_scale
self.scaler.update_scale(overflow)
if overflow:
self.zero_grad()
if self.scaler.loss_scale <= self.min_loss_scale:
# Use FloatingPointError as an uncommon error
# that parent functions can safely catch to stop training.
self.scaler.loss_scale = prev_scale
raise FloatingPointError(
f'Minimum loss scale reached ({self.min_loss_scale}). Your loss is probably exploding. '
'Try lowering the learning rate, using gradient clipping or '
'increasing the batch size.\n'
f'Overflow: setting loss scale to {self.scaler.loss_scale}'
)
return grad_norm
def step(self, closure: CLOSURE = None):
r"""Perform a single optimization step."""
self.sync_fp16_grads_to_fp32()
self.optimizer.step(closure)
# copy FP32 params back into FP16 model
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
p.data.copy_(p32)
def zero_grad(self):
r"""Clear the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
for p32 in self.fp32_params:
p32.grad.zero_()
self.needs_sync = False
def get_lr(self) -> float:
r"""Get learning rate."""
return self.optimizer.get_lr()
def set_lr(self, lr: float):
r"""Set learning rate."""
self.optimizer.set_lr(lr)
@property
def loss_scale(self) -> float:
r"""Convenience function which TorchAgent calls to get current scale value."""
return self.scaler.loss_scale
|
(optimizer: Type[torch.optim.optimizer.Optimizer], aggregate_g_norms: bool = False, min_loss_scale: float = 0.03125)
|
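A sketch of the master-weights / loss-scaling flow the class implements, using only the methods shown above. It assumes a CUDA device and a half-precision model; the import path follows the package column (`pytorch_optimizer.optimizer.fp16`).

```python
# Sketch under the assumptions stated above; any inner torch.optim optimizer works,
# since SafeFP16Optimizer swaps its param group to the fp32 master copies.
import torch
from pytorch_optimizer.optimizer.fp16 import SafeFP16Optimizer

model = torch.nn.Linear(16, 1).half().cuda()
optimizer = SafeFP16Optimizer(torch.optim.SGD(model.parameters(), lr=1e-2))

x = torch.randn(4, 16, dtype=torch.half, device='cuda')
loss = model(x).float().pow(2).mean()

optimizer.backward(loss)         # scales the loss before calling backward()
optimizer.clip_main_grads(1.0)   # syncs fp16 grads to fp32, clips, updates the loss scaler
optimizer.step()                 # fp32 update, then copies the result back into the fp16 params
optimizer.zero_grad()
```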
718,123 |
pytorch_optimizer.optimizer.fp16
|
__init__
| null |
def __init__(
self,
optimizer: OPTIMIZER,
aggregate_g_norms: bool = False,
min_loss_scale: float = 2 ** -5,
): # fmt: skip
self.optimizer = optimizer
self.aggregate_g_norms = aggregate_g_norms
self.min_loss_scale = min_loss_scale
self.fp16_params = self.get_parameters(optimizer)
self.fp32_params = self.build_fp32_params(self.fp16_params, flatten=False)
# we want the optimizer to be tracking the fp32 parameters
if len(optimizer.param_groups) != 1:
# future implementers: this should hopefully be a matter of just
# iterating through the param groups and keeping track of the pointer
# through the fp32_params
raise NotImplementedError('[-] Need to implement the parameter group transfer.')
optimizer.param_groups[0]['params'] = self.fp32_params
self.scaler: DynamicLossScaler = DynamicLossScaler(2.0 ** 15) # fmt: skip
self.needs_sync: bool = True
|
(self, optimizer: Type[torch.optim.optimizer.Optimizer], aggregate_g_norms: bool = False, min_loss_scale: float = 0.03125)
|
718,132 |
pytorch_optimizer.optimizer.fp16
|
backward
|
Compute the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this function
additionally dynamically scales the loss to avoid gradient underflow.
:param loss: torch.Tensor. loss tensor to scale and backpropagate.
:param update_main_grads: bool. update main gradient.
|
def backward(self, loss, update_main_grads: bool = False):
r"""Compute the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this function
additionally dynamically scales the loss to avoid gradient underflow.
:param loss: torch.Tensor. loss tensor to scale and backpropagate.
:param update_main_grads: bool. update main gradient.
"""
if self.scaler is not None:
loss = loss * self.scaler.loss_scale
loss.backward()
self.needs_sync = True
if update_main_grads:
self.update_main_grads()
|
(self, loss, update_main_grads: bool = False)
|
718,133 |
pytorch_optimizer.optimizer.fp16
|
clip_main_grads
|
Clip gradient norm and updates dynamic loss scaler.
|
def clip_main_grads(self, max_norm: float):
r"""Clip gradient norm and updates dynamic loss scaler."""
self.sync_fp16_grads_to_fp32()
grad_norm = clip_grad_norm(self.fp32_params, max_norm, sync=self.aggregate_g_norms)
# detect overflow and adjust loss scale
if self.scaler is not None:
overflow: bool = has_overflow(grad_norm)
prev_scale: float = self.scaler.loss_scale
self.scaler.update_scale(overflow)
if overflow:
self.zero_grad()
if self.scaler.loss_scale <= self.min_loss_scale:
# Use FloatingPointError as an uncommon error
# that parent functions can safely catch to stop training.
self.scaler.loss_scale = prev_scale
raise FloatingPointError(
f'Minimum loss scale reached ({self.min_loss_scale}). Your loss is probably exploding. '
'Try lowering the learning rate, using gradient clipping or '
'increasing the batch size.\n'
f'Overflow: setting loss scale to {self.scaler.loss_scale}'
)
return grad_norm
|
(self, max_norm: float)
|
718,134 |
pytorch_optimizer.optimizer.fp16
|
get_lr
|
Get learning rate.
|
def get_lr(self) -> float:
r"""Get learning rate."""
return self.optimizer.get_lr()
|
(self) -> float
|
718,135 |
pytorch_optimizer.optimizer.fp16
|
load_state_dict
|
Load an optimizer state dict.
In general, we should prefer the configuration of the existing optimizer instance
(e.g., learning rate) over that found in the state_dict. This allows us to
resume training from a checkpoint using a new set of optimizer args.
:param state_dict: Dict. state_dict.
|
def load_state_dict(self, state_dict: Dict):
r"""Load an optimizer state dict.
In general, we should prefer the configuration of the existing optimizer instance
(e.g., learning rate) over that found in the state_dict. This allows us to
resume training from a checkpoint using a new set of optimizer args.
:param state_dict: Dict. state_dict.
"""
if 'loss_scaler' in state_dict and self.scaler is not None and isinstance(state_dict['loss_scaler'], float):
self.scaler.loss_scale = state_dict['loss_scaler']
self.optimizer.load_state_dict(state_dict)
|
(self, state_dict: Dict)
|
718,136 |
pytorch_optimizer.optimizer.fp16
|
multiply_grads
|
Multiply grads by a constant c.
|
def multiply_grads(self, c: float):
r"""Multiply grads by a constant c."""
if self.needs_sync:
self.sync_fp16_grads_to_fp32(c)
else:
for p32 in self.fp32_params:
p32.grad.mul_(c)
|
(self, c: float)
|
718,144 |
pytorch_optimizer.optimizer.fp16
|
set_lr
|
Set learning rate.
|
def set_lr(self, lr: float):
r"""Set learning rate."""
self.optimizer.set_lr(lr)
|
(self, lr: float)
|
718,145 |
pytorch_optimizer.optimizer.fp16
|
state_dict
|
Return the optimizer state dict.
|
def state_dict(self) -> Dict:
r"""Return the optimizer state dict."""
state_dict = self.optimizer.state_dict()
if self.scaler is not None:
state_dict['loss_scaler'] = self.scaler.loss_scale
return state_dict
|
(self) -> Dict
|
718,146 |
pytorch_optimizer.optimizer.fp16
|
step
|
Perform a single optimization step.
|
def step(self, closure: CLOSURE = None):
r"""Perform a single optimization step."""
self.sync_fp16_grads_to_fp32()
self.optimizer.step(closure)
# copy FP32 params back into FP16 model
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
p.data.copy_(p32)
|
(self, closure: Optional[Callable[[], float]] = None)
|
718,147 |
pytorch_optimizer.optimizer.fp16
|
sync_fp16_grads_to_fp32
|
Sync fp16 to fp32 gradients.
|
def sync_fp16_grads_to_fp32(self, multiply_grads: float = 1.0):
r"""Sync fp16 to fp32 gradients."""
if self.needs_sync:
if self.scaler is not None:
# correct for dynamic loss scaler
multiply_grads /= self.scaler.loss_scale
# copy FP16 grads to FP32
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
if p.grad is not None:
p32.grad.copy_(p.grad)
p32.grad.mul_(multiply_grads)
else:
p32.grad = torch.zeros_like(p, dtype=torch.float)
self.needs_sync = False
|
(self, multiply_grads: float = 1.0)
|
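A standalone illustration of the unscaling step performed above: gradients produced by a loss that was multiplied by the loss scale are divided by that same scale when copied into the fp32 master gradients.

```python
# Self-contained numeric check of the `multiply_grads /= loss_scale` correction.
import torch

loss_scale = 2.0 ** 15
p16 = torch.nn.Parameter(torch.randn(4, dtype=torch.float16))
(p16.float().sum() * loss_scale).backward()   # scaled loss -> scaled fp16 gradient

unscaled = p16.grad.float() / loss_scale      # what the fp32 master gradient receives
print(unscaled)                               # ~tensor([1., 1., 1., 1.])
```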
718,148 |
pytorch_optimizer.optimizer.fp16
|
update_main_grads
| null |
def update_main_grads(self):
self.sync_fp16_grads_to_fp32()
|
(self)
|
718,149 |
pytorch_optimizer.optimizer.fp16
|
zero_grad
|
Clear the gradients of all optimized parameters.
|
def zero_grad(self):
r"""Clear the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
for p32 in self.fp32_params:
p32.grad.zero_()
self.needs_sync = False
|
(self)
|
718,150 |
pytorch_optimizer.optimizer.shampoo
|
ScalableShampoo
|
Scalable Preconditioned Stochastic Tensor Optimization.
This version of the Scalable Shampoo optimizer targets a single-GPU environment, not a distributed environment
or XLA devices. The original design computes pre-conditioners asynchronously on distributed CPUs, whereas this
implementation computes them synchronously on the GPU, where they account for roughly 99% of the optimization time.
Still, it is much faster than the previous Shampoo optimizer because it uses coupled Newton iteration to compute
the G^{-1/p} matrices, while the previous one uses SVD, which is really slow.
Also, this implementation offers
1. lots of plug-ins (e.g. gradient grafting, type of pre-conditioning, etc.)
2. features not yet implemented in the official PyTorch code.
3. readable, organized, clean code.
Reference : https://github.com/google-research/google-research/blob/master/scalable_shampoo/pytorch/shampoo.py.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. beta1, beta2.
:param moving_average_for_momentum: bool. perform moving_average for momentum (beta1).
:param weight_decay: float. weight decay (L2 penalty).
:param decoupled_weight_decay: bool. use decoupled weight_decay.
:param decoupled_learning_rate: bool. use decoupled lr, otherwise couple it w/ preconditioned gradient.
:param inverse_exponent_override: int. fixed exponent for pre-conditioner, if > 0.
:param start_preconditioning_step: int.
:param preconditioning_compute_steps: int. performance-tuning parameter for controlling memory and compute
requirements; how often to compute the pre-conditioner. Ideally 1 is best. However, the current implementation
does not work in a distributed environment (statistics and pre-conditioners are not synced among replicas),
computes on the GPU (not the CPU), and uses fp32 precision (not fp64).
Also, according to the paper, `preconditioning_compute_steps` does not have a significant effect on
performance, so if speed is a problem, try setting this value larger (e.g. 1000).
:param statistics_compute_steps: int. How often to compute statistics. usually set to 1 (or 10).
:param block_size: int. Block size for large layers (if > 0).
Block size = 1 ==> AdaGrad (Don't do this, extremely inefficient!)
Block size should be as large as feasible under memory/time constraints.
:param skip_preconditioning_rank_lt: int. Skips preconditioning for parameters with rank less than this value.
:param no_preconditioning_for_layers_with_dim_gt: int. avoid preconditioning large layers to reduce overall memory.
:param shape_interpretation: bool. automatic shape interpretation (e.g. [4, 3, 1024, 512] would result in
12 x [1024, 512] L and R statistics). When disabled, Shampoo constructs statistics of shape
[4, 4], [3, 3], [1024, 1024], [512, 512].
:param graft_type: int. type of grafting (SGD or AdaGrad or RMSProp or SQRT_N or None).
:param pre_conditioner_type: int. type of pre-conditioner.
:param nesterov: bool. Nesterov momentum.
:param diagonal_eps: float. term added to the denominator to improve numerical stability.
:param matrix_eps: float. term added to the denominator to improve numerical stability.
:param use_svd: bool. use SVD instead of the Schur-Newton method to calculate M^{-1/p}.
Theoretically, the Schur-Newton method is faster than SVD. However, due to the inefficiency of the loop code
and the availability of a well-optimized SVD kernel, SVD is much faster in some cases (usually for small models).
see https://github.com/kozistr/pytorch_optimizer/pull/103
|
class ScalableShampoo(Optimizer, BaseOptimizer):
r"""Scalable Preconditioned Stochastic Tensor Optimization.
This version of the Scalable Shampoo optimizer targets a single-GPU environment, not a distributed environment
or XLA devices. The original design computes pre-conditioners asynchronously on distributed CPUs, whereas this
implementation computes them synchronously on the GPU, where they account for roughly 99% of the optimization time.
Still, it is much faster than the previous Shampoo optimizer because it uses coupled Newton iteration to compute
the G^{-1/p} matrices, while the previous one uses SVD, which is really slow.
Also, this implementation offers
1. lots of plug-ins (e.g. gradient grafting, type of pre-conditioning, etc.)
2. features not yet implemented in the official PyTorch code.
3. readable, organized, clean code.
Reference : https://github.com/google-research/google-research/blob/master/scalable_shampoo/pytorch/shampoo.py.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. beta1, beta2.
:param moving_average_for_momentum: bool. perform moving_average for momentum (beta1).
:param weight_decay: float. weight decay (L2 penalty).
:param decoupled_weight_decay: bool. use decoupled weight_decay.
:param decoupled_learning_rate: bool. use decoupled lr, otherwise couple it w/ preconditioned gradient.
:param inverse_exponent_override: int. fixed exponent for pre-conditioner, if > 0.
:param start_preconditioning_step: int.
:param preconditioning_compute_steps: int. performance-tuning parameter for controlling memory and compute
requirements; how often to compute the pre-conditioner. Ideally 1 is best. However, the current implementation
does not work in a distributed environment (statistics and pre-conditioners are not synced among replicas),
computes on the GPU (not the CPU), and uses fp32 precision (not fp64).
Also, according to the paper, `preconditioning_compute_steps` does not have a significant effect on
performance, so if speed is a problem, try setting this value larger (e.g. 1000).
:param statistics_compute_steps: int. How often to compute statistics. usually set to 1 (or 10).
:param block_size: int. Block size for large layers (if > 0).
Block size = 1 ==> AdaGrad (Don't do this, extremely inefficient!)
Block size should be as large as feasible under memory/time constraints.
:param skip_preconditioning_rank_lt: int. Skips preconditioning for parameters with rank less than this value.
:param no_preconditioning_for_layers_with_dim_gt: int. avoid preconditioning large layers to reduce overall memory.
:param shape_interpretation: bool. automatic shape interpretation (e.g. [4, 3, 1024, 512] would result in
12 x [1024, 512] L and R statistics). When disabled, Shampoo constructs statistics of shape
[4, 4], [3, 3], [1024, 1024], [512, 512].
:param graft_type: int. type of grafting (SGD or AdaGrad or RMSProp or SQRT_N or None).
:param pre_conditioner_type: int. type of pre-conditioner.
:param nesterov: bool. Nesterov momentum.
:param diagonal_eps: float. term added to the denominator to improve numerical stability.
:param matrix_eps: float. term added to the denominator to improve numerical stability.
:param use_svd: bool. use SVD instead of the Schur-Newton method to calculate M^{-1/p}.
Theoretically, the Schur-Newton method is faster than SVD. However, due to the inefficiency of the loop code
and the availability of a well-optimized SVD kernel, SVD is much faster in some cases (usually for small models).
see https://github.com/kozistr/pytorch_optimizer/pull/103
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
moving_average_for_momentum: bool = False,
weight_decay: float = 0.0,
decoupled_weight_decay: bool = False,
decoupled_learning_rate: bool = True,
inverse_exponent_override: int = 0,
start_preconditioning_step: int = 25,
preconditioning_compute_steps: int = 1000,
statistics_compute_steps: int = 1,
block_size: int = 512,
skip_preconditioning_rank_lt: int = 1,
no_preconditioning_for_layers_with_dim_gt: int = 8192,
shape_interpretation: bool = True,
graft_type: int = LayerWiseGrafting.SGD,
pre_conditioner_type: int = PreConditionerType.ALL,
nesterov: bool = True,
diagonal_eps: float = 1e-10,
matrix_eps: float = 1e-6,
use_svd: bool = False,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_step(start_preconditioning_step, 'start_preconditioning_step')
self.validate_step(preconditioning_compute_steps, 'preconditioning_compute_steps')
self.validate_step(statistics_compute_steps, 'statistics_compute_steps')
self.validate_non_negative(diagonal_eps, 'diagonal_eps')
self.validate_non_negative(matrix_eps, 'matrix_eps')
self.inverse_exponent_override = inverse_exponent_override
self.start_preconditioning_step = start_preconditioning_step
self.preconditioning_compute_steps = preconditioning_compute_steps
self.statistics_compute_steps = statistics_compute_steps
self.block_size = block_size
self.skip_preconditioning_rank_lt = skip_preconditioning_rank_lt
self.no_preconditioning_for_layers_with_dim_gt = no_preconditioning_for_layers_with_dim_gt
self.shape_interpretation = shape_interpretation
self.graft_type = graft_type
self.pre_conditioner_type = pre_conditioner_type
self.diagonal_eps = diagonal_eps
self.matrix_eps = matrix_eps
self.use_svd = use_svd
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'decoupled_weight_decay': decoupled_weight_decay,
'decoupled_learning_rate': decoupled_learning_rate,
'moving_average_for_momentum': moving_average_for_momentum,
'nesterov': nesterov,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'ScalableShampoo'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['momentum'] = torch.zeros_like(p)
state['pre_conditioner'] = PreConditioner(
p,
group['betas'][1], # beta2
self.inverse_exponent_override,
self.block_size,
self.skip_preconditioning_rank_lt,
self.no_preconditioning_for_layers_with_dim_gt,
self.shape_interpretation,
self.pre_conditioner_type,
self.matrix_eps,
self.use_svd,
)
state['graft'] = build_graft(p, self.graft_type, self.diagonal_eps)
def is_precondition_step(self, step: int) -> bool:
return step >= self.start_preconditioning_step
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
is_precondition_step: bool = self.is_precondition_step(group['step'])
pre_conditioner_multiplier: float = 1.0 if group['decoupled_learning_rate'] else group['lr']
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['momentum'] = torch.zeros_like(p)
state['pre_conditioner'] = PreConditioner(
p,
beta2,
self.inverse_exponent_override,
self.block_size,
self.skip_preconditioning_rank_lt,
self.no_preconditioning_for_layers_with_dim_gt,
self.shape_interpretation,
self.pre_conditioner_type,
self.matrix_eps,
self.use_svd,
)
state['graft'] = build_graft(p, self.graft_type, self.diagonal_eps)
pre_conditioner, graft = state['pre_conditioner'], state['graft']
graft.add_statistics(grad, beta2)
if group['step'] % self.statistics_compute_steps == 0:
pre_conditioner.add_statistics(grad)
if group['step'] % self.preconditioning_compute_steps == 0:
pre_conditioner.compute_pre_conditioners()
graft_grad: torch.Tensor = graft.precondition_gradient(grad * pre_conditioner_multiplier)
shampoo_grad: torch.Tensor = (
pre_conditioner.preconditioned_grad(grad) if is_precondition_step else grad
)
if self.graft_type != LayerWiseGrafting.NONE:
graft_norm = torch.linalg.norm(graft_grad)
shampoo_norm = torch.linalg.norm(shampoo_grad)
shampoo_grad.mul_(graft_norm / (shampoo_norm + 1e-16))
if group['weight_decay'] > 0.0:
if not group['decoupled_weight_decay']:
graft_grad.add_(p, alpha=group['weight_decay'])
shampoo_grad.add_(p, alpha=group['weight_decay'])
else:
graft_grad.mul_(1.0 - group['lr'] * group['weight_decay'])
shampoo_grad.mul_(1.0 - group['lr'] * group['weight_decay'])
state['momentum'].mul_(beta1).add_(shampoo_grad)
graft_momentum = graft.update_momentum(grad, beta1)
momentum_update = state['momentum'] if is_precondition_step else graft_momentum
if group['nesterov']:
w: float = (1.0 - beta1) if group['moving_average_for_momentum'] else 1.0
wd_update = shampoo_grad if is_precondition_step else graft_grad
wd_update.mul_(w)
momentum_update.mul_(beta1).add_(wd_update)
p.add_(momentum_update, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), moving_average_for_momentum: bool = False, weight_decay: float = 0.0, decoupled_weight_decay: bool = False, decoupled_learning_rate: bool = True, inverse_exponent_override: int = 0, start_preconditioning_step: int = 25, preconditioning_compute_steps: int = 1000, statistics_compute_steps: int = 1, block_size: int = 512, skip_preconditioning_rank_lt: int = 1, no_preconditioning_for_layers_with_dim_gt: int = 8192, shape_interpretation: bool = True, graft_type: int = <LayerWiseGrafting.SGD: 1>, pre_conditioner_type: int = <PreConditionerType.ALL: 0>, nesterov: bool = True, diagonal_eps: float = 1e-10, matrix_eps: float = 1e-06, use_svd: bool = False)
|
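A minimal training-loop sketch for the class above, highlighting the tuning knobs its docstring discusses. The constructor arguments are taken from the `__init__` shown; the model and data are illustrative only.

```python
import torch
from pytorch_optimizer.optimizer.shampoo import ScalableShampoo

model = torch.nn.Linear(64, 32)
optimizer = ScalableShampoo(
    model.parameters(),
    lr=1e-3,
    start_preconditioning_step=25,      # grafted updates only, until this step
    preconditioning_compute_steps=100,  # recompute the inverse-root pre-conditioners every 100 steps
    statistics_compute_steps=1,
    block_size=512,
)

for _ in range(200):
    optimizer.zero_grad()
    loss = model(torch.randn(16, 64)).pow(2).mean()
    loss.backward()
    optimizer.step()
```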
718,152 |
pytorch_optimizer.optimizer.shampoo
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
moving_average_for_momentum: bool = False,
weight_decay: float = 0.0,
decoupled_weight_decay: bool = False,
decoupled_learning_rate: bool = True,
inverse_exponent_override: int = 0,
start_preconditioning_step: int = 25,
preconditioning_compute_steps: int = 1000,
statistics_compute_steps: int = 1,
block_size: int = 512,
skip_preconditioning_rank_lt: int = 1,
no_preconditioning_for_layers_with_dim_gt: int = 8192,
shape_interpretation: bool = True,
graft_type: int = LayerWiseGrafting.SGD,
pre_conditioner_type: int = PreConditionerType.ALL,
nesterov: bool = True,
diagonal_eps: float = 1e-10,
matrix_eps: float = 1e-6,
use_svd: bool = False,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_step(start_preconditioning_step, 'start_preconditioning_step')
self.validate_step(preconditioning_compute_steps, 'preconditioning_compute_steps')
self.validate_step(statistics_compute_steps, 'statistics_compute_steps')
self.validate_non_negative(diagonal_eps, 'diagonal_eps')
self.validate_non_negative(matrix_eps, 'matrix_eps')
self.inverse_exponent_override = inverse_exponent_override
self.start_preconditioning_step = start_preconditioning_step
self.preconditioning_compute_steps = preconditioning_compute_steps
self.statistics_compute_steps = statistics_compute_steps
self.block_size = block_size
self.skip_preconditioning_rank_lt = skip_preconditioning_rank_lt
self.no_preconditioning_for_layers_with_dim_gt = no_preconditioning_for_layers_with_dim_gt
self.shape_interpretation = shape_interpretation
self.graft_type = graft_type
self.pre_conditioner_type = pre_conditioner_type
self.diagonal_eps = diagonal_eps
self.matrix_eps = matrix_eps
self.use_svd = use_svd
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'decoupled_weight_decay': decoupled_weight_decay,
'decoupled_learning_rate': decoupled_learning_rate,
'moving_average_for_momentum': moving_average_for_momentum,
'nesterov': nesterov,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), moving_average_for_momentum: bool = False, weight_decay: float = 0.0, decoupled_weight_decay: bool = False, decoupled_learning_rate: bool = True, inverse_exponent_override: int = 0, start_preconditioning_step: int = 25, preconditioning_compute_steps: int = 1000, statistics_compute_steps: int = 1, block_size: int = 512, skip_preconditioning_rank_lt: int = 1, no_preconditioning_for_layers_with_dim_gt: int = 8192, shape_interpretation: bool = True, graft_type: int = <LayerWiseGrafting.SGD: 1>, pre_conditioner_type: int = <PreConditionerType.ALL: 0>, nesterov: bool = True, diagonal_eps: float = 1e-10, matrix_eps: float = 1e-06, use_svd: bool = False)
|
718,155 |
pytorch_optimizer.optimizer.shampoo
|
__str__
| null |
def __str__(self) -> str:
return 'ScalableShampoo'
|
(self) -> str
|
718,168 |
pytorch_optimizer.optimizer.shampoo
|
is_precondition_step
| null |
def is_precondition_step(self, step: int) -> bool:
return step >= self.start_preconditioning_step
|
(self, step: int) -> bool
|
718,177 |
pytorch_optimizer.optimizer.shampoo
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
momentum = group['momentum']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
if momentum > 0.0:
state['momentum_buffer'] = grad.clone()
for dim_id, dim in enumerate(grad.size()):
state[f'pre_cond_{dim_id}'] = group['matrix_eps'] * torch.eye(dim, out=grad.new(dim, dim))
state[f'inv_pre_cond_{dim_id}'] = grad.new(dim, dim).zero_()
if momentum > 0.0:
grad.mul_(1.0 - momentum).add_(state['momentum_buffer'], alpha=momentum)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
order: int = grad.ndimension()
original_size: torch.Size = grad.size()
for dim_id, dim in enumerate(grad.size()):
pre_cond, inv_pre_cond = state[f'pre_cond_{dim_id}'], state[f'inv_pre_cond_{dim_id}']
grad = grad.transpose_(0, dim_id).contiguous()
transposed_size = grad.size()
grad = grad.view(dim, -1)
grad_t = grad.t()
pre_cond.add_(grad @ grad_t)
if group['step'] % self.preconditioning_compute_steps == 0:
inv_pre_cond.copy_(compute_power_svd(pre_cond, order))
if dim_id == order - 1:
grad = grad_t @ inv_pre_cond
grad = grad.view(original_size)
else:
grad = inv_pre_cond @ grad
grad = grad.view(transposed_size)
state['momentum_buffer'] = grad
p.add_(grad, alpha=-group['lr'])
return loss
|
(self)
|
718,192 |
pytorch_optimizer.optimizer.shampoo
|
Shampoo
|
Preconditioned Stochastic Tensor Optimization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param preconditioning_compute_steps: int. performance-tuning parameter for controlling memory and compute
requirements; how often to compute the pre-conditioner.
:param matrix_eps: float. term added to the denominator to improve numerical stability.
|
class Shampoo(Optimizer, BaseOptimizer):
r"""Preconditioned Stochastic Tensor Optimization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param preconditioning_compute_steps: int. performance-tuning parameter for controlling memory and compute
requirements; how often to compute the pre-conditioner.
:param matrix_eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.0,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
preconditioning_compute_steps: int = 1,
matrix_eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_step(preconditioning_compute_steps, 'preconditioning_compute_steps')
self.validate_non_negative(matrix_eps, 'matrix_eps')
self.preconditioning_compute_steps = preconditioning_compute_steps
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'matrix_eps': matrix_eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Shampoo'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
momentum = group['momentum']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
if momentum > 0.0:
state['momentum_buffer'] = grad.clone()
for dim_id, dim in enumerate(grad.size()):
state[f'pre_cond_{dim_id}'] = group['matrix_eps'] * torch.eye(dim, out=grad.new(dim, dim))
state[f'inv_pre_cond_{dim_id}'] = grad.new(dim, dim).zero_()
if momentum > 0.0:
grad.mul_(1.0 - momentum).add_(state['momentum_buffer'], alpha=momentum)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
order: int = grad.ndimension()
original_size: torch.Size = grad.size()
for dim_id, dim in enumerate(grad.size()):
pre_cond, inv_pre_cond = state[f'pre_cond_{dim_id}'], state[f'inv_pre_cond_{dim_id}']
grad = grad.transpose_(0, dim_id).contiguous()
transposed_size = grad.size()
grad = grad.view(dim, -1)
grad_t = grad.t()
pre_cond.add_(grad @ grad_t)
if group['step'] % self.preconditioning_compute_steps == 0:
inv_pre_cond.copy_(compute_power_svd(pre_cond, order))
if dim_id == order - 1:
grad = grad_t @ inv_pre_cond
grad = grad.view(original_size)
else:
grad = inv_pre_cond @ grad
grad = grad.view(transposed_size)
state['momentum_buffer'] = grad
p.add_(grad, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.0, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, preconditioning_compute_steps: int = 1, matrix_eps: float = 1e-06)
|
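The inner loop above accumulates one Gram-matrix statistic per tensor dimension and contracts the gradient with their inverse roots. Below is a toy, self-contained sketch of that bookkeeping for a 2-D gradient; `compute_power_svd` is not shown in this excerpt, so the exponent used here (-1/(2·order), the classical Shampoo choice) is an assumption.

```python
# Toy sketch only: the real inverse-root exponent lives in `compute_power_svd`.
import torch

eps = 1e-6
grad = torch.randn(3, 5)
order = grad.ndimension()                       # 2 for a matrix

left = eps * torch.eye(3) + grad @ grad.t()     # corresponds to state['pre_cond_0']
right = eps * torch.eye(5) + grad.t() @ grad    # corresponds to state['pre_cond_1']

def inv_root(mat: torch.Tensor, p: float) -> torch.Tensor:
    vals, vecs = torch.linalg.eigh(mat)
    return vecs @ torch.diag(vals.clamp(min=eps).pow(-1.0 / p)) @ vecs.t()

# assumed exponent: G^{-1/(2 * order)} on each side, as in the Shampoo paper
preconditioned = inv_root(left, 2 * order) @ grad @ inv_root(right, 2 * order)
print(preconditioned.shape)                     # torch.Size([3, 5])
```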
718,194 |
pytorch_optimizer.optimizer.shampoo
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.0,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
preconditioning_compute_steps: int = 1,
matrix_eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_step(preconditioning_compute_steps, 'preconditioning_compute_steps')
self.validate_non_negative(matrix_eps, 'matrix_eps')
self.preconditioning_compute_steps = preconditioning_compute_steps
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'matrix_eps': matrix_eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.0, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, preconditioning_compute_steps: int = 1, matrix_eps: float = 1e-06)
|
718,197 |
pytorch_optimizer.optimizer.shampoo
|
__str__
| null |
def __str__(self) -> str:
return 'Shampoo'
|
(self) -> str
|
718,233 |
pytorch_optimizer.optimizer.sgd
|
SignSGD
|
Compressed Optimisation for Non-Convex Problems.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum factor (0.0 = SignSGD, >0 = Signum).
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
|
class SignSGD(Optimizer, BaseOptimizer):
r"""Compressed Optimisation for Non-Convex Problems.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum factor (0.0 = SignSGD, >0 = Signum).
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.9,
weight_decay: float = 0.0,
weight_decouple: bool = True,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'beta', 0.0, 1.0)
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
}
super().__init__(params, defaults)
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
if group['momentum'] > 0.0:
state['momentum_buffer'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
momentum = group['momentum']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if momentum > 0.0:
if len(state) == 0:
state['momentum_buffer'] = torch.zeros_like(p)
buf = state['momentum_buffer']
buf.mul_(momentum).add_(grad, alpha=1.0 - momentum)
else:
buf = grad
p.add_(torch.sign(buf), alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.9, weight_decay: float = 0.0, weight_decouple: bool = True)
|
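A standalone illustration of the Signum update in the `step` above: every coordinate moves by exactly `lr`, in the direction opposite to the sign of the momentum buffer, regardless of the gradient's magnitude.

```python
import torch

lr, momentum = 1e-3, 0.9
p = torch.tensor([0.5, -0.2, 1.0])
buf = torch.zeros_like(p)

grad = torch.tensor([0.3, -40.0, 0.0001])
buf.mul_(momentum).add_(grad, alpha=1.0 - momentum)   # same EMA as in the optimizer
p.add_(torch.sign(buf), alpha=-lr)
print(p)  # tensor([0.4990, -0.1990, 0.9990]); |step| == lr for every coordinate
```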
718,235 |
pytorch_optimizer.optimizer.sgd
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.9,
weight_decay: float = 0.0,
weight_decouple: bool = True,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'beta', 0.0, 1.0)
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.9, weight_decay: float = 0.0, weight_decouple: bool = True)
|
718,273 |
pytorch_optimizer.loss.f1
|
SoftF1Loss
|
Soft-F1 loss.
:param beta: float. f-beta.
:param eps: float. epsilon.
|
class SoftF1Loss(nn.Module):
r"""Soft-F1 loss.
:param beta: float. f-beta.
:param eps: float. epsilon.
"""
def __init__(self, beta: float = 1.0, eps: float = 1e-6):
super().__init__()
self.beta = beta
self.eps = eps
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
tp = (y_true * y_pred).sum().float()
fp = ((1 - y_true) * y_pred).sum().float()  # predicted positive but actually negative
fn = (y_true * (1 - y_pred)).sum().float()  # actually positive but predicted negative
p = tp / (tp + fp + self.eps)
r = tp / (tp + fn + self.eps)
f1 = (1 + self.beta ** 2) * (p * r) / ((self.beta ** 2) * p + r + self.eps) # fmt: skip
f1 = torch.where(torch.isnan(f1), torch.zeros_like(f1), f1)
return 1.0 - f1.mean()
|
(beta: float = 1.0, eps: float = 1e-06)
|
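A minimal usage sketch. Note that `forward` above applies no sigmoid itself, so probabilities in [0, 1] should be passed in; the import path follows the package column (`pytorch_optimizer.loss.f1`).

```python
import torch
from pytorch_optimizer.loss.f1 import SoftF1Loss

criterion = SoftF1Loss(beta=1.0)
logits = torch.randn(8, requires_grad=True)
y_true = torch.randint(0, 2, (8,)).float()

loss = criterion(torch.sigmoid(logits), y_true)  # probabilities in, soft 1 - F1 out
loss.backward()
```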
718,279 |
pytorch_optimizer.loss.f1
|
__init__
| null |
def __init__(self, beta: float = 1.0, eps: float = 1e-6):
super().__init__()
self.beta = beta
self.eps = eps
|
(self, beta: float = 1.0, eps: float = 1e-06)
|
718,309 |
pytorch_optimizer.loss.f1
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
tp = (y_true * y_pred).sum().float()
fp = ((1 - y_true) * y_pred).sum().float()  # predicted positive but actually negative
fn = (y_true * (1 - y_pred)).sum().float()  # actually positive but predicted negative
p = tp / (tp + fp + self.eps)
r = tp / (tp + fn + self.eps)
f1 = (1 + self.beta ** 2) * (p * r) / ((self.beta ** 2) * p + r + self.eps) # fmt: skip
f1 = torch.where(torch.isnan(f1), torch.zeros_like(f1), f1)
return 1.0 - f1.mean()
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
718,343 |
pytorch_optimizer.optimizer.sophia
|
SophiaH
|
Second-order Clipped Stochastic Optimization.
Requires `loss.backward(create_graph=True)` in order to calculate hessians.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param p: float. clip effective (applied) gradient (p).
:param update_period: int. number of steps after which to apply hessian approximation.
:param num_samples: int. times to sample `z` for the approximation of the hessian trace.
:param hessian_distribution: HUTCHINSON_G. type of distribution to initialize hessian.
:param eps: float. term added to the denominator to improve numerical stability.
|
class SophiaH(Optimizer, BaseOptimizer):
r"""Second-order Clipped Stochastic Optimization.
Requires `loss.backward(create_graph=True)` in order to calculate hessians.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param p: float. clip effective (applied) gradient (p).
:param update_period: int. number of steps after which to apply hessian approximation.
:param num_samples: int. times to sample `z` for the approximation of the hessian trace.
:param hessian_distribution: HUTCHINSON_G. type of distribution to initialize hessian.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 6e-2,
betas: BETAS = (0.96, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
p: float = 1e-2,
update_period: int = 10,
num_samples: int = 1,
hessian_distribution: HUTCHINSON_G = 'gaussian',
eps: float = 1e-12,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(p, 'p (gradient clip)')
self.validate_step(update_period, 'update_period')
self.validate_positive(num_samples, 'num_samples')
self.validate_options(hessian_distribution, 'hessian_distribution', ['gaussian', 'rademacher'])
self.validate_non_negative(eps, 'eps')
self.update_period = update_period
self.num_samples = num_samples
self.distribution = hessian_distribution
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'p': p,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'SophiaH'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['momentum'] = torch.zeros_like(p)
state['hessian_moment'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None, hessian: Optional[List[torch.Tensor]] = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
step: int = self.param_groups[0].get('step', 1)
if hessian is not None:
self.set_hessian(self.param_groups, self.state, hessian)
elif step % self.update_period == 0:
self.zero_hessian(self.param_groups, self.state)
self.compute_hutchinson_hessian(
param_groups=self.param_groups,
state=self.state,
num_samples=self.num_samples,
distribution=self.distribution,
)
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['momentum'] = torch.zeros_like(p)
state['hessian_moment'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
momentum, hessian_moment = state['momentum'], state['hessian_moment']
momentum.mul_(beta1).add_(grad, alpha=1.0 - beta1)
if 'hessian' in state and (group['step'] % self.update_period == 0 or hessian is not None):
hessian_moment.mul_(beta2).add_(state['hessian'], alpha=1.0 - beta2)
update = (momentum / torch.clip(hessian_moment, min=group['eps'])).clamp_(-group['p'], group['p'])
p.add_(update, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.06, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.96, 0.99), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, p: float = 0.01, update_period: int = 10, num_samples: int = 1, hessian_distribution: Literal['gaussian', 'rademacher'] = 'gaussian', eps: float = 1e-12)
|
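A minimal training-loop sketch for the class above. The one non-standard detail, stated in the docstring, is that the backward pass must be run with `create_graph=True` so the optimizer can form its Hutchinson Hessian estimate every `update_period` steps.

```python
import torch
from pytorch_optimizer.optimizer.sophia import SophiaH

model = torch.nn.Linear(10, 1)
optimizer = SophiaH(model.parameters(), lr=6e-2, update_period=10)

for _ in range(20):
    optimizer.zero_grad()
    loss = model(torch.randn(8, 10)).pow(2).mean()
    loss.backward(create_graph=True)  # required for the Hessian-vector products
    optimizer.step()
```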
718,345 |
pytorch_optimizer.optimizer.sophia
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 6e-2,
betas: BETAS = (0.96, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
p: float = 1e-2,
update_period: int = 10,
num_samples: int = 1,
hessian_distribution: HUTCHINSON_G = 'gaussian',
eps: float = 1e-12,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(p, 'p (gradient clip)')
self.validate_step(update_period, 'update_period')
self.validate_positive(num_samples, 'num_samples')
self.validate_options(hessian_distribution, 'hessian_distribution', ['gaussian', 'rademacher'])
self.validate_non_negative(eps, 'eps')
self.update_period = update_period
self.num_samples = num_samples
self.distribution = hessian_distribution
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'p': p,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.06, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.96, 0.99), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, p: float = 0.01, update_period: int = 10, num_samples: int = 1, hessian_distribution: Literal['gaussian', 'rademacher'] = 'gaussian', eps: float = 1e-12)
|
718,348 |
pytorch_optimizer.optimizer.sophia
|
__str__
| null |
def __str__(self) -> str:
return 'SophiaH'
|
(self) -> str
|
718,369 |
pytorch_optimizer.optimizer.sophia
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None, hessian: Optional[List[torch.Tensor]] = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
step: int = self.param_groups[0].get('step', 1)
if hessian is not None:
self.set_hessian(self.param_groups, self.state, hessian)
elif step % self.update_period == 0:
self.zero_hessian(self.param_groups, self.state)
self.compute_hutchinson_hessian(
param_groups=self.param_groups,
state=self.state,
num_samples=self.num_samples,
distribution=self.distribution,
)
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['momentum'] = torch.zeros_like(p)
state['hessian_moment'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
momentum, hessian_moment = state['momentum'], state['hessian_moment']
momentum.mul_(beta1).add_(grad, alpha=1.0 - beta1)
if 'hessian' in state and (group['step'] % self.update_period == 0 or hessian is not None):
hessian_moment.mul_(beta2).add_(state['hessian'], alpha=1.0 - beta2)
update = (momentum / torch.clip(hessian_moment, min=group['eps'])).clamp_(-group['p'], group['p'])
p.add_(update, alpha=-group['lr'])
return loss
|
(self)
|
718,384 |
pytorch_optimizer.optimizer.tiger
|
Tiger
|
A Tight-fisted Optimizer, an optimizer that is extremely budget-conscious.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param beta: float. coefficient used for computing the running average of the gradient.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
|
class Tiger(Optimizer, BaseOptimizer):
r"""A Tight-fisted Optimizer, an optimizer that is extremely budget-conscious.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param beta: float. coefficient used for computing the running average of the gradient.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
beta: float = 0.965,
weight_decay: float = 0.01,
weight_decouple: bool = True,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[)')
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'beta': beta,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Tiger'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
beta = group['beta']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
exp_avg = state['exp_avg']
exp_avg.mul_(beta).add_(grad, alpha=1.0 - beta)
p.add_(torch.sign(exp_avg), alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, beta: float = 0.965, weight_decay: float = 0.01, weight_decouple: bool = True, fixed_decay: bool = False)
|
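A small sketch of the class above. Its budget-conscious design is visible in the per-parameter state: after a step it holds only the single `exp_avg` buffer, and the applied update is just the sign of that moving average.

```python
import torch
from pytorch_optimizer.optimizer.tiger import Tiger

w = torch.nn.Parameter(torch.randn(4))
optimizer = Tiger([w], lr=1e-3)

w.sum().backward()
optimizer.step()
print(list(optimizer.state[w].keys()))  # ['exp_avg'] - no second-moment buffer is kept
```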
718,386 |
pytorch_optimizer.optimizer.tiger
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
beta: float = 0.965,
weight_decay: float = 0.01,
weight_decouple: bool = True,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[)')
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'beta': beta,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, beta: float = 0.965, weight_decay: float = 0.01, weight_decouple: bool = True, fixed_decay: bool = False)
|
718,389 |
pytorch_optimizer.optimizer.tiger
|
__str__
| null |
def __str__(self) -> str:
return 'Tiger'
|
(self) -> str
|
718,425 |
pytorch_optimizer.loss.tversky
|
TverskyLoss
|
Tversky Loss w/ logits input.
:param alpha: float. alpha.
:param beta: float. beta.
:param smooth: float. smooth factor.
|
class TverskyLoss(nn.Module):
r"""Tversky Loss w/ logits input.
:param alpha: float. alpha.
:param beta: float. beta.
:param smooth: float. smooth factor.
"""
def __init__(self, alpha: float = 0.5, beta: float = 0.5, smooth: float = 1e-6):
super().__init__()
self.alpha = alpha
self.beta = beta
self.smooth = smooth
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
y_pred = torch.sigmoid(y_pred)
y_pred = y_pred.view(-1)
y_true = y_true.view(-1)
tp = (y_pred * y_true).sum()
fp = ((1.0 - y_true) * y_pred).sum()
fn = (y_true * (1.0 - y_pred)).sum()
loss = (tp + self.smooth) / (tp + self.alpha * fp + self.beta * fn + self.smooth)
return 1.0 - loss
|
(alpha: float = 0.5, beta: float = 0.5, smooth: float = 1e-06)
|
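A small check of the forward pass above. With `alpha = beta = 0.5` the Tversky index reduces to the soft Dice score, and raising `beta` penalises false negatives more heavily; the import path follows the package column (`pytorch_optimizer.loss.tversky`).

```python
import torch
from pytorch_optimizer.loss.tversky import TverskyLoss

logits = torch.tensor([3.0, -3.0, 0.0, 2.0])   # raw scores; forward() applies the sigmoid
target = torch.tensor([1.0, 0.0, 1.0, 0.0])

print(TverskyLoss(alpha=0.5, beta=0.5)(logits, target))  # soft Dice loss
print(TverskyLoss(alpha=0.3, beta=0.7)(logits, target))  # heavier false-negative penalty
```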
718,431 |
pytorch_optimizer.loss.tversky
|
__init__
| null |
def __init__(self, alpha: float = 0.5, beta: float = 0.5, smooth: float = 1e-6):
super().__init__()
self.alpha = alpha
self.beta = beta
self.smooth = smooth
|
(self, alpha: float = 0.5, beta: float = 0.5, smooth: float = 1e-06)
|
718,461 |
pytorch_optimizer.loss.tversky
|
forward
| null |
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
y_pred = torch.sigmoid(y_pred)
y_pred = y_pred.view(-1)
y_true = y_true.view(-1)
tp = (y_pred * y_true).sum()
fp = ((1.0 - y_true) * y_pred).sum()
fn = (y_true * (1.0 - y_pred)).sum()
loss = (tp + self.smooth) / (tp + self.alpha * fp + self.beta * fn + self.smooth)
return 1.0 - loss
|
(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor
|
718,495 |
pytorch_optimizer.optimizer.yogi
|
Yogi
|
Yogi optimizer from 'Adaptive Methods for Nonconvex Optimization', with optional decoupled weight decay.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param initial_accumulator: float. initial values for first and second moments.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class Yogi(Optimizer, BaseOptimizer):
r"""Decoupled Weight Decay Regularization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param initial_accumulator: float. initial values for first and second moments.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-2,
betas: BETAS = (0.9, 0.999),
initial_accumulator: float = 1e-6,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-3,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'initial_accumulator': initial_accumulator,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Yogi'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.full_like(p, fill_value=group['initial_accumulator'])
state['exp_avg_sq'] = torch.full_like(p, fill_value=group['initial_accumulator'])
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.full_like(p, fill_value=group['initial_accumulator'])
state['exp_avg_sq'] = torch.full_like(p, fill_value=group['initial_accumulator'])
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
grad_p2 = grad.mul(grad)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.addcmul_((exp_avg_sq - grad_p2).sign_(), grad_p2, value=-(1.0 - beta2))
de_nom = exp_avg_sq.sqrt().div_(bias_correction2_sq).add_(group['eps'])
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'], step_size=group['lr'], bias_correction1=bias_correction1
)
p.addcdiv_(exp_avg, de_nom, value=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), initial_accumulator: float = 1e-06, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 0.001)
|
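A brief usage sketch for `Yogi`, with its key difference from Adam spelled out as a comment; the model and data are illustrative, and the import uses the module path from this record.

```python
import torch
from torch import nn

from pytorch_optimizer.optimizer.yogi import Yogi  # module path taken from the record above

model = nn.Linear(10, 2)
optimizer = Yogi(model.parameters(), lr=1e-2, betas=(0.9, 0.999), initial_accumulator=1e-6)

# Yogi replaces Adam's multiplicative second-moment update with an additive, sign-controlled one:
#   v_t = v_{t-1} - (1 - beta2) * sign(v_{t-1} - g_t^2) * g_t^2
# which is what the addcmul_ line in step() above implements.
x, y = torch.randn(16, 10), torch.randint(0, 2, (16,))
optimizer.zero_grad()
nn.functional.cross_entropy(model(x), y).backward()
optimizer.step()
```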
718,497 |
pytorch_optimizer.optimizer.yogi
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-2,
betas: BETAS = (0.9, 0.999),
initial_accumulator: float = 1e-6,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-3,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'initial_accumulator': initial_accumulator,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), initial_accumulator: float = 1e-06, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 0.001)
|
718,500 |
pytorch_optimizer.optimizer.yogi
|
__str__
| null |
def __str__(self) -> str:
return 'Yogi'
|
(self) -> str
|
718,521 |
pytorch_optimizer.optimizer.yogi
|
reset
| null |
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.full_like(p, fill_value=group['initial_accumulator'])
state['exp_avg_sq'] = torch.full_like(p, fill_value=group['initial_accumulator'])
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
|
(self)
|
718,536 |
pytorch_optimizer.optimizer.agc
|
agc
|
Clip gradient values in excess of the unit-wise norm.
:param p: torch.Tensor. parameter.
:param grad: torch.Tensor. gradient.
:param agc_eps: float. agc epsilon to clip the norm of the parameter.
:param agc_clip_val: float. norm clip.
:param eps: float. small constant to avoid division by zero; unrelated to the standard optimizer eps.
|
def agc(p: torch.Tensor, grad: torch.Tensor, agc_eps: float, agc_clip_val: float, eps: float = 1e-6) -> torch.Tensor:
r"""Clip gradient values in excess of the unit wise norm.
:param p: torch.Tensor. parameter.
:param grad: torch.Tensor, gradient.
:param agc_eps: float. agc epsilon to clip the norm of parameter.
:param agc_clip_val: float. norm clip.
:param eps: float. simple stop from div by zero and no relation to standard optimizer eps.
"""
p_norm = unit_norm(p).clamp_(agc_eps)
g_norm = unit_norm(grad)
max_norm = p_norm * agc_clip_val
clipped_grad = grad * (max_norm / g_norm.clamp_min_(eps))
return torch.where(g_norm > max_norm, clipped_grad, grad)
|
(p: torch.Tensor, grad: torch.Tensor, agc_eps: float, agc_clip_val: float, eps: float = 1e-06) -> torch.Tensor
|
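A small sketch of calling `agc` directly on a parameter/gradient pair; the tensors are arbitrary and the import uses the module path from this record.

```python
import torch

from pytorch_optimizer.optimizer.agc import agc  # module path taken from the record above

p = torch.randn(64, 128)            # e.g. a linear layer weight
grad = torch.randn(64, 128) * 10.0  # deliberately oversized gradient

clipped = agc(p, grad, agc_eps=1e-3, agc_clip_val=1e-2)
# Rows whose gradient norm exceeds agc_clip_val * max(row norm of p, agc_eps)
# are rescaled to that bound; the remaining rows pass through unchanged.
```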
718,538 |
pytorch_optimizer.optimizer.gc
|
centralize_gradient
|
Gradient Centralization (GC).
:param x: torch.Tensor. gradient.
:param gc_conv_only: bool. 'False' for both conv & fc layers.
|
def centralize_gradient(x: torch.Tensor, gc_conv_only: bool = False):
r"""Gradient Centralization (GC).
:param x: torch.Tensor. gradient.
:param gc_conv_only: bool. 'False' for both conv & fc layers.
"""
size: int = x.dim()
if (gc_conv_only and size > 3) or (not gc_conv_only and size > 1):
x.add_(-x.mean(dim=tuple(range(1, size)), keepdim=True))
|
(x: torch.Tensor, gc_conv_only: bool = False)
|
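A quick demonstration of the in-place behaviour of `centralize_gradient`; the import uses the module path from this record.

```python
import torch

from pytorch_optimizer.optimizer.gc import centralize_gradient  # module path taken from the record above

grad = torch.randn(8, 3, 3, 3)                # shaped like a conv-kernel gradient
centralize_gradient(grad, gc_conv_only=True)  # in-place: subtracts the mean over dims (1, 2, 3)

print(grad.mean(dim=(1, 2, 3)))               # ~0 for every output channel
```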
718,539 |
pytorch_optimizer.optimizer.utils
|
clip_grad_norm
|
Clip gradient norms.
During combination with FSDP, will also ensure that grad norms are aggregated across all workers,
since each worker only stores their shard of the gradients.
:param parameters: PARAMETERS. Parameters whose gradients we wish to clip.
:param max_norm: float. Maximum norm we wish the gradients to have. If non-positive, then we will not perform
clipping.
:param sync: bool. Boolean indicating whether we should aggregate across the distributed group. Used only in
combination with FSDP.
:returns: The gradient norm across all parameters, before clipping.
|
def clip_grad_norm(
parameters: PARAMETERS,
max_norm: float = 0.0,
sync: bool = False,
) -> Union[torch.Tensor, float]: # pragma: no cover
r"""Clip gradient norms.
During combination with FSDP, will also ensure that grad norms are aggregated across all workers,
since each worker only stores their shard of the gradients.
:param parameters: PARAMETERS. Parameters whose gradients we wish to clip.
:param max_norm: float. Maximum norm we wish the gradients to have. If non-positive, then we will not perform
clipping.
:param sync: bool. Boolean indicating whether we should aggregate across the distributed group. Used only in
combination with FSDP.
:returns: The gradient norm across all parameters, before clipping.
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
# make sure any generators are expanded
parameters = list(parameters)
# if syncing we need to manually perform the clipping so that we aggregate properly
if max_norm > 0 and not sync:
return clip_grad_norm_(parameters, max_norm)
norm_sq = sum(p.grad.norm() ** 2 for p in parameters if p.grad is not None)
if sync:
# also need to get the norms from all the other sharded works in FSDP
all_reduce(norm_sq)
grad_norm = math.sqrt(norm_sq)
if max_norm > 0:
clip_coefficient = max_norm / (grad_norm + 1e-6)
for p in parameters:
p.grad.detach().mul_(clip_coefficient)
return grad_norm
|
(parameters: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], max_norm: float = 0.0, sync: bool = False) -> Union[torch.Tensor, float]
|
718,540 |
pytorch_optimizer.optimizer.shampoo_utils
|
compute_power_schur_newton
|
Compute G^{-1/p} using a coupled Newton iteration.
See for example equation 3.2 on page 9 of:
A Schur-Newton Method for the Matrix p-th Root and its Inverse by Chun-Hua Guo and Nicholas J. Higham
SIAM Journal on Matrix Analysis and Applications, 2006, Vol. 28, No. 3 : pp. 788-804
https://pdfs.semanticscholar.org/0abe/7f77433cf5908bfe2b79aa91af881da83858.pdf.
The best value for z is (1 + p) * (c_max^{1/p} - c_min^{1/p}) / (c_max^{1+1/p} - c_min^{1+1/p})
where c_max and c_min are the largest and smallest singular values of mat_g.
The above estimate assumes that c_max > c_min * 2^p. One can instead use z = (1 + p) / tf.trace(mat_g),
but it is less accurate and hence needs more iterations to converge.
If we want the method to always converge, use z = 1 / norm(mat_g) or z = 1 / tf.trace(mat_g),
but these can result in many extra iterations.
:param mat_g: torch.Tensor. A square positive semi-definite matrix.
:param p: int. a positive integer.
:param max_iters: int. Stop iterating after this many rounds.
:param error_tolerance: float. Threshold for stopping iteration.
:param ridge_epsilon: float. We add this times I to G to make it positive definite.
For scaling, we multiply it by the largest eigenvalue of G.
:param max_error_ratio: float. Sometimes the error increases after an iteration before decreasing and
converging; the 1.2 factor bounds the maximal allowed increase.
|
def precondition_gradient(self, grad: torch.Tensor) -> torch.Tensor:
r"""Get preconditioned gradient."""
return grad / (torch.sqrt(self.statistics) + self.diagonal_eps)
|
(mat_g: torch.Tensor, p: int, max_iters: int = 100, error_tolerance: float = 0.001, ridge_epsilon: float = 1e-06, max_error_ratio: float = 1.2) -> torch.Tensor
|
718,541 |
pytorch_optimizer.optimizer.shampoo_utils
|
compute_power_svd
|
Compute G^{-1/p} using a SVD.
The SVD is calculated on the GPU. SVD on the CPU is sometimes faster than on the GPU, but based on several
experiments, CUDA is usually much faster here.
:param matrix: torch.Tensor. a square positive semi-definite matrix.
:param power: float. rank.
|
def precondition_gradient(self, grad: torch.Tensor) -> torch.Tensor:
r"""Get preconditioned gradient."""
return grad / (torch.sqrt(self.statistics) + self.diagonal_eps)
|
(matrix: torch.Tensor, power: float) -> torch.Tensor
|
718,542 |
pytorch_optimizer
|
create_optimizer
|
Build optimizer.
:param model: nn.Module. model.
:param optimizer_name: str. name of optimizer.
:param lr: float. learning rate.
:param weight_decay: float. weight decay.
:param wd_ban_list: List[str]. weight decay ban list by layer.
:param use_lookahead: bool. use lookahead.
|
def create_optimizer(
model: nn.Module,
optimizer_name: str,
lr: float = 1e-3,
weight_decay: float = 0.0,
wd_ban_list: List[str] = ('bias', 'LayerNorm.bias', 'LayerNorm.weight'),
use_lookahead: bool = False,
**kwargs,
):
r"""Build optimizer.
:param model: nn.Module. model.
:param optimizer_name: str. name of optimizer.
:param lr: float. learning rate.
:param weight_decay: float. weight decay.
:param wd_ban_list: List[str]. weight decay ban list by layer.
:param use_lookahead: bool. use lookahead.
"""
optimizer_name = optimizer_name.lower()
parameters = (
get_optimizer_parameters(model, weight_decay, wd_ban_list) if weight_decay > 0.0 else model.parameters()
)
optimizer = load_optimizer(optimizer_name)
if optimizer_name == 'alig':
optimizer = optimizer(parameters, max_lr=lr, **kwargs)
else:
optimizer = optimizer(parameters, lr=lr, **kwargs)
if use_lookahead:
optimizer = Lookahead(
optimizer,
k=kwargs.get('k', 5),
alpha=kwargs.get('alpha', 0.5),
pullback_momentum=kwargs.get('pullback_momentum', 'none'),
)
return optimizer
|
(model: torch.nn.modules.module.Module, optimizer_name: str, lr: float = 0.001, weight_decay: float = 0.0, wd_ban_list: List[str] = ('bias', 'LayerNorm.bias', 'LayerNorm.weight'), use_lookahead: bool = False, **kwargs)
|
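A hypothetical call to `create_optimizer`; the optimizer name `'adamp'` is assumed to be one of the registered keys in `OPTIMIZERS`, and the toy model exists only for the example.

```python
import torch.nn as nn

from pytorch_optimizer import create_optimizer

model = nn.Sequential(nn.Linear(10, 10), nn.LayerNorm(10), nn.Linear(10, 2))

optimizer = create_optimizer(
    model,
    optimizer_name='adamp',  # assumed to be a registered optimizer name
    lr=1e-3,
    weight_decay=1e-2,       # parameters whose names match the default ban list (here, every '*.bias') get no decay
    use_lookahead=True,      # wraps the result in Lookahead(k=5, alpha=0.5, pullback_momentum='none')
)
```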
718,543 |
pytorch_optimizer.lr_scheduler.experimental.deberta_v3_lr_scheduler
|
deberta_v3_large_lr_scheduler
|
DeBERTa-v3 large layer-wise lr scheduler.
Reference : https://github.com/gilfernandes/commonlit.
:param model: nn.Module. model. based on Huggingface Transformers.
:param layer_low_threshold: int. start of the 12 layers.
:param layer_middle_threshold: int. end of the 24 layers.
:param head_param_start: int. where the backbone ends (head starts).
:param base_lr: float. base lr.
:param head_lr: float. head_lr.
:param wd: float. weight decay.
|
def deberta_v3_large_lr_scheduler(
model: nn.Module,
layer_low_threshold: int = 195,
layer_middle_threshold: int = 323,
head_param_start: int = 390,
base_lr: float = 2e-5,
head_lr: float = 1e-4,
wd: float = 1e-2,
) -> PARAMETERS:
"""DeBERTa-v3 large layer-wise lr scheduler.
Reference : https://github.com/gilfernandes/commonlit.
:param model: nn.Module. model. based on Huggingface Transformers.
:param layer_low_threshold: int. start of the 12 layers.
:param layer_middle_threshold: int. end of the 24 layers.
:param head_param_start: int. where the backbone ends (head starts).
:param base_lr: float. base lr.
:param head_lr: float. head_lr.
:param wd: float. weight decay.
"""
named_parameters = list(model.named_parameters())
backbone_parameters = named_parameters[:head_param_start]
head_parameters = named_parameters[head_param_start:]
head_group = [params for (_, params) in head_parameters]
parameters = [{'params': head_group, 'lr': head_lr}]
for layer_num, (name, params) in enumerate(backbone_parameters):
weight_decay: float = 0.0 if ('bias' in name) or ('LayerNorm.weight' in name) else wd
lr = base_lr / 2.5  # 8e-6 with the default base_lr of 2e-5
if layer_num >= layer_middle_threshold:
lr = base_lr / 0.5  # 4e-5 with the default base_lr of 2e-5
elif layer_num >= layer_low_threshold:
lr = base_lr
parameters.append({'params': params, 'weight_decay': weight_decay, 'lr': lr})
return parameters
|
(model: torch.nn.modules.module.Module, layer_low_threshold: int = 195, layer_middle_threshold: int = 323, head_param_start: int = 390, base_lr: float = 2e-05, head_lr: float = 0.0001, wd: float = 0.01) -> Union[Iterable[Dict], Iterable[torch.Tensor], NoneType]
|
718,544 |
pytorch_optimizer.optimizer.utils
|
disable_running_stats
|
Disable running stats (momentum) of BatchNorm.
|
def disable_running_stats(model):
r"""Disable running stats (momentum) of BatchNorm."""
def _disable(module):
if isinstance(module, _BatchNorm):
module.backup_momentum = module.momentum
module.momentum = 0
model.apply(_disable)
|
(model)
|
718,545 |
pytorch_optimizer.optimizer.utils
|
enable_running_stats
|
Enable running stats (momentum) of BatchNorm.
|
def enable_running_stats(model):
r"""Enable running stats (momentum) of BatchNorm."""
def _enable(module):
if isinstance(module, _BatchNorm) and hasattr(module, 'backup_momentum'):
module.momentum = module.backup_momentum
model.apply(_enable)
|
(model)
|
718,546 |
pytorch_optimizer.lr_scheduler.chebyshev
|
get_chebyshev_lr
|
Get chebyshev learning rate.
:param lr: float. learning rate.
:param epoch: int. current epochs.
:param num_epochs: int. number of total epochs.
:param is_warmup: bool. whether warm-up stage or not.
|
def get_chebyshev_lr(lr: float, epoch: int, num_epochs: int, is_warmup: bool = False) -> float:
r"""Get chebyshev learning rate.
:param lr: float. learning rate.
:param epoch: int. current epochs.
:param num_epochs: int. number of total epochs.
:param is_warmup: bool. whether warm-up stage or not.
"""
if is_warmup:
return lr
epoch_power: int = np.power(2, int(np.log2(num_epochs - 1)) + 1) if num_epochs > 1 else 1
scheduler = get_chebyshev_schedule(epoch_power)
idx: int = epoch - 2
if idx < 0:
idx = 0
elif idx > len(scheduler) - 1:
idx = len(scheduler) - 1
chebyshev_value: float = scheduler[idx]
return lr * chebyshev_value
|
(lr: float, epoch: int, num_epochs: int, is_warmup: bool = False) -> float
|
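A sketch of driving an optimizer with `get_chebyshev_lr` inside a training loop; the SGD parameter is a stand-in for a real model, and the import uses the module path from this record.

```python
import torch

from pytorch_optimizer.lr_scheduler.chebyshev import get_chebyshev_lr  # module path taken from the record above

base_lr, num_epochs = 1e-1, 16
param = torch.zeros(4, requires_grad=True)
optimizer = torch.optim.SGD([param], lr=base_lr)

for epoch in range(num_epochs):
    lr = get_chebyshev_lr(base_lr, epoch, num_epochs, is_warmup=(epoch == 0))
    for group in optimizer.param_groups:
        group['lr'] = lr
    # ... run one epoch of training with the permuted Chebyshev step size ...
```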
718,547 |
pytorch_optimizer.lr_scheduler.chebyshev
|
get_chebyshev_schedule
|
Get Chebyshev schedules.
:param num_epochs: int. number of total epochs.
|
def get_chebyshev_schedule(num_epochs: int) -> np.ndarray:
r"""Get Chebyshev schedules.
:param num_epochs: int. number of total epochs.
"""
steps: np.ndarray = get_chebyshev_steps(num_epochs)
perm: np.ndarray = get_chebyshev_permutation(num_epochs - 2)
return steps[perm]
|
(num_epochs: int) -> numpy.ndarray
|
718,548 |
pytorch_optimizer.optimizer.utils
|
get_global_gradient_norm
|
Get global gradient norm.
|
def clip_grad_norm(
parameters: PARAMETERS,
max_norm: float = 0.0,
sync: bool = False,
) -> Union[torch.Tensor, float]: # pragma: no cover
r"""Clip gradient norms.
During combination with FSDP, will also ensure that grad norms are aggregated across all workers,
since each worker only stores their shard of the gradients.
:param parameters: PARAMETERS. Parameters whose gradients we wish to clip.
:param max_norm: float. Maximum norm we wish the gradients to have. If non-positive, then we will not perform
clipping.
:param sync: bool. Boolean indicating whether we should aggregate across the distributed group. Used only in
combination with FSDP.
:returns: The gradient norm across all parameters, before clipping.
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
# make sure any generators are expanded
parameters = list(parameters)
# if syncing we need to manually perform the clipping so that we aggregate properly
if max_norm > 0 and not sync:
return clip_grad_norm_(parameters, max_norm)
norm_sq = sum(p.grad.norm() ** 2 for p in parameters if p.grad is not None)
if sync:
# also need to get the norms from all the other sharded works in FSDP
all_reduce(norm_sq)
grad_norm = math.sqrt(norm_sq)
if max_norm > 0:
clip_coefficient = max_norm / (grad_norm + 1e-6)
for p in parameters:
p.grad.detach().mul_(clip_coefficient)
return grad_norm
|
(param_groups: List[Dict], device: torch.device) -> torch.Tensor
|
718,549 |
pytorch_optimizer.optimizer.utils
|
get_optimizer_parameters
|
Get optimizer parameters while filtering specified modules.
:param model_or_parameter: Union[nn.Module, List]. model or parameters.
:param weight_decay: float. weight_decay.
:param wd_ban_list: List[str]. ban list not to set weight decay.
:returns: PARAMETERS. new parameter list.
|
def get_optimizer_parameters(
model_or_parameter: Union[nn.Module, List],
weight_decay: float,
wd_ban_list: List[str] = ('bias', 'LayerNorm.bias', 'LayerNorm.weight'),
) -> PARAMETERS:
r"""Get optimizer parameters while filtering specified modules.
:param model_or_parameter: Union[nn.Module, List]. model or parameters.
:param weight_decay: float. weight_decay.
:param wd_ban_list: List[str]. ban list not to set weight decay.
:returns: PARAMETERS. new parameter list.
"""
if isinstance(model_or_parameter, nn.Module):
model_or_parameter = list(model_or_parameter.named_parameters())
return [
{
'params': [p for n, p in model_or_parameter if p.requires_grad and not any(nd in n for nd in wd_ban_list)],
'weight_decay': weight_decay,
},
{
'params': [p for n, p in model_or_parameter if p.requires_grad and any(nd in n for nd in wd_ban_list)],
'weight_decay': 0.0,
},
]
|
(model_or_parameter: Union[torch.nn.modules.module.Module, List], weight_decay: float, wd_ban_list: List[str] = ('bias', 'LayerNorm.bias', 'LayerNorm.weight')) -> Union[Iterable[Dict], Iterable[torch.Tensor], NoneType]
|
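A small sketch showing how the two returned parameter groups are typically fed to an optimizer; the import uses the module path from this record.

```python
import torch
import torch.nn as nn

from pytorch_optimizer.optimizer.utils import get_optimizer_parameters  # module path taken from the record above

model = nn.Sequential(nn.Linear(10, 10), nn.LayerNorm(10), nn.Linear(10, 2))

param_groups = get_optimizer_parameters(model, weight_decay=1e-2)
# param_groups[0]: parameters that receive weight decay
# param_groups[1]: parameters whose names match the ban list (here, every '*.bias') with weight_decay=0.0

optimizer = torch.optim.AdamW(param_groups, lr=1e-3)
```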
718,550 |
pytorch_optimizer
|
get_supported_loss_functions
| null |
def get_supported_loss_functions() -> List[nn.Module]:
return LOSS_FUNCTION_LIST
|
() -> List[torch.nn.modules.module.Module]
|
718,551 |
pytorch_optimizer
|
get_supported_lr_schedulers
| null |
def get_supported_lr_schedulers() -> List[SCHEDULER]:
return LR_SCHEDULER_LIST
|
() -> List[Type[torch.optim.lr_scheduler._LRScheduler]]
|
718,552 |
pytorch_optimizer
|
get_supported_optimizers
| null |
def get_supported_optimizers() -> List[OPTIMIZER]:
return OPTIMIZER_LIST
|
() -> List[Type[torch.optim.optimizer.Optimizer]]
|
718,553 |
pytorch_optimizer
|
load_bnb_optimizer
|
load bnb optimizer instance.
|
def load_bnb_optimizer(optimizer: str) -> OPTIMIZER: # pragma: no cover
r"""load bnb optimizer instance."""
if 'sgd8bit' in optimizer:
return bnb.optim.SGD8bit
if 'adam8bit' in optimizer:
return bnb.optim.Adam8bit
if 'adamw8bit' in optimizer:
return bnb.optim.AdamW8bit
if 'lamb8bit' in optimizer:
return bnb.optim.LAMB8bit
if 'lars8bit' in optimizer:
return bnb.optim.LARS8bit
if 'lion8bit' in optimizer:
return bnb.optim.Lion8bit
if 'adagrad8bit' in optimizer:
return bnb.optim.Adagrad8bit
if 'rmsprop8bit' in optimizer:
return bnb.optim.RMSprop8bit
raise NotImplementedError(f'[-] not implemented optimizer : {optimizer}')
|
(optimizer: str) -> Type[torch.optim.optimizer.Optimizer]
|
718,554 |
pytorch_optimizer
|
load_lr_scheduler
| null |
def load_lr_scheduler(lr_scheduler: str) -> SCHEDULER:
lr_scheduler: str = lr_scheduler.lower()
if lr_scheduler not in LR_SCHEDULERS:
raise NotImplementedError(f'[-] not implemented lr_scheduler : {lr_scheduler}')
return LR_SCHEDULERS[lr_scheduler]
|
(lr_scheduler: str) -> Type[torch.optim.lr_scheduler._LRScheduler]
|
718,555 |
pytorch_optimizer
|
load_optimizer
| null |
def load_optimizer(optimizer: str) -> OPTIMIZER:
optimizer: str = optimizer.lower()
if optimizer.startswith('bnb'):
if HAS_BNB and torch.cuda.is_available():
return load_bnb_optimizer(optimizer) # pragma: no cover
raise ImportError(f'[-] bitsandbytes and CUDA required for bnb optimizers : {optimizer}')
if optimizer not in OPTIMIZERS:
raise NotImplementedError(f'[-] not implemented optimizer : {optimizer}')
return OPTIMIZERS[optimizer]
|
(optimizer: str) -> Type[torch.optim.optimizer.Optimizer]
|
718,558 |
pytorch_optimizer.optimizer.shampoo_utils
|
merge_small_dims
|
Merge small dimensions.
If there are some small dimensions, we collapse them
e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if max_dim = 1024
[1, 2, 768, 1, 2048] --> [2, 768, 2048].
:param shape_to_merge: List[int]. Shape to merge small dimensions.
:param max_dim: int. Maximal dimension of output shape used in merging.
|
def merge_small_dims(shape_to_merge: List[int], max_dim: int) -> List[int]:
r"""Merge small dimensions.
If there are some small dimensions, we collapse them
e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if max_dim = 1024
[1, 2, 768, 1, 2048] --> [2, 768, 2048].
:param shape_to_merge: List[int]. Shape to merge small dimensions.
:param max_dim: int. Maximal dimension of output shape used in merging.
"""
merged_shape: List[int] = []
product: int = 1
for dim in shape_to_merge:
product *= dim
if product > max_dim:
merged_shape.append(product // dim)
product = dim
merged_shape.append(product)
return merged_shape if len(merged_shape) > 1 else [1]
|
(shape_to_merge: List[int], max_dim: int) -> List[int]
|
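A worked check of `merge_small_dims` using the two cases from its docstring plus the degenerate all-ones shape; the import uses the module path from this record.

```python
from pytorch_optimizer.optimizer.shampoo_utils import merge_small_dims  # module path taken from the record above

# The two cases documented above:
assert merge_small_dims([1, 2, 512, 1, 2048, 1, 3, 4], max_dim=1024) == [1024, 2048, 12]
assert merge_small_dims([1, 2, 768, 1, 2048], max_dim=1024) == [2, 768, 2048]

# A shape that collapses entirely is returned as [1]:
assert merge_small_dims([1, 1, 1], max_dim=1024) == [1]
```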
718,560 |
pytorch_optimizer.optimizer.utils
|
normalize_gradient
|
Normalize gradient with stddev.
:param x: torch.Tensor. gradient.
:param use_channels: bool. channel-wise normalization.
:param epsilon: float. eps.
|
def normalize_gradient(x: torch.Tensor, use_channels: bool = False, epsilon: float = 1e-8):
r"""Normalize gradient with stddev.
:param x: torch.Tensor. gradient.
:param use_channels: bool. channel-wise normalization.
:param epsilon: float. eps.
"""
size: int = x.dim()
if size > 1 and use_channels:
s = x.std(dim=tuple(range(1, size)), keepdim=True).add_(epsilon)
x.div_(s)
elif torch.numel(x) > 2:
s = x.std().add_(epsilon)
x.div_(s)
|
(x: torch.Tensor, use_channels: bool = False, epsilon: float = 1e-08)
|
718,562 |
pytorch_optimizer.optimizer.shampoo_utils
|
power_iteration
|
Compute the maximum eigenvalue of matrix, for scaling.
In most cases, the power-iteration method is faster than a full eigendecomposition for a symmetric PSD matrix.
The per-iteration validation of the singular-value error was also removed to speed things up.
:param mat_g: torch.Tensor. the symmetric PSD matrix.
:param num_iters: int. Number of iterations.
|
def precondition_gradient(self, grad: torch.Tensor) -> torch.Tensor:
r"""Get preconditioned gradient."""
return grad / (torch.sqrt(self.statistics) + self.diagonal_eps)
|
(mat_g: torch.Tensor, num_iters: int = 100) -> torch.Tensor
|
718,563 |
pytorch_optimizer.optimizer.utils
|
reduce_max_except_dim
|
Perform reduce-max along all dimensions except the given dim.
:param x: torch.Tensor. tensor to reduce-max.
:param dim: int. dimension to exclude.
|
def clip_grad_norm(
parameters: PARAMETERS,
max_norm: float = 0.0,
sync: bool = False,
) -> Union[torch.Tensor, float]: # pragma: no cover
r"""Clip gradient norms.
During combination with FSDP, will also ensure that grad norms are aggregated across all workers,
since each worker only stores their shard of the gradients.
:param parameters: PARAMETERS. Parameters whose gradients we wish to clip.
:param max_norm: float. Maximum norm we wish the gradients to have. If non-positive, then we will not perform
clipping.
:param sync: bool. Boolean indicating whether we should aggregate across the distributed group. Used only in
combination with FSDP.
:returns: The gradient norm across all parameters, before clipping.
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
# make sure any generators are expanded
parameters = list(parameters)
# if syncing we need to manually perform the clipping so that we aggregate properly
if max_norm > 0 and not sync:
return clip_grad_norm_(parameters, max_norm)
norm_sq = sum(p.grad.norm() ** 2 for p in parameters if p.grad is not None)
if sync:
# also need to get the norms from all the other sharded works in FSDP
all_reduce(norm_sq)
grad_norm = math.sqrt(norm_sq)
if max_norm > 0:
clip_coefficient = max_norm / (grad_norm + 1e-6)
for p in parameters:
p.grad.detach().mul_(clip_coefficient)
return grad_norm
|
(x: torch.Tensor, dim: int) -> torch.Tensor
|
718,564 |
pytorch_optimizer.loss.dice
|
soft_dice_score
|
Get soft dice score.
:param output: torch.Tensor. predicted segments.
:param target: torch.Tensor. ground truth segments.
:param label_smooth: float. label smoothing factor.
:param eps: float. epsilon.
:param dims: Optional[Tuple[int, ...]]. target dimensions to reduce.
|
def soft_dice_score(
output: torch.Tensor,
target: torch.Tensor,
label_smooth: float = 0.0,
eps: float = 1e-6,
dims: Optional[Tuple[int, ...]] = None,
) -> torch.Tensor:
r"""Get soft dice score.
:param output: torch.Tensor. predicted segments.
:param target: torch.Tensor. ground truth segments.
:param label_smooth: float. label smoothing factor.
:param eps: float. epsilon.
:param dims: Optional[Tuple[int, ...]]. target dimensions to reduce.
"""
if dims is not None:
intersection = torch.sum(output * target, dim=dims)
cardinality = torch.sum(output + target, dim=dims)
else:
intersection = torch.sum(output * target)
cardinality = torch.sum(output + target)
return (2.0 * intersection + label_smooth) / (cardinality + label_smooth).clamp_min(eps)
|
(output: torch.Tensor, target: torch.Tensor, label_smooth: float = 0.0, eps: float = 1e-06, dims: Optional[Tuple[int, ...]] = None) -> torch.Tensor
|
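A minimal sketch of computing a per-channel dice score and turning it into a loss; note that `soft_dice_score` expects probabilities, so the sigmoid is applied by the caller here. The import uses the module path from this record.

```python
import torch

from pytorch_optimizer.loss.dice import soft_dice_score  # module path taken from the record above

probs = torch.sigmoid(torch.randn(2, 3, 32, 32))       # probabilities, not logits
target = torch.randint(0, 2, (2, 3, 32, 32)).float()

score = soft_dice_score(probs, target, label_smooth=0.0, eps=1e-6, dims=(0, 2, 3))  # one score per channel
loss = 1.0 - score.mean()
```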
718,565 |
pytorch_optimizer.loss.jaccard
|
soft_jaccard_score
|
Get soft jaccard score.
:param output: torch.Tensor. predicted segments.
:param target: torch.Tensor. ground truth segments.
:param label_smooth: float. label smoothing factor.
:param eps: float. epsilon.
:param dims: Optional[Tuple[int, ...]]. target dimensions to reduce.
|
def soft_jaccard_score(
output: torch.Tensor,
target: torch.Tensor,
label_smooth: float = 0.0,
eps: float = 1e-6,
dims: Optional[Tuple[int, ...]] = None,
) -> torch.Tensor:
r"""Get soft jaccard score.
:param output: torch.Tensor. predicted segments.
:param target: torch.Tensor. ground truth segments.
:param label_smooth: float. label smoothing factor.
:param eps: float. epsilon.
:param dims: Optional[Tuple[int, ...]]. target dimensions to reduce.
"""
if dims is not None:
intersection = torch.sum(output * target, dim=dims)
cardinality = torch.sum(output + target, dim=dims)
else:
intersection = torch.sum(output * target)
cardinality = torch.sum(output + target)
return (intersection + label_smooth) / (cardinality - intersection + label_smooth).clamp_min(eps)
|
(output: torch.Tensor, target: torch.Tensor, label_smooth: float = 0.0, eps: float = 1e-06, dims: Optional[Tuple[int, ...]] = None) -> torch.Tensor
|
718,567 |
pytorch_optimizer.optimizer.utils
|
unit_norm
|
Get norm of unit.
|
def unit_norm(x: torch.Tensor, norm: float = 2.0) -> torch.Tensor:
r"""Get norm of unit."""
keep_dim: bool = True
dim: Optional[Union[int, Tuple[int, ...]]] = None
x_len: int = len(x.shape)
if x_len <= 1:
keep_dim = False
elif x_len in (2, 3): # linear layers
dim = 1
elif x_len == 4: # conv kernels
dim = (1, 2, 3)
else:
dim = tuple(range(1, x_len))
return x.norm(p=norm, dim=dim, keepdim=keep_dim)
|
(x: torch.Tensor, norm: float = 2.0) -> torch.Tensor
|
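A quick illustration of the dimension handling in `unit_norm`; the import uses the module path from this record.

```python
import torch

from pytorch_optimizer.optimizer.utils import unit_norm  # module path taken from the record above

w_linear = torch.randn(64, 128)     # 2D tensor -> per-row norm, shape (64, 1)
w_conv = torch.randn(32, 16, 3, 3)  # 4D tensor -> per-filter norm, shape (32, 1, 1, 1)

print(unit_norm(w_linear).shape, unit_norm(w_conv).shape)
```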
718,568 |
masky.core
|
Masky
| null |
class Masky:
def __init__(
self,
ca,
user,
template="User",
domain=".",
password=None,
hashes=None,
kerberos=False,
dc_ip=None,
quiet=True,
stealth=False,
exe_path=None,
file_args=False,
):
self.__ca = ca
self.__template = template
self.__domain = domain
self.__user = user
self.__password = password
self.__hashes = hashes
self.__kerberos = kerberos
self.__dc_ip = dc_ip
self.__dc_target = None
self.__quiet = quiet
self.__stealth = stealth
self.__exe_path = exe_path
self.__file_args = file_args
self.__tracker = Tracker()
def __process_options(self):
try:
self.__dc_target = socket.gethostbyname(self.__domain)
except:
self.__dc_target = self.__dc_ip
if self.__dc_target == "0.0.0.0":
self.__dc_target = self.__dc_ip
if not self.__dc_target:
err_msg = f"The provided domain '{self.__domain}' cannot be resolved, please set the full FQDN or provide the DC IP address"
logger.error(err_msg)
self.__tracker.last_error_msg = err_msg
return False
return True
def __process_certificate(self, user_data):
certipy_auth = Authenticate(
self.__tracker, self.__domain, self.__dc_ip, user_data, False, False
)
if certipy_auth.authenticate():
return True
return False
def run(self, target):
self.__tracker = Tracker()
add_result_level()
if self.__quiet:
logger.disabled = True
if not self.__process_options():
return None
if not scan_port(target):
logger.info("The port tcp/445 seems not exposed, skipping this target")
return None
s = Smb(
self.__tracker,
self.__domain,
self.__user,
password=self.__password,
hashes=self.__hashes,
kerberos=self.__kerberos,
dc_target=self.__dc_target,
stealth=self.__stealth,
exe_path=self.__exe_path,
file_args=self.__file_args,
)
rslt = None
try:
rslt = s.exec_masky(target, self.__ca, self.__template)
except:
return rslt
if not rslt or not rslt.users:
logger.info("No user session was hijacked")
return None
self.__tracker.nb_hijacked_users = len(rslt.users)
if len(rslt.users) == 1:
logger.info(f"{len(rslt.users)} user session was hijacked")
else:
logger.info(f"{len(rslt.users)} user sessions were hijacked")
for user_data in rslt.users:
logger.debug(
f"Start processing PFX of the user '{user_data.domain}\\{user_data.name}'"
)
if not self.__process_certificate(user_data):
logger.warning(
f"Failed to process the gathered certificate of user '{user_data.domain}\\{user_data.name}'"
)
else:
logger.debug(
f"End processing PFX of the user '{user_data.domain}\\{user_data.name}'"
)
return rslt
def get_last_tracker(self):
return self.__tracker
|
(ca, user, template='User', domain='.', password=None, hashes=None, kerberos=False, dc_ip=None, quiet=True, stealth=False, exe_path=None, file_args=False)
|
718,569 |
masky.core
|
__process_certificate
| null |
def __process_certificate(self, user_data):
certipy_auth = Authenticate(
self.__tracker, self.__domain, self.__dc_ip, user_data, False, False
)
if certipy_auth.authenticate():
return True
return False
|
(self, user_data)
|
718,570 |
masky.core
|
__process_options
| null |
def __process_options(self):
try:
self.__dc_target = socket.gethostbyname(self.__domain)
except:
self.__dc_target = self.__dc_ip
if self.__dc_target == "0.0.0.0":
self.__dc_target = self.__dc_ip
if not self.__dc_target:
err_msg = f"The provided domain '{self.__domain}' cannot be resolved, please set the full FQDN or provide the DC IP address"
logger.error(err_msg)
self.__tracker.last_error_msg = err_msg
return False
return True
|
(self)
|
718,571 |
masky.core
|
__init__
| null |
def __init__(
self,
ca,
user,
template="User",
domain=".",
password=None,
hashes=None,
kerberos=False,
dc_ip=None,
quiet=True,
stealth=False,
exe_path=None,
file_args=False,
):
self.__ca = ca
self.__template = template
self.__domain = domain
self.__user = user
self.__password = password
self.__hashes = hashes
self.__kerberos = kerberos
self.__dc_ip = dc_ip
self.__dc_target = None
self.__quiet = quiet
self.__stealth = stealth
self.__exe_path = exe_path
self.__file_args = file_args
self.__tracker = Tracker()
|
(self, ca, user, template='User', domain='.', password=None, hashes=None, kerberos=False, dc_ip=None, quiet=True, stealth=False, exe_path=None, file_args=False)
|
718,572 |
masky.core
|
get_last_tracker
| null |
def get_last_tracker(self):
return self.__tracker
|
(self)
|
718,573 |
masky.core
|
run
| null |
def run(self, target):
self.__tracker = Tracker()
add_result_level()
if self.__quiet:
logger.disabled = True
if not self.__process_options():
return None
if not scan_port(target):
logger.info("The port tcp/445 seems not exposed, skipping this target")
return None
s = Smb(
self.__tracker,
self.__domain,
self.__user,
password=self.__password,
hashes=self.__hashes,
kerberos=self.__kerberos,
dc_target=self.__dc_target,
stealth=self.__stealth,
exe_path=self.__exe_path,
file_args=self.__file_args,
)
rslt = None
try:
rslt = s.exec_masky(target, self.__ca, self.__template)
except:
return rslt
if not rslt or not rslt.users:
logger.info("No user session was hijacked")
return None
self.__tracker.nb_hijacked_users = len(rslt.users)
if len(rslt.users) == 1:
logger.info(f"{len(rslt.users)} user session was hijacked")
else:
logger.info(f"{len(rslt.users)} user sessions were hijacked")
for user_data in rslt.users:
logger.debug(
f"Start processing PFX of the user '{user_data.domain}\\{user_data.name}'"
)
if not self.__process_certificate(user_data):
logger.warning(
f"Failed to process the gathered certificate of user '{user_data.domain}\\{user_data.name}'"
)
else:
logger.debug(
f"End processing PFX of the user '{user_data.domain}\\{user_data.name}'"
)
return rslt
|
(self, target)
|
718,579 |
hypothesis_graphql._strategies.strategy
|
from_schema
|
A strategy for generating valid queries and mutations for the given GraphQL schema.
:param schema: GraphQL schema as a string or `graphql.GraphQLSchema`.
:param fields: Restrict generated fields to ones in this list.
:param custom_scalars: Strategies for generating custom scalars.
:param print_ast: A function to convert the generated AST to a string.
:param allow_x00: Determines whether to allow the generation of `\x00` bytes within strings.
:param codec: Specifies the codec used for generating strings.
|
@cacheable # type: ignore
def from_schema(
schema: Union[str, graphql.GraphQLSchema],
*,
fields: Optional[Iterable[str]] = None,
custom_scalars: Optional[CustomScalarStrategies] = None,
print_ast: AstPrinter = graphql.print_ast,
allow_x00: bool = True,
codec: Optional[str] = "utf-8",
) -> st.SearchStrategy[str]:
r"""A strategy for generating valid queries and mutations for the given GraphQL schema.
:param schema: GraphQL schema as a string or `graphql.GraphQLSchema`.
:param fields: Restrict generated fields to ones in this list.
:param custom_scalars: Strategies for generating custom scalars.
:param print_ast: A function to convert the generated AST to a string.
:param allow_x00: Determines whether to allow the generation of `\x00` bytes within strings.
:param codec: Specifies the codec used for generating strings.
"""
parsed_schema = validation.maybe_parse_schema(schema)
if custom_scalars:
validation.validate_custom_scalars(custom_scalars)
query = parsed_schema.query_type
mutation = parsed_schema.mutation_type
query_fields = None
mutation_fields = None
if fields is not None:
# Split fields based on the type they are defined on & validate them
fields = tuple(fields)
available_fields = []
if query is not None:
query_fields = tuple(field for field in fields if field in query.fields)
available_fields.extend(query.fields)
if mutation is not None:
mutation_fields = tuple(field for field in fields if field in mutation.fields)
available_fields.extend(mutation.fields)
validation.validate_fields(fields, available_fields)
alphabet = _build_alphabet(allow_x00=allow_x00, codec=codec)
strategy = GraphQLStrategy(parsed_schema, alphabet=alphabet, custom_scalars=custom_scalars or {})
strategies = [
strategy.selections(type_, fields=type_fields).map(node_factory).map(print_ast)
for (type_, type_fields, node_factory) in (
(query, query_fields, make_query),
(mutation, mutation_fields, make_mutation),
)
# If a type is defined in the schema and either has no field restrictions or has at least one selected field
if type_ is not None and (type_fields is None or len(type_fields) > 0)
]
if not strategies:
raise InvalidArgument("Query or Mutation type must be provided")
return reduce(or_, strategies)
|
(schema: Union[str, graphql.type.schema.GraphQLSchema], *, fields: Optional[Iterable[str]] = None, custom_scalars: Optional[Dict[str, hypothesis.strategies.SearchStrategy[graphql.language.ast.ValueNode]]] = None, print_ast: Callable[[graphql.language.ast.Node], str] = <function print_ast at 0x7fb911e60820>, allow_x00: bool = True, codec: Optional[str] = 'utf-8') -> hypothesis.strategies.SearchStrategy[str]
|
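A small property-based test built on `from_schema`; the toy schema is made up, and the import assumes `from_schema` is re-exported at the package root (it is defined in `hypothesis_graphql._strategies.strategy`).

```python
import graphql
from hypothesis import given, settings

from hypothesis_graphql import from_schema  # assumed to be re-exported at the package root

SCHEMA = """
type Book {
  title: String
  author: String
}
type Query {
  books: [Book]
}
"""

@given(query=from_schema(SCHEMA))
@settings(max_examples=10)
def test_generated_queries_are_valid(query: str) -> None:
    graphql.parse(query)  # every generated document must be syntactically valid GraphQL
```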
718,580 |
hypothesis_graphql._strategies.strategy
|
mutations
|
A strategy for generating valid mutations for the given GraphQL schema.
The output mutation will contain a subset of fields defined in the `Mutation` type.
:param schema: GraphQL schema as a string or `graphql.GraphQLSchema`.
:param fields: Restrict generated fields to ones in this list.
:param custom_scalars: Strategies for generating custom scalars.
:param print_ast: A function to convert the generated AST to a string.
:param allow_x00: Determines whether to allow the generation of `\x00` bytes within strings.
:param codec: Specifies the codec used for generating strings.
|
@cacheable # type: ignore
def mutations(
schema: Union[str, graphql.GraphQLSchema],
*,
fields: Optional[Iterable[str]] = None,
custom_scalars: Optional[CustomScalarStrategies] = None,
print_ast: AstPrinter = graphql.print_ast,
allow_x00: bool = True,
codec: Optional[str] = "utf-8",
) -> st.SearchStrategy[str]:
r"""A strategy for generating valid mutations for the given GraphQL schema.
The output mutation will contain a subset of fields defined in the `Mutation` type.
:param schema: GraphQL schema as a string or `graphql.GraphQLSchema`.
:param fields: Restrict generated fields to ones in this list.
:param custom_scalars: Strategies for generating custom scalars.
:param print_ast: A function to convert the generated AST to a string.
:param allow_x00: Determines whether to allow the generation of `\x00` bytes within strings.
:param codec: Specifies the codec used for generating strings.
"""
parsed_schema = validation.maybe_parse_schema(schema)
if parsed_schema.mutation_type is None:
raise InvalidArgument("Mutation type is not defined in the schema")
alphabet = _build_alphabet(allow_x00=allow_x00, codec=codec)
return (
_make_strategy(
parsed_schema,
type_=parsed_schema.mutation_type,
fields=fields,
custom_scalars=custom_scalars,
alphabet=alphabet,
)
.map(make_mutation)
.map(print_ast)
)
|
(schema: Union[str, graphql.type.schema.GraphQLSchema], *, fields: Optional[Iterable[str]] = None, custom_scalars: Optional[Dict[str, hypothesis.strategies.SearchStrategy[graphql.language.ast.ValueNode]]] = None, print_ast: Callable[[graphql.language.ast.Node], str] = <function print_ast at 0x7fb911e60820>, allow_x00: bool = True, codec: Optional[str] = 'utf-8') -> hypothesis.strategies.SearchStrategy[str]
|
718,582 |
hypothesis_graphql._strategies.strategy
|
queries
|
A strategy for generating valid queries for the given GraphQL schema.
The output query will contain a subset of fields defined in the `Query` type.
:param schema: GraphQL schema as a string or `graphql.GraphQLSchema`.
:param fields: Restrict generated fields to ones in this list.
:param custom_scalars: Strategies for generating custom scalars.
:param print_ast: A function to convert the generated AST to a string.
:param allow_x00: Determines whether to allow the generation of `\x00` bytes within strings.
:param codec: Specifies the codec used for generating strings.
|
@cacheable # type: ignore
def queries(
schema: Union[str, graphql.GraphQLSchema],
*,
fields: Optional[Iterable[str]] = None,
custom_scalars: Optional[CustomScalarStrategies] = None,
print_ast: AstPrinter = graphql.print_ast,
allow_x00: bool = True,
codec: Optional[str] = "utf-8",
) -> st.SearchStrategy[str]:
r"""A strategy for generating valid queries for the given GraphQL schema.
The output query will contain a subset of fields defined in the `Query` type.
:param schema: GraphQL schema as a string or `graphql.GraphQLSchema`.
:param fields: Restrict generated fields to ones in this list.
:param custom_scalars: Strategies for generating custom scalars.
:param print_ast: A function to convert the generated AST to a string.
:param allow_x00: Determines whether to allow the generation of `\x00` bytes within strings.
:param codec: Specifies the codec used for generating strings.
"""
parsed_schema = validation.maybe_parse_schema(schema)
if parsed_schema.query_type is None:
raise InvalidArgument("Query type is not defined in the schema")
alphabet = _build_alphabet(allow_x00=allow_x00, codec=codec)
return (
_make_strategy(
parsed_schema,
type_=parsed_schema.query_type,
fields=fields,
custom_scalars=custom_scalars,
alphabet=alphabet,
)
.map(make_query)
.map(print_ast)
)
|
(schema: Union[str, graphql.type.schema.GraphQLSchema], *, fields: Optional[Iterable[str]] = None, custom_scalars: Optional[Dict[str, hypothesis.strategies.SearchStrategy[graphql.language.ast.ValueNode]]] = None, print_ast: Callable[[graphql.language.ast.Node], str] = <function print_ast at 0x7fb911e60820>, allow_x00: bool = True, codec: Optional[str] = 'utf-8') -> hypothesis.strategies.SearchStrategy[str]
|
718,585 |
hypothesis_graphql._strategies.validation
|
validate_scalar_strategy
| null |
def validate_scalar_strategy(name: str, strategy: st.SearchStrategy) -> None:
if not isinstance(name, str):
raise InvalidArgument(f"scalar name {name!r} must be a string")
if not isinstance(strategy, st.SearchStrategy):
raise InvalidArgument(
f"custom_scalars[{name!r}]={strategy!r} must be a Hypothesis "
"strategy which generates AST nodes matching this scalar."
)
|
(name: str, strategy: hypothesis.strategies.SearchStrategy) -> NoneType
|
718,586 |
pluginlib._objects
|
BlacklistEntry
|
Args:
plugin_type(str): Parent type
name(str): Plugin name
version(str): Plugin version
operator(str): Comparison operator ('=', '==', '!=', '<', '<=', '>', '>=')
**Container for blacklist entry**
If ``operator`` is :py:data:`None` or not specified, it defaults to '=='.
One of ``plugin_type``, ``name``, or ``version`` must be specified.
If any are unspecified or :py:data:`None`, they are treated as a wildcard.
In order to be more compatible with parsed text,
the order of ``operator`` and ``version`` can be swapped. The following are equivalent:
.. code-block:: python
BlacklistEntry('parser', 'json', '1.0', '>=')
.. code-block:: python
BlacklistEntry('parser', 'json', '>=', '1.0')
``version`` is evaluated using :py:func:`pkg_resources.parse_version`
and should conform to `PEP 440`_
.. _PEP 440: https://www.python.org/dev/peps/pep-0440/
|
class BlacklistEntry(object):
"""
Args:
plugin_type(str): Parent type
name(str): Plugin name
version(str): Plugin version
operator(str): Comparison operator ('=', '==', '!=', '<', '<=', '>', '>=')
**Container for blacklist entry**
If ``operator`` is :py:data:`None` or not specified, it defaults to '=='.
One of ``plugin_type``, ``name``, or ``version`` must be specified.
If any are unspecified or :py:data:`None`, they are treated as a wildcard.
In order to be more compatible with parsed text,
the order of ``operator`` and ``version`` can be swapped. The following are equivalent:
.. code-block:: python
BlacklistEntry('parser', 'json', '1.0', '>=')
.. code-block:: python
BlacklistEntry('parser', 'json', '>=', '1.0')
``version`` is evaluated using :py:func:`pkg_resources.parse_version`
and should conform to `PEP 440`_
.. _PEP 440: https://www.python.org/dev/peps/pep-0440/
"""
__slots__ = ('type', 'name', 'version', 'operator')
def __init__(self, plugin_type=None, name=None, version=None, operator=None):
if plugin_type is name is version is None:
raise AttributeError('plugin_type, name, or version must be specified')
self.type = plugin_type
self.name = name
if version in OPERATORS:
self.operator = version
self.version = operator
if self.version is None:
raise AttributeError('version must be specified when operator is specified')
else:
self.version = version
self.operator = operator
if self.version is not None and not isinstance(self.version, BASESTRING):
raise TypeError('version must be a string, received %s' % type(self.version).__name__)
if self.operator is None:
self.operator = '=='
elif self.operator not in OPERATORS:
raise AttributeError("Unsupported operator '%s'" % self.operator)
def __repr__(self):
attrs = (self.type, self.name, self.operator, self.version)
return '%s(%s)' % (self.__class__.__name__, ', '.join([repr(attr) for attr in attrs]))
|
(plugin_type=None, name=None, version=None, operator=None)
|
718,587 |
pluginlib._objects
|
__init__
| null |
def __init__(self, plugin_type=None, name=None, version=None, operator=None):
if plugin_type is name is version is None:
raise AttributeError('plugin_type, name, or version must be specified')
self.type = plugin_type
self.name = name
if version in OPERATORS:
self.operator = version
self.version = operator
if self.version is None:
raise AttributeError('version must be specified when operator is specified')
else:
self.version = version
self.operator = operator
if self.version is not None and not isinstance(self.version, BASESTRING):
raise TypeError('version must be a string, received %s' % type(self.version).__name__)
if self.operator is None:
self.operator = '=='
elif self.operator not in OPERATORS:
raise AttributeError("Unsupported operator '%s'" % self.operator)
|
(self, plugin_type=None, name=None, version=None, operator=None)
|
718,588 |
pluginlib._objects
|
__repr__
| null |
def __repr__(self):
attrs = (self.type, self.name, self.operator, self.version)
return '%s(%s)' % (self.__class__.__name__, ', '.join([repr(attr) for attr in attrs]))
|
(self)
|
718,589 |
pluginlib.exceptions
|
EntryPointWarning
|
Warning for errors with importing entry points
Subclass of :py:exc:`ImportWarning`
|
class EntryPointWarning(ImportWarning):
"""
Warning for errors with importing entry points
Subclass of :py:exc:`ImportWarning`
"""
| null |
718,590 |
pluginlib._parent
|
Parent
|
Args:
plugin_type(str): Plugin type
group(str): Group to store plugins
**Class Decorator for plugin parents**
``plugin_type`` determines under what attribute child plugins will be accessed in
:py:attr:`PluginLoader.plugins`.
When not specified, the class name is used.
``group`` specifies that the parent and all child plugins are members of
the specified plugin group. A :py:attr:`PluginLoader` instance only accesses the
plugins group specified when it was initialized.
When not specified, the default group is used.
``group`` should be specified if plugins for different projects could be accessed
in a single program, such as in libraries and frameworks.
|
def _check_skipload(self):
"""
Determine if subclass should be skipped
_skipload_ is either a Boolean or callable that returns a Boolean
"""
# pylint: disable=protected-access
if callable(self.subclass._skipload_):
result = self.subclass._skipload_()
if isinstance(result, tuple):
skip, self.message = result
else:
skip = result
if skip:
self.errorcode = 156
elif self.subclass._skipload_:
self.errorcode = 50
self.message = 'Skipload flag is True'
|
(plugin_type=None, group=None)
|
718,591 |
pluginlib._parent
|
Plugin
|
**Mixin class for plugins.
All parents and child plugins will inherit from this class automatically.**
**Class Attributes**
*The following attributes can be set as class attributes in subclasses*
.. autoattribute:: _alias_
.. autoattribute:: _skipload_
.. autoattribute:: _version_
**Class Properties**
.. autoattribute:: name
:annotation:
:py:class:`str` -- :attr:`_alias_` if set or falls back to class name
.. autoattribute:: plugin_group
.. autoattribute:: plugin_type
.. autoattribute:: version
:annotation:
:py:class:`str` -- Returns :attr:`_version_` if set,
otherwise falls back to module ``__version__`` or :py:data:`None`
|
class Plugin(object):
"""
**Mixin class for plugins.
All parents and child plugins will inherit from this class automatically.**
**Class Attributes**
*The following attributes can be set as class attributes in subclasses*
.. autoattribute:: _alias_
.. autoattribute:: _skipload_
.. autoattribute:: _version_
**Class Properties**
.. autoattribute:: name
:annotation:
:py:class:`str` -- :attr:`_alias_` if set or falls back to class name
.. autoattribute:: plugin_group
.. autoattribute:: plugin_type
.. autoattribute:: version
:annotation:
:py:class:`str` -- Returns :attr:`_version_` if set,
otherwise falls back to module ``__version__`` or :py:data:`None`
"""
__slots__ = ()
_alias_ = None
""":py:class:`str` -- Friendly name to refer to plugin.
Accessed through :attr:`~Plugin.name` property."""
_skipload_ = False
""":py:class:`bool` -- When True, plugin is not loaded.
Can also be a static or class method that returns a tuple ``(bool, message)``"""
_version_ = None
""":py:class:`str` -- Plugin version. Should adhere to `PEP 440`_.
Accessed through :attr:`~Plugin.version` property.
.. _PEP 440: https://www.python.org/dev/peps/pep-0440/"""
@ClassProperty
def version(cls): # noqa: N805 # pylint: disable=no-self-argument
"""
:py:class:`str` -- Returns :attr:`_version_` if set,
otherwise falls back to module ``__version__`` or :py:data:`None`
"""
return cls._version_ or getattr(sys.modules.get(cls.__module__, None),
'__version__', None)
@ClassProperty
def name(cls): # noqa: N805 # pylint: disable=no-self-argument
"""
:py:class:`str` -- :attr:`_alias_` if set or falls back to class name
"""
return cls._alias_ or cls.__name__ # pylint: disable=no-member
@ClassProperty
def plugin_type(cls): # noqa: N805 # pylint: disable=no-self-argument
"""
:py:class:`str` -- ``plugin_type`` of :py:class:`~pluginlib.Parent` class
"""
return cls._type_ # pylint: disable=no-member
@ClassProperty
def plugin_group(cls): # noqa: N805 # pylint: disable=no-self-argument
"""
:py:class:`str` -- ``group`` of :py:class:`~pluginlib.Parent` class
"""
return cls._group_ # pylint: disable=no-member
|
()
|
718,592 |
pluginlib.exceptions
|
PluginImportError
|
**Exception class for Pluginlib import errors**
Subclass of :py:exc:`PluginlibError`
**Custom Instance Attributes**
.. py:attribute:: friendly
:annotation: = None
:py:class:`str` -- May contain abbreviated traceback
When an exception is raised while importing a module, an attempt is made to create a
"friendly" version of the output with a traceback limited to the plugin itself
or, failing that, the loader module.
|
class PluginImportError(PluginlibError):
"""
**Exception class for Pluginlib import errors**
Subclass of :py:exc:`PluginlibError`
**Custom Instance Attributes**
.. py:attribute:: friendly
:annotation: = None
:py:class:`str` -- May contain abbreviated traceback
When an exception is raised while importing a module, an attempt is made to create a
"friendly" version of the output with a traceback limited to the plugin itself
or, failing that, the loader module.
"""
|
(*args, **kwargs)
|
718,593 |
pluginlib.exceptions
|
__init__
| null |
def __init__(self, *args, **kwargs):
super(PluginlibError, self).__init__(*args)
self.friendly = kwargs.get('friendly', None)
|
(self, *args, **kwargs)
|
718,594 |
pluginlib._loader
|
PluginLoader
|
Args:
group(str): Group to retrieve plugins from
library(str): Standard library package
modules(list): Iterable of modules to import recursively
paths(list): Iterable of paths to import recursively
entry_point(str): `Entry point`_ for additional plugins
blacklist(list): Iterable of :py:class:`BlacklistEntry` objects or tuples
prefix_package(str): Alternative prefix for imported packages
type_filter(list): Iterable of parent plugin types to allow
**Interface for importing and accessing plugins**
Plugins are loaded from sources specified at initialization when
:py:meth:`load_modules` is called or when the :py:attr:`plugins` property is first accessed.
``group`` specifies the group whose members will be returned by :py:attr:`plugins`
This corresponds directly with the ``group`` attribute for :py:func:`Parent`.
When not specified, the default group is used.
``group`` should be specified if plugins for different projects could be accessed
in a single program, such as in libraries and frameworks.
``library`` indicates the package of a program's standard library.
This should be a package which is always loaded.
``modules`` is an iterable of optional modules to load.
If a package is given, it will be loaded recursively.
``paths`` is an iterable of optional paths to find modules to load.
The paths are searched recursively and imported under the namespace specified by
``prefix_package``.
``entry_point`` specifies an `entry point <Entry point_>`_ group to identify
additional modules and packages which should be loaded.
``blacklist`` is an iterable containing :py:class:`BlacklistEntry` objects or tuples
with arguments for new :py:class:`BlacklistEntry` objects.
``prefix_package`` must be the name of an existing package under which to import the paths
specified in ``paths``. Because the package paths will be traversed recursively, this should
be an empty path.
``type_filter`` limits plugin types to only those specified. A specified type is not
guaranteed to be available.
.. _Entry point: https://packaging.python.org/specifications/entry-points/
|
class PluginLoader(object):
"""
Args:
group(str): Group to retrieve plugins from
library(str): Standard library package
modules(list): Iterable of modules to import recursively
paths(list): Iterable of paths to import recursively
entry_point(str): `Entry point`_ for additional plugins
blacklist(list): Iterable of :py:class:`BlacklistEntry` objects or tuples
prefix_package(str): Alternative prefix for imported packages
type_filter(list): Iterable of parent plugin types to allow
**Interface for importing and accessing plugins**
Plugins are loaded from sources specified at initialization when
:py:meth:`load_modules` is called or when the :py:attr:`plugins` property is first accessed.
``group`` specifies the group whose members will be returned by :py:attr:`plugins`
This corresponds directly with the ``group`` attribute for :py:func:`Parent`.
When not specified, the default group is used.
``group`` should be specified if plugins for different projects could be accessed
in a single program, such as in libraries and frameworks.
``library`` indicates the package of a program's standard library.
This should be a package which is always loaded.
``modules`` is an iterable of optional modules to load.
If a package is given, it will be loaded recursively.
``paths`` is an iterable of optional paths to find modules to load.
The paths are searched recursively and imported under the namespace specified by
``prefix_package``.
``entry_point`` specifies an `entry point <Entry point_>`_ group to identify
additional modules and packages which should be loaded.
``blacklist`` is an iterable containing :py:class:`BlacklistEntry` objects or tuples
with arguments for new :py:class:`BlacklistEntry` objects.
``prefix_package`` must be the name of an existing package under which to import the paths
specified in ``paths``. Because the package paths will be traversed recursively, this should
be an empty path.
``type_filter`` limits plugin types to only those specified. A specified type is not
guaranteed to be available.
.. _Entry point: https://packaging.python.org/specifications/entry-points/
"""
def __init__(self, group=None, library=None, modules=None, paths=None, entry_point=None,
blacklist=None, prefix_package='pluginlib.importer', type_filter=None):
# Make sure we got iterables
for argname, arg in (('modules', modules), ('paths', paths), ('blacklist', blacklist),
('type_filter', type_filter)):
if not isinstance(arg, (NoneType, Iterable)) or isinstance(arg, BASESTRING):
raise TypeError("Expecting iterable for '%s', received %s" % (argname, type(arg)))
# Make sure we got strings
for argname, arg in (('library', library), ('entry_point', entry_point),
('prefix_package', prefix_package)):
if not isinstance(arg, (NoneType, BASESTRING)):
raise TypeError("Expecting string for '%s', received %s" % (argname, type(arg)))
self.group = group or '_default'
self.library = library
self.modules = modules or tuple()
self.paths = paths or tuple()
self.entry_point = entry_point
self.prefix_package = prefix_package
self.type_filter = type_filter
self.loaded = False
if blacklist:
self.blacklist = []
for entry in blacklist:
if isinstance(entry, BlacklistEntry):
pass
elif isinstance(entry, Iterable):
try:
entry = BlacklistEntry(*entry)
except (AttributeError, TypeError) as e:
# pylint: disable=raise-missing-from
raise AttributeError("Invalid blacklist entry '%s': %s " % (entry, e))
else:
raise AttributeError("Invalid blacklist entry '%s': Not an iterable" % entry)
self.blacklist.append(entry)
self.blacklist = tuple(self.blacklist)
else:
self.blacklist = None
def __repr__(self):
args = []
for attr, default in (('group', '_default'), ('library', None), ('modules', None),
('paths', None), ('entry_point', None), ('blacklist', None),
('prefix_package', 'pluginlib.importer'), ('type_filter', None)):
val = getattr(self, attr)
if default and val == default:
continue
if val:
args.append('%s=%r' % (attr, val))
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
def load_modules(self):
"""
Locate and import modules from locations specified during initialization.
Locations include:
- Program's standard library (``library``)
- `Entry points <Entry point_>`_ (``entry_point``)
- Specified modules (``modules``)
- Specified paths (``paths``)
If a malformed child plugin class is imported, a :py:exc:`PluginWarning` will be issued,
the class is skipped, and loading operations continue.
If an invalid `entry point <Entry point_>`_ is specified, an :py:exc:`EntryPointWarning`
is issued and loading operations continue.
"""
# Start with standard library
if self.library:
LOGGER.info('Loading plugins from standard library')
libmod = _import_module(self.library)
_recursive_import(libmod)
# Get entry points
if self.entry_point:
LOGGER.info('Loading plugins from entry points group %s', self.entry_point)
for epoint in iter_entry_points(group=self.entry_point):
try:
mod = _import_module(epoint)
except PluginImportError as e:
warnings.warn("Module %s can not be loaded for entry point %s: %s" %
(epoint.module_name, epoint.name, e), EntryPointWarning)
continue
# If we have a package, walk it
if ismodule(mod):
_recursive_import(mod)
else:
warnings.warn("Entry point '%s' is not a module or package" % epoint.name,
EntryPointWarning)
# Load auxiliary modules
if self.modules:
for mod in self.modules:
LOGGER.info('Loading plugins from %s', mod)
_recursive_import(_import_module(mod))
# Load auxiliary paths
if self.paths:
# Import each path recursively
for path in self.paths:
modpath = os.path.realpath(path)
if os.path.isdir(modpath):
LOGGER.info("Recursively importing plugins from path `%s`", path)
_recursive_path_import(path, self.prefix_package)
else:
LOGGER.info("Configured plugin path '%s' is not a valid directory", path)
self.loaded = True
@property
def plugins(self):
"""
Newest version of all plugins in the group filtered by ``blacklist``
Returns:
dict: Nested dictionary of plugins accessible through dot-notation.
Plugins are returned in a nested dictionary, but can also be accessed through dot-notation.
Just as when accessing an undefined dictionary key with index-notation,
a :py:exc:`KeyError` will be raised if the plugin type or plugin does not exist.
Parent types are always included.
Child plugins will only be included if a valid, non-blacklisted plugin is available.
"""
if not self.loaded:
self.load_modules()
# pylint: disable=protected-access
return get_plugins()[self.group]._filter(blacklist=self.blacklist, newest_only=True,
type_filter=self.type_filter)
@property
def plugins_all(self):
"""
All resulting versions of all plugins in the group filtered by ``blacklist``
Returns:
dict: Nested dictionary of plugins accessible through dot-notation.
Similar to :py:attr:`plugins`, but lowest level is an :py:class:`~collections.OrderedDict`
of all unfiltered plugin versions for the given plugin type and name.
Parent types are always included.
Child plugins will only be included if at least one valid, non-blacklisted plugin
is available.
The newest plugin can be retrieved by accessing the last item in the dictionary.
.. code-block:: python
plugins = loader.plugins_all
tuple(plugins.parser.json.values())[-1]
"""
if not self.loaded:
self.load_modules()
# pylint: disable=protected-access
return get_plugins()[self.group]._filter(blacklist=self.blacklist,
type_filter=self.type_filter)
def get_plugin(self, plugin_type, name, version=None):
"""
Args:
plugin_type(str): Parent type
name(str): Plugin name
version(str): Plugin version
Returns:
:py:class:`Plugin`: Plugin, or :py:data:`None` if plugin can't be found
Retrieve a specific plugin. ``blacklist`` and ``type_filter`` still apply.
If ``version`` is not specified, the newest available version is returned.
"""
if not self.loaded:
self.load_modules()
# pylint: disable=protected-access
return get_plugins()[self.group]._filter(blacklist=self.blacklist,
newest_only=True,
type_filter=self.type_filter,
type=plugin_type,
name=name,
version=version)
|
(group=None, library=None, modules=None, paths=None, entry_point=None, blacklist=None, prefix_package='pluginlib.importer', type_filter=None)
|
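A minimal usage sketch tying the arguments above together; the group, package, path, and entry-point names are assumptions for illustration:
import pluginlib

loader = pluginlib.PluginLoader(
    group='myapp',                      # same group passed to pluginlib.Parent
    library='myapp.lib',                # always-loaded standard library package
    modules=['extra_plugins'],          # optional modules to import recursively
    paths=['/opt/myapp/plugins'],       # optional directories to scan
    entry_point='myapp.plugins',        # optional entry-point group
    blacklist=[('parser', 'xml')],      # tuples become BlacklistEntry objects
    type_filter=['parser'],             # only expose these parent types
)

plugins = loader.plugins                # triggers load_modules() on first access
parser = plugins.parser.json()          # dot-notation lookup, then instantiate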
718,595 |
pluginlib._loader
|
__init__
| null |
def __init__(self, group=None, library=None, modules=None, paths=None, entry_point=None,
blacklist=None, prefix_package='pluginlib.importer', type_filter=None):
# Make sure we got iterables
for argname, arg in (('modules', modules), ('paths', paths), ('blacklist', blacklist),
('type_filter', type_filter)):
if not isinstance(arg, (NoneType, Iterable)) or isinstance(arg, BASESTRING):
raise TypeError("Expecting iterable for '%s', received %s" % (argname, type(arg)))
# Make sure we got strings
for argname, arg in (('library', library), ('entry_point', entry_point),
('prefix_package', prefix_package)):
if not isinstance(arg, (NoneType, BASESTRING)):
raise TypeError("Expecting string for '%s', received %s" % (argname, type(arg)))
self.group = group or '_default'
self.library = library
self.modules = modules or tuple()
self.paths = paths or tuple()
self.entry_point = entry_point
self.prefix_package = prefix_package
self.type_filter = type_filter
self.loaded = False
if blacklist:
self.blacklist = []
for entry in blacklist:
if isinstance(entry, BlacklistEntry):
pass
elif isinstance(entry, Iterable):
try:
entry = BlacklistEntry(*entry)
except (AttributeError, TypeError) as e:
# pylint: disable=raise-missing-from
raise AttributeError("Invalid blacklist entry '%s': %s " % (entry, e))
else:
raise AttributeError("Invalid blacklist entry '%s': Not an iterable" % entry)
self.blacklist.append(entry)
self.blacklist = tuple(self.blacklist)
else:
self.blacklist = None
|
(self, group=None, library=None, modules=None, paths=None, entry_point=None, blacklist=None, prefix_package='pluginlib.importer', type_filter=None)
|
718,596 |
pluginlib._loader
|
__repr__
| null |
def __repr__(self):
args = []
for attr, default in (('group', '_default'), ('library', None), ('modules', None),
('paths', None), ('entry_point', None), ('blacklist', None),
('prefix_package', 'pluginlib.importer'), ('type_filter', None)):
val = getattr(self, attr)
if default and val == default:
continue
if val:
args.append('%s=%r' % (attr, val))
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
|
(self)
|
718,597 |
pluginlib._loader
|
get_plugin
|
Args:
plugin_type(str): Parent type
name(str): Plugin name
version(str): Plugin version
Returns:
:py:class:`Plugin`: Plugin, or :py:data:`None` if plugin can't be found
Retrieve a specific plugin. ``blacklist`` and ``type_filter`` still apply.
If ``version`` is not specified, the newest available version is returned.
|
def get_plugin(self, plugin_type, name, version=None):
"""
Args:
plugin_type(str): Parent type
name(str): Plugin name
version(str): Plugin version
Returns:
:py:class:`Plugin`: Plugin, or :py:data:`None` if plugin can't be found
Retrieve a specific plugin. ``blacklist`` and ``type_filter`` still apply.
If ``version`` is not specified, the newest available version is returned.
"""
if not self.loaded:
self.load_modules()
# pylint: disable=protected-access
return get_plugins()[self.group]._filter(blacklist=self.blacklist,
newest_only=True,
type_filter=self.type_filter,
type=plugin_type,
name=name,
version=version)
|
(self, plugin_type, name, version=None)
|
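For example, continuing the hypothetical ``loader`` and plugin names from the sketch above:
plugin_cls = loader.get_plugin('parser', 'json', version='1.0.0')
if plugin_cls is None:
    print('no matching plugin: blacklisted, filtered out, or never loaded')
else:
    parser = plugin_cls()
    print(plugin_cls.name, plugin_cls.version)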
718,598 |
pluginlib._loader
|
load_modules
|
Locate and import modules from locations specified during initialization.
Locations include:
- Program's standard library (``library``)
- `Entry points <Entry point_>`_ (``entry_point``)
- Specified modules (``modules``)
- Specified paths (``paths``)
If a malformed child plugin class is imported, a :py:exc:`PluginWarning` will be issued,
the class is skipped, and loading operations continue.
If an invalid `entry point <Entry point_>`_ is specified, an :py:exc:`EntryPointWarning`
is issued and loading operations continue.
|
def load_modules(self):
"""
Locate and import modules from locations specified during initialization.
Locations include:
- Program's standard library (``library``)
- `Entry points <Entry point_>`_ (``entry_point``)
- Specified modules (``modules``)
- Specified paths (``paths``)
If a malformed child plugin class is imported, a :py:exc:`PluginWarning` will be issued,
the class is skipped, and loading operations continue.
If an invalid `entry point <Entry point_>`_ is specified, an :py:exc:`EntryPointWarning`
is issued and loading operations continue.
"""
# Start with standard library
if self.library:
LOGGER.info('Loading plugins from standard library')
libmod = _import_module(self.library)
_recursive_import(libmod)
# Get entry points
if self.entry_point:
LOGGER.info('Loading plugins from entry points group %s', self.entry_point)
for epoint in iter_entry_points(group=self.entry_point):
try:
mod = _import_module(epoint)
except PluginImportError as e:
warnings.warn("Module %s can not be loaded for entry point %s: %s" %
(epoint.module_name, epoint.name, e), EntryPointWarning)
continue
# If we have a package, walk it
if ismodule(mod):
_recursive_import(mod)
else:
warnings.warn("Entry point '%s' is not a module or package" % epoint.name,
EntryPointWarning)
# Load auxiliary modules
if self.modules:
for mod in self.modules:
LOGGER.info('Loading plugins from %s', mod)
_recursive_import(_import_module(mod))
# Load auxiliary paths
if self.paths:
# Import each path recursively
for path in self.paths:
modpath = os.path.realpath(path)
if os.path.isdir(modpath):
LOGGER.info("Recursively importing plugins from path `%s`", path)
_recursive_path_import(path, self.prefix_package)
else:
LOGGER.info("Configured plugin path '%s' is not a valid directory", path)
self.loaded = True
|
(self)
|
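A sketch of calling ``load_modules`` eagerly while surfacing the warnings it may emit (:py:exc:`PluginWarning`, :py:exc:`EntryPointWarning`); ``myapp.lib`` is an assumed package name:
import warnings
import pluginlib

loader = pluginlib.PluginLoader(library='myapp.lib')
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    loader.load_modules()
for w in caught:
    # e.g. PluginWarning for malformed children, EntryPointWarning for bad entry points
    print('%s: %s' % (w.category.__name__, w.message))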
718,599 |
pluginlib.exceptions
|
PluginWarning
|
Warning for errors with imported plugins
Subclass of :py:exc:`UserWarning`
|
class PluginWarning(UserWarning):
"""
Warning for errors with imported plugins
Subclass of :py:exc:`UserWarning`
"""
| null |
718,600 |
pluginlib.exceptions
|
PluginlibError
|
**Base exception class for Pluginlib exceptions**
All Pluginlib exceptions are derived from this class.
Subclass of :py:exc:`Exception`
**Custom Instance Attributes**
.. py:attribute:: friendly
:annotation: = None
:py:class:`str` -- Optional friendly output
|
class PluginlibError(Exception):
"""
**Base exception class for Pluginlib exceptions**
All Pluginlib exceptions are derived from this class.
Subclass of :py:exc:`Exception`
**Custom Instance Attributes**
.. py:attribute:: friendly
:annotation: = None
:py:class:`str` -- Optional friendly output
"""
def __init__(self, *args, **kwargs):
super(PluginlibError, self).__init__(*args)
self.friendly = kwargs.get('friendly', None)
|
(*args, **kwargs)
|
718,606 |
pluginlib._util
|
abstractattribute
|
A class to be used to identify abstract attributes
.. code-block:: python
@pluginlib.Parent
class ParentClass(object):
abstract_attribute = pluginlib.abstractattribute
|
class abstractattribute(object): # noqa: N801 # pylint: disable=invalid-name
"""
A class to be used to identify abstract attributes
.. code-block:: python
@pluginlib.Parent
class ParentClass(object):
abstract_attribute = pluginlib.abstractattribute
"""
__isabstractmethod__ = True
|
()
|
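A short sketch of the child side of the contract (names are illustrative): a subclass that assigns the attribute loads normally, while one that omits it is treated as malformed.
import pluginlib

@pluginlib.Parent
class ParentClass:
    abstract_attribute = pluginlib.abstractattribute

class GoodChild(ParentClass):
    abstract_attribute = 42   # satisfies the abstract attribute

# A child omitting ``abstract_attribute`` would trigger a PluginWarning
# and be skipped during loading (see load_modules above).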
718,607 |
pluginlib._util
|
abstractclassmethod
|
A decorator for abstract class methods
Used in parent classes to identify class methods required in child plugins
This decorator is included to support older versions of Python and
should be considered deprecated as of Python 3.3.
The preferred implementation is:
.. code-block:: python
@classmethod
@pluginlib.abstractmethod
def abstract_classmethod(cls):
return cls.foo
|
class abstractclassmethod(classmethod): # noqa: N801 # pylint: disable=invalid-name
"""
A decorator for abstract class methods
Used in parent classes to identify class methods required in child plugins
This decorator is included to support older versions of Python and
should be considered deprecated as of Python 3.3.
The preferred implementation is:
.. code-block:: python
@classmethod
@pluginlib.abstractmethod
def abstract_classmethod(cls):
return cls.foo
"""
__isabstractmethod__ = True
def __init__(self, func):
super(abstractclassmethod, self).__init__(abstractmethod(func))
| null |
718,608 |
pluginlib._util
|
__init__
| null |
def __init__(self, func):
super(abstractclassmethod, self).__init__(abstractmethod(func))
|
(self, func)
|
718,610 |
abc
|
abstractproperty
|
A decorator indicating abstract properties.
Deprecated, use 'property' with 'abstractmethod' instead:
class C(ABC):
@property
@abstractmethod
def my_abstract_property(self):
...
|
class abstractproperty(property):
"""A decorator indicating abstract properties.
Deprecated, use 'property' with 'abstractmethod' instead:
class C(ABC):
@property
@abstractmethod
def my_abstract_property(self):
...
"""
__isabstractmethod__ = True
|
(fget=None, fset=None, fdel=None, doc=None)
|
718,611 |
pluginlib._util
|
abstractstaticmethod
|
A decorator for abstract static methods
Used in parent classes to identify static methods required in child plugins
This decorator is included to support older versions of Python and
should be considered deprecated as of Python 3.3.
The preferred implementation is:
.. code-block:: python
@staticmethod
@pluginlib.abstractmethod
def abstract_staticmethod():
return 'foo'
|
class abstractstaticmethod(staticmethod): # noqa: N801 # pylint: disable=invalid-name
"""
A decorator for abstract static methods
Used in parent classes to identify static methods required in child plugins
This decorator is included to support older versions of Python and
should be considered deprecated as of Python 3.3.
The preferred implementation is:
.. code-block:: python
@staticmethod
@pluginlib.abstractmethod
def abstract_staticmethod():
return 'foo'
"""
__isabstractmethod__ = True
def __init__(self, func):
super(abstractstaticmethod, self).__init__(abstractmethod(func))
| null |
718,612 |
pluginlib._util
|
__init__
| null |
def __init__(self, func):
super(abstractstaticmethod, self).__init__(abstractmethod(func))
|
(self, func)
|
718,614 |
autopage
|
AutoPager
|
A context manager that launches a pager for the output if appropriate.
If the output stream is not to the console (i.e. it is piped or
redirected), no pager will be launched.
|
class AutoPager:
"""
A context manager that launches a pager for the output if appropriate.
If the output stream is not to the console (i.e. it is piped or
redirected), no pager will be launched.
"""
def __init__(self,
output_stream: Optional[TextIO] = None, *,
pager_command: command.CommandType = command.DefaultPager(),
allow_color: bool = True,
line_buffering: Optional[bool] = None,
reset_on_exit: bool = False,
errors: Optional[ErrorStrategy] = None):
self._use_stdout = output_stream is None or output_stream is sys.stdout
self._out = sys.stdout if output_stream is None else output_stream
self._tty = (not self._out.closed) and self._out.isatty()
self._command = command.get_pager_command(pager_command)
self._config = command.PagerConfig(
color=allow_color,
line_buffering_requested=bool(line_buffering),
reset_terminal=reset_on_exit,
)
self._set_line_buffering = line_buffering
self._set_errors = (ErrorStrategy(errors) if errors is not None
else None)
self._pager: Optional[subprocess.Popen] = None
self._exit_code = 0
def to_terminal(self) -> bool:
"""Return whether the output stream is a terminal."""
return self._tty
def __enter__(self) -> TextIO:
# Only invoke the pager if the output is going to a tty; if it is
# being sent to a file or pipe then we don't want the pager involved
if self.to_terminal() and self._command.command() != ['cat']:
try:
return self._paged_stream()
except OSError:
pass
self._reconfigure_output_stream()
return self._out
def _line_buffering(self) -> bool:
if self._set_line_buffering is None:
return getattr(self._out, 'line_buffering', self._tty)
return self._set_line_buffering
def _encoding(self) -> str:
return getattr(self._out, 'encoding', 'ascii')
def _errors(self) -> str:
if self._set_errors is None:
return getattr(self._out, 'errors', ErrorStrategy.STRICT.value)
return self._set_errors.value
def _reconfigure_output_stream(self) -> None:
if self._set_line_buffering is None and self._set_errors is None:
return
if not isinstance(self._out, io.TextIOWrapper):
return
# Python 3.7 & later
if hasattr(self._out, 'reconfigure'):
self._out.reconfigure(line_buffering=self._set_line_buffering,
errors=(self._set_errors.value
if self._set_errors is not None
else None))
# Python 3.6
elif (self._out.line_buffering != self._line_buffering()
or self._out.errors != self._errors()):
# Pure-python I/O
if (hasattr(self._out, '_line_buffering')
and hasattr(self._out, '_errors')):
py_out = typing.cast(Any, self._out)
py_out._line_buffering = self._line_buffering()
py_out._errors = self._errors()
py_out.flush()
# Native C I/O
else:
encoding = self._encoding()
errors = self._errors()
line_buffering = self._line_buffering()
try:
if self._use_stdout:
sys.stdout = typing.cast(TextIO, None)
newstream = io.TextIOWrapper(
self._out.detach(),
line_buffering=line_buffering,
encoding=encoding,
errors=errors)
self._out = newstream
finally:
if self._use_stdout:
sys.stdout = self._out
def _pager_env(self) -> Optional[Dict[str, str]]:
new_vars = self._command.environment_variables(self._config)
if not new_vars:
return None
env = dict(os.environ)
env.update(new_vars)
return env
def _pager_out_stream(self) -> Optional[TextIO]:
if not self._use_stdout:
try:
# Ensure the output stream has a file descriptor
self._out.fileno()
except OSError:
pass
else:
return self._out
return None
def _paged_stream(self) -> TextIO:
buffer_size = 1 if self._line_buffering() else -1
self._pager = subprocess.Popen(self._command.command(),
env=self._pager_env(),
bufsize=buffer_size,
universal_newlines=True,
encoding=self._encoding(),
errors=self._errors(),
stdin=subprocess.PIPE,
stdout=self._pager_out_stream())
assert self._pager.stdin is not None
return typing.cast(TextIO, self._pager.stdin)
def __exit__(self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
traceback: Optional[types.TracebackType]) -> bool:
if self._pager is not None:
# Pager ignores Ctrl-C, so we should too
with _sigint_ignore():
pager_in = self._pager.stdin
assert pager_in is not None
try:
pager_in.close()
except BrokenPipeError:
# Other end of pipe already closed
self._exit_code = _signal_exit_code(signal.SIGPIPE)
# Wait for user to exit pager
self._pager.wait()
else:
self._flush_output()
return self._process_exception(exc)
def _flush_output(self) -> None:
try:
if not self._out.closed:
self._out.flush()
except BrokenPipeError:
self._exit_code = _signal_exit_code(signal.SIGPIPE)
try:
# Other end of pipe already closed, so close the stream now
# and handle the error. If we leave the stream open with
# unflushed data, then it will print an unhandleable
# exception during Python's interpreter shutdown.
self._out.close()
except BrokenPipeError:
# This will always happen
pass
def _process_exception(self, exc: Optional[BaseException]) -> bool:
if exc is not None:
if isinstance(exc, BrokenPipeError):
self._exit_code = _signal_exit_code(signal.SIGPIPE)
# Suppress exceptions caused by a broken pipe (indicating that
# the user has exited the pager, or the following process in
# the pipeline has exited)
return True
elif isinstance(exc, KeyboardInterrupt):
self._exit_code = _signal_exit_code(signal.SIGINT)
elif isinstance(exc, SystemExit) and isinstance(exc.code, int):
self._exit_code = exc.code
else:
self._exit_code = 1
return False
def exit_code(self) -> int:
"""
Return an appropriate exit code for the process based on any errors.
If the user exits the program prematurely by closing the pager, we may
want to return a different exit code for the process. This method
returns an appropriate exit code on the basis of the existence and type
of any uncaught exceptions.
"""
return self._exit_code
|
(output_stream: Optional[TextIO] = None, *, pager_command: Union[autopage.command.PagerCommand, Callable[[], autopage.command.PagerCommand], str, Sequence[str]] = <autopage.command.Less object at 0x7f2fc8e3abf0>, allow_color: bool = True, line_buffering: Optional[bool] = None, reset_on_exit: bool = False, errors: Optional[autopage.ErrorStrategy] = None)
|
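A minimal usage sketch for AutoPager; the loop body is illustrative:
import sys
import autopage

pager = autopage.AutoPager(line_buffering=True)
with pager as out:
    for i in range(1000):
        print('line %d' % i, file=out)
sys.exit(pager.exit_code())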
718,615 |
autopage
|
__enter__
| null |
def __enter__(self) -> TextIO:
# Only invoke the pager if the output is going to a tty; if it is
# being sent to a file or pipe then we don't want the pager involved
if self.to_terminal() and self._command.command() != ['cat']:
try:
return self._paged_stream()
except OSError:
pass
self._reconfigure_output_stream()
return self._out
|
(self) -> <class 'TextIO'>
|