index (int64, 0-731k) | package (string, 2-98 chars, nullable) | name (string, 1-76 chars) | docstring (string, 0-281k chars, nullable) | code (string, 4-1.07M chars, nullable) | signature (string, 2-42.8k chars, nullable)
---|---|---|---|---|---
714,683 |
pandas_redshift.core
|
s3_to_redshift
| null |
def s3_to_redshift(redshift_table_name, csv_name, delimiter=',', quotechar='"',
dateformat='auto', timeformat='auto', region='', parameters='', verbose=True):
bucket_name = 's3://{0}/{1}'.format(
s3_bucket_var, s3_subdirectory_var + csv_name)
if aws_1 and aws_2:
authorization = """
access_key_id '{0}'
secret_access_key '{1}'
""".format(aws_1, aws_2)
elif aws_role:
authorization = """
iam_role '{0}'
""".format(aws_role)
else:
authorization = ""
s3_to_sql = """
copy {0}
from '{1}'
delimiter '{2}'
ignoreheader 1
csv quote as '{3}'
dateformat '{4}'
timeformat '{5}'
{6}
{7}
""".format(redshift_table_name, bucket_name, delimiter, quotechar, dateformat,
timeformat, authorization, parameters)
if region:
s3_to_sql = s3_to_sql + "region '{0}'".format(region)
if aws_token != '':
s3_to_sql = s3_to_sql + "\n\tsession_token '{0}'".format(aws_token)
s3_to_sql = s3_to_sql + ';'
if verbose:
logger.info(mask_aws_credentials(s3_to_sql))
# send the file
logger.info('FILLING THE TABLE IN REDSHIFT')
try:
cursor.execute(s3_to_sql)
connect.commit()
except Exception as e:
logger.error(e)
traceback.print_exc(file=sys.stdout)
connect.rollback()
raise
|
(redshift_table_name, csv_name, delimiter=',', quotechar='"', dateformat='auto', timeformat='auto', region='', parameters='', verbose=True)
|
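For orientation, a hedged sketch of the COPY statement this helper assembles; every value below is a placeholder, since in the library the table name comes from the caller and the bucket, credentials, cursor, and connection come from module-level state configured elsewhere in pandas_redshift.core.

# Illustrative only: the shape of the generated SQL for key/secret authorization.
copy_sql = """
copy my_table
from 's3://my-bucket/uploads/my_table.csv'
delimiter ','
ignoreheader 1
csv quote as '"'
dateformat 'auto'
timeformat 'auto'

    access_key_id 'AKIA...'
    secret_access_key '...'

;
"""
# Inside s3_to_redshift this string is passed to cursor.execute() and then committed.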
714,684 |
pandas_redshift.core
|
set_log_level
| null |
def set_log_level(level, mask_secrets=True):
log_level_map = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warn': logging.WARN,
'error': logging.ERROR
}
logging_config['logger_level'] = log_level_map[level]
logger = logging.getLogger(__name__)
logger.setLevel(logging_config['logger_level'])
logging_config['mask_secrets'] = mask_secrets
|
(level, mask_secrets=True)
|
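A minimal usage sketch, assuming set_log_level is imported from pandas_redshift.core as listed in the package column (only the keys in log_level_map are accepted; any other value raises KeyError):

from pandas_redshift import core as pr_core

# Switch the module logger to debug output while keeping credential masking on.
pr_core.set_log_level('debug', mask_secrets=True)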
714,688 |
pandas_redshift.core
|
validate_column_names
|
Validate the column names to ensure no reserved words are used.
Arguments:
data_frame pd.DataFrame -- data to validate
|
def validate_column_names(data_frame):
"""Validate the column names to ensure no reserved words are used.
Arguments:
    data_frame pd.DataFrame -- data to validate
"""
    with open(os.path.join(os.path.dirname(__file__),
                           'redshift_reserve_words.txt'), 'r') as f:
        # reserved words ship with the package, one per line
        rrwords = [r.strip().lower() for r in f.readlines()]
    data_frame.columns = [x.lower() for x in data_frame.columns]
    for col in data_frame.columns:
        if col in rrwords:
            raise ValueError(
                'DataFrame column name {0} is a reserved word in Redshift'
                .format(col))
    # check for spaces in the column names
    there_are_spaces = any(
        re.search(r'\s', x) is not None for x in data_frame.columns)
    # delimit them if there are
    if there_are_spaces:
        col_names_dict = {x: '"{0}"'.format(x) for x in data_frame.columns}
        data_frame.rename(columns=col_names_dict, inplace=True)
    return data_frame
|
(data_frame)
|
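A small illustration of the behaviour described above; this assumes pandas is installed and the column names are made up:

import pandas as pd

df = pd.DataFrame({'First Name': ['a'], 'Age': [30]})
# validate_column_names(df) would:
#   1. lower-case every column:            'first name', 'age'
#   2. double-quote names with whitespace: '"first name"', 'age'
#   3. raise ValueError if any column is a Redshift reserved word (e.g. 'table')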
714,689 |
sqlalchemy_jsonfield.jsonfield
|
JSONField
|
Represent an immutable structure as a json-encoded string or json.
Usage::
JSONField(enforce_string=True|False, enforce_unicode=True|False)
|
class JSONField(sqlalchemy.types.TypeDecorator): # type: ignore[type-arg] # pylint: disable=abstract-method
"""Represent an immutable structure as a json-encoded string or json.
Usage::
JSONField(enforce_string=True|False, enforce_unicode=True|False)
"""
def process_literal_param(self, value: typing.Any, dialect: Dialect) -> typing.Any:
"""Re-use of process_bind_param.
:return: encoded value if required
:rtype: typing.Union[str, typing.Any]
"""
return self.process_bind_param(value, dialect)
impl = sqlalchemy.types.TypeEngine # Special placeholder
    cache_ok = False  # Cache complexity due to requirement of value re-serialization and mutability
def __init__( # pylint: disable=keyword-arg-before-vararg
self,
enforce_string: bool = False,
enforce_unicode: bool = False,
json: types.ModuleType | typing.Any = json, # pylint: disable=redefined-outer-name
json_type: TypeEngine[typing.Any] | type[TypeEngine[typing.Any]] = sqlalchemy.JSON,
*args: typing.Any,
**kwargs: typing.Any,
) -> None:
"""JSONField.
:param enforce_string: enforce String(UnicodeText) type usage
:type enforce_string: bool
:param enforce_unicode: do not encode non-ascii data
:type enforce_unicode: bool
:param json: JSON encoding/decoding library. By default: standard json package.
:param json_type: the sqlalchemy/dialect class that will be used to render the DB JSON type.
By default: sqlalchemy.JSON
:param args: extra baseclass arguments
:type args: typing.Any
:param kwargs: extra baseclass keyworded arguments
:type kwargs: typing.Any
"""
self.__enforce_string = enforce_string
self.__enforce_unicode = enforce_unicode
self.__json_codec = json
self.__json_type = json_type
super().__init__(*args, **kwargs)
def __use_json(self, dialect: Dialect) -> bool:
"""Helper to determine, which encoder to use.
:return: use engine-based json encoder
:rtype: bool
"""
return hasattr(dialect, "_json_serializer") and not self.__enforce_string
def load_dialect_impl(self, dialect: Dialect) -> TypeEngine[typing.Any]:
"""Select impl by dialect.
:return: dialect implementation depends on decoding method
:rtype: TypeEngine
"""
# types are handled by DefaultDialect, Dialect class is abstract
if self.__use_json(dialect):
return dialect.type_descriptor(self.__json_type) # type: ignore[arg-type]
return dialect.type_descriptor(sqlalchemy.UnicodeText) # type: ignore[arg-type]
def process_bind_param(self, value: typing.Any, dialect: Dialect) -> str | typing.Any:
"""Encode data, if required.
:return: encoded value if required
:rtype: typing.Union[str, typing.Any]
"""
if self.__use_json(dialect) or value is None:
return value
return self.__json_codec.dumps(value, ensure_ascii=not self.__enforce_unicode)
def process_result_value(self, value: str | typing.Any, dialect: Dialect) -> typing.Any:
"""Decode data, if required.
:return: decoded result value if required
:rtype: typing.Any
"""
if self.__use_json(dialect) or value is None:
return value
return self.__json_codec.loads(value)
|
(enforce_string: 'bool' = False, enforce_unicode: 'bool' = False, json: 'types.ModuleType | typing.Any' = <module 'json' from '/usr/local/lib/python3.10/json/__init__.py'>, json_type: 'TypeEngine[typing.Any] | type[TypeEngine[typing.Any]]' = <class 'sqlalchemy.sql.sqltypes.JSON'>, *args: 'typing.Any', **kwargs: 'typing.Any') -> 'None'
|
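To put the Usage line from the docstring in context, a hedged sketch of attaching JSONField to a declarative model (model, table, and column names are illustrative):

import sqlalchemy
from sqlalchemy.orm import declarative_base

import sqlalchemy_jsonfield

Base = declarative_base()


class Record(Base):
    __tablename__ = 'records'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    # enforce_string=True stores the payload as UnicodeText instead of the dialect's
    # native JSON type; enforce_unicode=True keeps non-ASCII characters unescaped.
    payload = sqlalchemy.Column(
        sqlalchemy_jsonfield.JSONField(enforce_string=True, enforce_unicode=True),
        nullable=False,
    )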
714,690 |
sqlalchemy_jsonfield.jsonfield
|
__use_json
|
Helper to determine which encoder to use.
:return: use engine-based json encoder
:rtype: bool
|
def __use_json(self, dialect: Dialect) -> bool:
"""Helper to determine, which encoder to use.
:return: use engine-based json encoder
:rtype: bool
"""
return hasattr(dialect, "_json_serializer") and not self.__enforce_string
|
(self, dialect: 'Dialect') -> 'bool'
|
714,692 |
sqlalchemy_jsonfield.jsonfield
|
__init__
|
JSONField.
:param enforce_string: enforce String(UnicodeText) type usage
:type enforce_string: bool
:param enforce_unicode: do not encode non-ascii data
:type enforce_unicode: bool
:param json: JSON encoding/decoding library. By default: standard json package.
:param json_type: the sqlalchemy/dialect class that will be used to render the DB JSON type.
By default: sqlalchemy.JSON
:param args: extra baseclass arguments
:type args: typing.Any
:param kwargs: extra baseclass keyworded arguments
:type kwargs: typing.Any
|
def __init__( # pylint: disable=keyword-arg-before-vararg
self,
enforce_string: bool = False,
enforce_unicode: bool = False,
json: types.ModuleType | typing.Any = json, # pylint: disable=redefined-outer-name
json_type: TypeEngine[typing.Any] | type[TypeEngine[typing.Any]] = sqlalchemy.JSON,
*args: typing.Any,
**kwargs: typing.Any,
) -> None:
"""JSONField.
:param enforce_string: enforce String(UnicodeText) type usage
:type enforce_string: bool
:param enforce_unicode: do not encode non-ascii data
:type enforce_unicode: bool
:param json: JSON encoding/decoding library. By default: standard json package.
:param json_type: the sqlalchemy/dialect class that will be used to render the DB JSON type.
By default: sqlalchemy.JSON
:param args: extra baseclass arguments
:type args: typing.Any
:param kwargs: extra baseclass keyworded arguments
:type kwargs: typing.Any
"""
self.__enforce_string = enforce_string
self.__enforce_unicode = enforce_unicode
self.__json_codec = json
self.__json_type = json_type
super().__init__(*args, **kwargs)
|
(self, enforce_string: 'bool' = False, enforce_unicode: 'bool' = False, json: 'types.ModuleType | typing.Any' = <module 'json' from '/usr/local/lib/python3.10/json/__init__.py'>, json_type: 'TypeEngine[typing.Any] | type[TypeEngine[typing.Any]]' = <class 'sqlalchemy.sql.sqltypes.JSON'>, *args: 'typing.Any', **kwargs: 'typing.Any') -> 'None'
|
714,725 |
sqlalchemy_jsonfield.jsonfield
|
load_dialect_impl
|
Select impl by dialect.
:return: dialect implementation depends on decoding method
:rtype: TypeEngine
|
def load_dialect_impl(self, dialect: Dialect) -> TypeEngine[typing.Any]:
"""Select impl by dialect.
:return: dialect implementation depends on decoding method
:rtype: TypeEngine
"""
# types are handled by DefaultDialect, Dialect class is abstract
if self.__use_json(dialect):
return dialect.type_descriptor(self.__json_type) # type: ignore[arg-type]
return dialect.type_descriptor(sqlalchemy.UnicodeText) # type: ignore[arg-type]
|
(self, dialect: 'Dialect') -> 'TypeEngine[typing.Any]'
|
714,726 |
sqlalchemy_jsonfield.jsonfield
|
process_bind_param
|
Encode data, if required.
:return: encoded value if required
:rtype: typing.Union[str, typing.Any]
|
def process_bind_param(self, value: typing.Any, dialect: Dialect) -> str | typing.Any:
"""Encode data, if required.
:return: encoded value if required
:rtype: typing.Union[str, typing.Any]
"""
if self.__use_json(dialect) or value is None:
return value
return self.__json_codec.dumps(value, ensure_ascii=not self.__enforce_unicode)
|
(self, value: 'typing.Any', dialect: 'Dialect') -> 'str | typing.Any'
|
714,727 |
sqlalchemy_jsonfield.jsonfield
|
process_literal_param
|
Re-use of process_bind_param.
:return: encoded value if required
:rtype: typing.Union[str, typing.Any]
|
def process_literal_param(self, value: typing.Any, dialect: Dialect) -> typing.Any:
"""Re-use of process_bind_param.
:return: encoded value if required
:rtype: typing.Union[str, typing.Any]
"""
return self.process_bind_param(value, dialect)
|
(self, value: 'typing.Any', dialect: 'Dialect') -> 'typing.Any'
|
714,728 |
sqlalchemy_jsonfield.jsonfield
|
process_result_value
|
Decode data, if required.
:return: decoded result value if required
:rtype: typing.Any
|
def process_result_value(self, value: str | typing.Any, dialect: Dialect) -> typing.Any:
"""Decode data, if required.
:return: decoded result value if required
:rtype: typing.Any
"""
if self.__use_json(dialect) or value is None:
return value
return self.__json_codec.loads(value)
|
(self, value: 'str | typing.Any', dialect: 'Dialect') -> 'typing.Any'
|
714,734 |
sqlalchemy_jsonfield.jsonfield
|
mutable_json_field
|
Mutable JSONField creator.
:param enforce_string: enforce String(UnicodeText) type usage
:type enforce_string: bool
:param enforce_unicode: do not encode non-ascii data
:type enforce_unicode: bool
:param json: JSON encoding/decoding library.
By default: standard json package.
:param args: extra baseclass arguments
:type args: typing.Any
:param kwargs: extra baseclass keyworded arguments
:type kwargs: typing.Any
:return: Mutable JSONField via MutableDict.as_mutable
:rtype: JSONField
|
def mutable_json_field( # pylint: disable=keyword-arg-before-vararg, redefined-outer-name
enforce_string: bool = False,
enforce_unicode: bool = False,
json: types.ModuleType | typing.Any = json,
*args: typing.Any,
**kwargs: typing.Any,
) -> JSONField:
"""Mutable JSONField creator.
:param enforce_string: enforce String(UnicodeText) type usage
:type enforce_string: bool
:param enforce_unicode: do not encode non-ascii data
:type enforce_unicode: bool
:param json: JSON encoding/decoding library.
By default: standard json package.
:param args: extra baseclass arguments
:type args: typing.Any
:param kwargs: extra baseclass keyworded arguments
:type kwargs: typing.Any
:return: Mutable JSONField via MutableDict.as_mutable
:rtype: JSONField
"""
return sqlalchemy.ext.mutable.MutableDict.as_mutable( # type: ignore[return-value]
JSONField( # type: ignore[misc]
enforce_string=enforce_string,
enforce_unicode=enforce_unicode,
json=json,
*args, # noqa: B026
**kwargs,
)
)
|
(enforce_string: 'bool' = False, enforce_unicode: 'bool' = False, json: 'types.ModuleType | typing.Any' = <module 'json' from '/usr/local/lib/python3.10/json/__init__.py'>, *args: 'typing.Any', **kwargs: 'typing.Any') -> 'JSONField'
|
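A companion sketch for mutable_json_field: because the column type is wrapped in MutableDict.as_mutable, in-place edits to the stored dict are tracked by the session (all names below are illustrative):

import sqlalchemy
from sqlalchemy.orm import declarative_base

import sqlalchemy_jsonfield

Base = declarative_base()


class Settings(Base):
    __tablename__ = 'settings'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    data = sqlalchemy.Column(
        sqlalchemy_jsonfield.mutable_json_field(enforce_string=True),
        default=dict,
    )

# With a plain JSONField, `row.data['key'] = 'value'` would not mark the row as dirty;
# with mutable_json_field the change is detected and written out on commit.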
714,736 |
pytorch_optimizer.optimizer.a2grad
|
A2Grad
|
Optimal Adaptive and Accelerated Stochastic Gradient Descent.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: Optional[float]. learning rate. not needed.
:param beta: float. beta.
:param lips: float. Lipschitz constant.
:param rho: float. represents the degree of weighting decrease, a constant smoothing factor between 0 and 1.
:param variant: str. type of A2Grad optimizer. 'uni', 'inc', 'exp'.
|
class A2Grad(Optimizer, BaseOptimizer):
r"""Optimal Adaptive and Accelerated Stochastic Gradient Descent.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
    :param lr: Optional[float]. learning rate. not needed.
:param beta: float. beta.
:param lips: float. Lipschitz constant.
:param rho: float. represents the degree of weighting decrease, a constant smoothing factor between 0 and 1.
:param variant: str. type of A2Grad optimizer. 'uni', 'inc', 'exp'.
"""
def __init__(
self,
params: PARAMETERS,
lr: Optional[float] = None,
beta: float = 10.0,
lips: float = 10.0,
rho: float = 0.5,
variant: str = 'uni',
):
self.validate_learning_rate(lr)
self.validate_non_negative(lips, 'lips')
self.validate_non_negative(rho, 'rho')
self.validate_options(variant, 'variant', ['uni', 'inc', 'exp'])
self.variant = variant
defaults: DEFAULTS = {'beta': beta, 'lips': lips}
if variant == 'exp':
defaults.update({'rho': rho})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'A2Grad'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['alpha_k'] = 1.0
state['v_k'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
state['avg_grad'] = torch.zeros_like(p)
state['x_k'] = p.clone()
if self.variant == 'exp':
state['v_kk'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
gamma_k: float = 2.0 * group['lips'] / (group['step'] + 1)
alpha_k_1: float = 2.0 / (group['step'] + 3)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['alpha_k'] = 1.0
state['v_k'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
state['avg_grad'] = grad.clone()
state['x_k'] = p.clone()
if self.variant == 'exp':
state['v_kk'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
avg_grad = state['avg_grad']
avg_grad.add_(grad - avg_grad, alpha=group['step'] + 1)
delta_k = grad.clone()
delta_k.add_(avg_grad, alpha=-1.0)
delta_k_sq = delta_k.pow(2).sum()
v_k = state['v_k']
if self.variant in ('uni', 'inc'):
if self.variant == 'inc':
v_k.mul_((group['step'] / (group['step'] + 1)) ** 2)
v_k.add_(delta_k_sq)
else:
v_kk = state['v_kk']
v_kk.mul_(group['rho']).add_(delta_k_sq, alpha=1.0 - group['rho'])
torch.max(v_kk, v_k, out=v_k)
h_k = v_k.sqrt()
if self.variant != 'uni':
h_k.mul_(math.sqrt(group['step'] + 1))
coefficient = -1.0 / (gamma_k + group['beta'] * h_k.item())
x_k = state['x_k']
x_k.add_(grad, alpha=coefficient)
p.mul_(1.0 - alpha_k_1).add_(x_k, alpha=alpha_k_1)
p.add_(grad, alpha=(1.0 - alpha_k_1) * state['alpha_k'] * coefficient)
state['alpha_k'] = alpha_k_1
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: Optional[float] = None, beta: float = 10.0, lips: float = 10.0, rho: float = 0.5, variant: str = 'uni')
|
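A minimal training-step sketch, assuming the optimizer is re-exported from the package root (the model and data are toy placeholders):

import torch

from pytorch_optimizer import A2Grad  # assumed top-level export

model = torch.nn.Linear(10, 1)
optimizer = A2Grad(model.parameters(), beta=10.0, lips=10.0, variant='uni')

x, y = torch.randn(32, 10), torch.randn(32, 1)
loss = torch.nn.functional.mse_loss(model(x), y)

optimizer.zero_grad()
loss.backward()
optimizer.step()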
714,738 |
pytorch_optimizer.optimizer.a2grad
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: Optional[float] = None,
beta: float = 10.0,
lips: float = 10.0,
rho: float = 0.5,
variant: str = 'uni',
):
self.validate_learning_rate(lr)
self.validate_non_negative(lips, 'lips')
self.validate_non_negative(rho, 'rho')
self.validate_options(variant, 'variant', ['uni', 'inc', 'exp'])
self.variant = variant
defaults: DEFAULTS = {'beta': beta, 'lips': lips}
if variant == 'exp':
defaults.update({'rho': rho})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: Optional[float] = None, beta: float = 10.0, lips: float = 10.0, rho: float = 0.5, variant: str = 'uni')
|
714,741 |
pytorch_optimizer.optimizer.a2grad
|
__str__
| null |
def __str__(self) -> str:
return 'A2Grad'
|
(self) -> str
|
714,748 |
pytorch_optimizer.base.optimizer
|
apply_adam_debias
|
Apply AdamD variant.
:param adam_debias: bool. whether to apply AdamD.
:param step_size: float. step size.
:param bias_correction1: float. bias_correction.
|
@staticmethod
def apply_adam_debias(adam_debias: bool, step_size: float, bias_correction1: float) -> float:
r"""Apply AdamD variant.
:param adam_debias: bool. whether to apply AdamD.
:param step_size: float. step size.
:param bias_correction1: float. bias_correction.
"""
return step_size if adam_debias else step_size / bias_correction1
|
(adam_debias: bool, step_size: float, bias_correction1: float) -> float
|
714,749 |
pytorch_optimizer.base.optimizer
|
apply_ams_bound
|
Apply AMSBound variant.
:param ams_bound: bool. whether to apply AMSBound.
:param exp_avg_sq: torch.Tensor. exp_avg_sq.
:param max_exp_avg_sq: Optional[torch.Tensor]. max_exp_avg_sq.
:param eps: float. epsilon.
|
@staticmethod
def apply_ams_bound(
ams_bound: bool, exp_avg_sq: torch.Tensor, max_exp_avg_sq: Optional[torch.Tensor], eps: float
) -> torch.Tensor:
r"""Apply AMSBound variant.
:param ams_bound: bool. whether to apply AMSBound.
:param exp_avg_sq: torch.Tensor. exp_avg_sq.
:param max_exp_avg_sq: Optional[torch.Tensor]. max_exp_avg_sq.
:param eps: float. epsilon.
"""
if ams_bound:
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
de_nom = max_exp_avg_sq.add(eps)
else:
de_nom = exp_avg_sq.add(eps)
return de_nom.sqrt_().add_(eps)
|
(ams_bound: bool, exp_avg_sq: torch.Tensor, max_exp_avg_sq: Optional[torch.Tensor], eps: float) -> torch.Tensor
|
714,750 |
pytorch_optimizer.base.optimizer
|
apply_weight_decay
|
Apply weight decay.
:param p: torch.Tensor. parameter.
:param grad: torch.Tensor. gradient.
:param lr: float. learning rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ratio: Optional[float]. scale weight decay.
|
@staticmethod
def apply_weight_decay(
p: torch.Tensor,
grad: Optional[torch.Tensor],
lr: float,
weight_decay: float,
weight_decouple: bool,
fixed_decay: bool,
ratio: Optional[float] = None,
):
r"""Apply weight decay.
:param p: torch.Tensor. parameter.
:param grad: torch.Tensor. gradient.
:param lr: float. learning rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ratio: Optional[float]. scale weight decay.
"""
if weight_decouple:
p.mul_(1.0 - weight_decay * (1.0 if fixed_decay else lr) * (ratio if ratio is not None else 1.0))
elif weight_decay > 0.0 and grad is not None:
grad.add_(p, alpha=weight_decay)
|
(p: torch.Tensor, grad: Optional[torch.Tensor], lr: float, weight_decay: float, weight_decouple: bool, fixed_decay: bool, ratio: Optional[float] = None)
|
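To make the two branches concrete, a small numeric sketch with arbitrary values: with weight_decouple=True the parameter itself is shrunk before the gradient step (AdamW-style), while with weight_decouple=False the decay is folded into the gradient as a classic L2 penalty.

import torch

p = torch.tensor([1.0])
grad = torch.tensor([0.5])
lr, weight_decay = 0.1, 0.01

# decoupled (fixed_decay=False, ratio=None): p <- p * (1 - weight_decay * lr)
p_decoupled = p * (1.0 - weight_decay * lr)   # tensor([0.9990])

# coupled L2 penalty: grad <- grad + weight_decay * p
grad_l2 = grad + weight_decay * p             # tensor([0.5100])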
714,751 |
pytorch_optimizer.base.optimizer
|
compute_hutchinson_hessian
|
Hutchinson's approximate hessian, added to the state under key `hessian`.
:param param_groups: PARAMETERS. parameter groups.
:param state: STATE. optimizer state.
:param num_samples: int. number of times to sample `z` for the approximation of the hessian trace.
:param alpha: float. alpha.
:param distribution: HUTCHINSON_G. type of distribution.
|
@staticmethod
def apply_weight_decay(
p: torch.Tensor,
grad: Optional[torch.Tensor],
lr: float,
weight_decay: float,
weight_decouple: bool,
fixed_decay: bool,
ratio: Optional[float] = None,
):
r"""Apply weight decay.
:param p: torch.Tensor. parameter.
:param grad: torch.Tensor. gradient.
:param lr: float. learning rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ratio: Optional[float]. scale weight decay.
"""
if weight_decouple:
p.mul_(1.0 - weight_decay * (1.0 if fixed_decay else lr) * (ratio if ratio is not None else 1.0))
elif weight_decay > 0.0 and grad is not None:
grad.add_(p, alpha=weight_decay)
|
(param_groups: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], state: Dict, num_samples: int = 1, alpha: float = 1.0, distribution: Literal['gaussian', 'rademacher'] = 'gaussian')
|
714,752 |
pytorch_optimizer.base.optimizer
|
get_adanorm_gradient
|
Get AdaNorm gradient.
:param grad: torch.Tensor. gradient.
:param adanorm: bool. whether to apply AdaNorm.
:param exp_grad_norm: Optional[torch.Tensor]. exp_grad_norm.
:param r: Optional[float]. momentum (ratio).
|
@staticmethod
def get_adanorm_gradient(
grad: torch.Tensor, adanorm: bool, exp_grad_norm: Optional[torch.Tensor] = None, r: Optional[float] = 0.95
) -> torch.Tensor:
r"""Get AdaNorm gradient.
:param grad: torch.Tensor. gradient.
:param adanorm: bool. whether to apply AdaNorm.
:param exp_grad_norm: Optional[torch.Tensor]. exp_grad_norm.
    :param r: Optional[float]. momentum (ratio).
"""
if not adanorm:
return grad
grad_norm = torch.linalg.norm(grad)
exp_grad_norm.mul_(r).add_(grad_norm, alpha=1.0 - r)
return grad * exp_grad_norm / grad_norm if exp_grad_norm > grad_norm else grad
|
(grad: torch.Tensor, adanorm: bool, exp_grad_norm: Optional[torch.Tensor] = None, r: Optional[float] = 0.95) -> torch.Tensor
|
714,753 |
pytorch_optimizer.base.optimizer
|
get_rectify_step_size
|
Get step size for rectify optimizer.
:param is_rectify: bool. whether to apply rectify-variant.
:param step: int. number of steps.
:param lr: float. learning rate.
:param beta2: float. beta2.
:param n_sma_threshold: int. SMA threshold.
:param degenerated_to_sgd: bool. degenerated to SGD.
|
@staticmethod
def get_rectify_step_size(
is_rectify: bool,
step: int,
lr: float,
beta2: float,
n_sma_threshold: int,
degenerated_to_sgd: bool,
) -> Tuple[float, float]:
r"""Get step size for rectify optimizer.
:param is_rectify: bool. whether to apply rectify-variant.
:param step: int. number of steps.
:param lr: float. learning rate.
:param beta2: float. beta2.
    :param n_sma_threshold: int. SMA threshold.
:param degenerated_to_sgd: bool. degenerated to SGD.
"""
step_size: float = lr
n_sma: float = 0.0
if is_rectify:
n_sma_max: float = 2.0 / (1.0 - beta2) - 1.0
beta2_t: float = beta2 ** step # fmt: skip
n_sma: float = n_sma_max - 2 * step * beta2_t / (1.0 - beta2_t)
if n_sma >= n_sma_threshold:
rt = math.sqrt(
(1.0 - beta2_t) * (n_sma - 4) / (n_sma_max - 4) * (n_sma - 2) / n_sma * n_sma_max / (n_sma_max - 2)
)
elif degenerated_to_sgd:
rt = 1.0
else:
rt = -1.0
step_size *= rt
return step_size, n_sma
|
(is_rectify: bool, step: int, lr: float, beta2: float, n_sma_threshold: int, degenerated_to_sgd: bool) -> Tuple[float, float]
|
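For intuition, a self-contained sketch that recomputes the rectified step size outside the class for one set of toy hyper-parameters (the formula is copied from the body above; the values are assumptions, not from the source):

import math

lr, beta2, step = 1e-3, 0.999, 10

n_sma_max = 2.0 / (1.0 - beta2) - 1.0   # 1999.0
beta2_t = beta2 ** step
n_sma = n_sma_max - 2 * step * beta2_t / (1.0 - beta2_t)

# valid here because n_sma comes out above the usual threshold of 5
rt = math.sqrt(
    (1.0 - beta2_t) * (n_sma - 4) / (n_sma_max - 4) * (n_sma - 2) / n_sma * n_sma_max / (n_sma_max - 2)
)
step_size = lr * rt   # warm-up-style damping of the base learning rate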
714,762 |
pytorch_optimizer.optimizer.a2grad
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['alpha_k'] = 1.0
            state['v_k'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
            state['avg_grad'] = torch.zeros_like(p)
            state['x_k'] = p.clone()
            if self.variant == 'exp':
                state['v_kk'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
|
(self)
|
714,763 |
pytorch_optimizer.base.optimizer
|
set_hessian
|
Set hessian to state from external source. Generally useful when using functorch as a base.
Example:
-------
Here's an example::
# Hutchinson's Estimator using HVP
noise = tree_map(lambda v: torch.randn_like(v), params)
loss_, hvp_est = jvp(grad(run_model_fn), (params,), (noise,))
hessian_diag_est = tree_map(lambda a, b: a * b, hvp_est, noise)
optimizer.set_hessian(hessian_diag_est)
# OR
optimizer.step(hessian=hessian_diag_est)
:param param_groups: PARAMETERS. parameter groups.
:param state: STATE. optimizer state.
:param hessian: List[torch.Tensor]. sequence of hessian to set.
|
@staticmethod
def apply_weight_decay(
p: torch.Tensor,
grad: Optional[torch.Tensor],
lr: float,
weight_decay: float,
weight_decouple: bool,
fixed_decay: bool,
ratio: Optional[float] = None,
):
r"""Apply weight decay.
:param p: torch.Tensor. parameter.
:param grad: torch.Tensor. gradient.
:param lr: float. learning rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ratio: Optional[float]. scale weight decay.
"""
if weight_decouple:
p.mul_(1.0 - weight_decay * (1.0 if fixed_decay else lr) * (ratio if ratio is not None else 1.0))
elif weight_decay > 0.0 and grad is not None:
grad.add_(p, alpha=weight_decay)
|
(param_groups: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], state: Dict, hessian: List[torch.Tensor])
|
714,766 |
pytorch_optimizer.base.optimizer
|
validate_betas
| null |
def validate_betas(self, betas: BETAS):
self.validate_range(betas[0], 'beta1', 0.0, 1.0, range_type='[]')
self.validate_range(betas[1], 'beta2', 0.0, 1.0, range_type='[]')
if len(betas) < 3:
return
if betas[2] is not None:
self.validate_range(betas[2], 'beta3', 0.0, 1.0, range_type='[]')
|
(self, betas: Union[Tuple[float, float], Tuple[float, float, float]])
|
714,767 |
pytorch_optimizer.base.optimizer
|
validate_boundary
| null |
@staticmethod
def validate_boundary(constant: float, boundary: float, bound_type: str = 'upper'):
if bound_type == 'upper' and constant > boundary:
raise ValueError(f'[-] constant {constant} must be in a range of (-inf, {boundary}]')
if bound_type == 'lower' and constant < boundary:
raise ValueError(f'[-] constant {constant} must be in a range of [{boundary}, inf)')
|
(constant: float, boundary: float, bound_type: str = 'upper')
|
714,768 |
pytorch_optimizer.base.optimizer
|
validate_learning_rate
| null |
@staticmethod
def validate_learning_rate(learning_rate: Optional[float]):
if learning_rate is not None and learning_rate < 0.0:
raise NegativeLRError(learning_rate)
|
(learning_rate: Optional[float])
|
714,769 |
pytorch_optimizer.base.optimizer
|
validate_non_negative
| null |
@staticmethod
def validate_non_negative(x: Optional[float], name: str):
if x is not None and x < 0.0:
raise ValueError(f'[-] {name} must be non-negative')
|
(x: Optional[float], name: str)
|
714,770 |
pytorch_optimizer.base.optimizer
|
validate_nus
| null |
def validate_nus(self, nus: Union[float, Tuple[float, float]]):
if isinstance(nus, float):
self.validate_range(nus, 'nu', 0.0, 1.0, range_type='[]')
else:
self.validate_range(nus[0], 'nu1', 0.0, 1.0, range_type='[]')
self.validate_range(nus[1], 'nu2', 0.0, 1.0, range_type='[]')
|
(self, nus: Union[float, Tuple[float, float]])
|
714,771 |
pytorch_optimizer.base.optimizer
|
validate_options
| null |
@staticmethod
def validate_options(x: str, name: str, options: List[str]):
if x not in options:
opts: str = ' or '.join([f'\'{option}\'' for option in options]).strip()
raise ValueError(f'[-] {name} {x} must be one of ({opts})')
|
(x: str, name: str, options: List[str])
|
714,772 |
pytorch_optimizer.base.optimizer
|
validate_positive
| null |
@staticmethod
def validate_positive(x: Union[float, int], name: str):
if x <= 0:
raise ValueError(f'[-] {name} must be positive')
|
(x: Union[float, int], name: str)
|
714,773 |
pytorch_optimizer.base.optimizer
|
validate_range
| null |
@staticmethod
def validate_range(x: float, name: str, low: float, high: float, range_type: str = '[)'):
if range_type == '[)' and not low <= x < high:
raise ValueError(f'[-] {name} must be in the range [{low}, {high})')
if range_type == '[]' and not low <= x <= high:
raise ValueError(f'[-] {name} must be in the range [{low}, {high}]')
if range_type == '(]' and not low < x <= high:
raise ValueError(f'[-] {name} must be in the range ({low}, {high}]')
if range_type == '()' and not low < x < high:
raise ValueError(f'[-] {name} must be in the range ({low}, {high})')
|
(x: float, name: str, low: float, high: float, range_type: str = '[)')
|
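The range_type argument follows standard interval notation; a hedged usage sketch (import path assumed from the package column; validate_range is a staticmethod):

from pytorch_optimizer.base.optimizer import BaseOptimizer  # assumed import path

BaseOptimizer.validate_range(1.0, 'beta1', 0.0, 1.0, range_type='[]')  # closed interval: accepted
BaseOptimizer.validate_range(1.0, 'beta1', 0.0, 1.0, range_type='[)')  # half-open: raises ValueError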
714,774 |
pytorch_optimizer.base.optimizer
|
validate_step
| null |
@staticmethod
def validate_step(step: int, step_type: str):
if step < 1:
raise NegativeStepError(step, step_type=step_type)
|
(step: int, step_type: str)
|
714,776 |
pytorch_optimizer.base.optimizer
|
zero_hessian
|
Zero-out hessian.
:param param_groups: PARAMETERS. parameter groups.
:param state: STATE. optimizer state.
:param pre_zero: bool. zero-out hessian before computing the hessian.
|
@staticmethod
def zero_hessian(param_groups: PARAMETERS, state: STATE, pre_zero: bool = True):
r"""Zero-out hessian.
:param param_groups: PARAMETERS. parameter groups.
:param state: STATE. optimizer state.
:param pre_zero: bool. zero-out hessian before computing the hessian.
"""
for group in param_groups:
for p in group['params']:
if p.requires_grad and p.grad is not None and not p.grad.is_sparse:
if 'hessian' not in state[p]:
state[p]['hessian'] = torch.zeros_like(p)
elif pre_zero:
state[p]['hessian'].zero_()
|
(param_groups: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], state: Dict, pre_zero: bool = True)
|
714,777 |
pytorch_optimizer.optimizer.sgd
|
ASGD
|
Adaptive SGD with estimation of the local smoothness (curvature).
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param amplifier: float. amplifier.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param theta: float. theta.
:param dampening: float. dampening for momentum.
:param eps: float. term added to the denominator to improve numerical stability.
|
class ASGD(Optimizer, BaseOptimizer):
r"""Adaptive SGD with estimation of the local smoothness (curvature).
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param amplifier: float. amplifier.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param theta: float. theta.
:param dampening: float. dampening for momentum.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-2,
amplifier: float = 0.02,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
theta: float = 1.0,
dampening: float = 1.0,
eps: float = 1e-5,
):
self.validate_learning_rate(lr)
self.validate_non_negative(amplifier, 'amplifier')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'amplifier': amplifier,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'theta': theta,
'dampening': dampening,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'ASGD'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for _ in group['params']:
pass
@staticmethod
def get_norms_by_group(group: Dict, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Get parameter & gradient norm by group."""
p_norm = torch.zeros(1, dtype=torch.float32, device=device)
g_norm = torch.zeros(1, dtype=torch.float32, device=device)
for p in group['params']:
if p.grad is None:
continue
p_norm.add_(p.norm().pow(2))
g_norm.add_(p.grad.norm().pow(2))
p_norm.sqrt_()
g_norm.sqrt_()
return p_norm, g_norm
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'prev_param_norm' not in group and 'prev_grad_norm' not in group:
group['prev_param_norm'], group['prev_grad_norm'] = self.get_norms_by_group(
group,
device=group['params'][0].device,
)
group['curr_param_norm'], group['curr_grad_norm'] = self.get_norms_by_group(
group,
device=group['params'][0].device,
)
param_diff_norm: float = (group['curr_param_norm'] - group['prev_param_norm']).item()
grad_diff_norm: float = (group['curr_grad_norm'] - group['prev_grad_norm']).item()
new_lr: float = group['lr'] * math.sqrt(1 + group['amplifier'] * group['theta'])
if param_diff_norm > 0 and grad_diff_norm > 0:
new_lr = min(new_lr, param_diff_norm / (group['dampening'] * grad_diff_norm)) + group['eps']
group['theta'] = new_lr / group['lr']
group['lr'] = new_lr
group['prev_param_norm'].copy_(group['curr_param_norm'])
group['prev_grad_norm'].copy_(group['curr_grad_norm'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
p.add_(grad, alpha=-new_lr)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, amplifier: float = 0.02, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, theta: float = 1.0, dampening: float = 1.0, eps: float = 1e-05)
|
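A small numeric sketch of the learning-rate update performed in ASGD.step; the norms are toy values standing in for what get_norms_by_group returns:

import math

lr, amplifier, theta, dampening, eps = 1e-2, 0.02, 1.0, 1.0, 1e-5
param_diff_norm, grad_diff_norm = 0.30, 2.0   # assumed norm differences between steps

new_lr = lr * math.sqrt(1 + amplifier * theta)
if param_diff_norm > 0 and grad_diff_norm > 0:
    new_lr = min(new_lr, param_diff_norm / (dampening * grad_diff_norm)) + eps

theta = new_lr / lr   # carried into the next step
lr = new_lr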
714,779 |
pytorch_optimizer.optimizer.sgd
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-2,
amplifier: float = 0.02,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
theta: float = 1.0,
dampening: float = 1.0,
eps: float = 1e-5,
):
self.validate_learning_rate(lr)
self.validate_non_negative(amplifier, 'amplifier')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'amplifier': amplifier,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'theta': theta,
'dampening': dampening,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, amplifier: float = 0.02, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, theta: float = 1.0, dampening: float = 1.0, eps: float = 1e-05)
|
714,782 |
pytorch_optimizer.optimizer.sgd
|
__str__
| null |
def __str__(self) -> str:
return 'ASGD'
|
(self) -> str
|
714,794 |
pytorch_optimizer.optimizer.sgd
|
get_norms_by_group
|
Get parameter & gradient norm by group.
|
@staticmethod
def get_norms_by_group(group: Dict, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Get parameter & gradient norm by group."""
p_norm = torch.zeros(1, dtype=torch.float32, device=device)
g_norm = torch.zeros(1, dtype=torch.float32, device=device)
for p in group['params']:
if p.grad is None:
continue
p_norm.add_(p.norm().pow(2))
g_norm.add_(p.grad.norm().pow(2))
p_norm.sqrt_()
g_norm.sqrt_()
return p_norm, g_norm
|
(group: Dict, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]
|
714,804 |
pytorch_optimizer.optimizer.sgd
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
large_lr: float = group['lr'] * group['kappa'] / group['constant']
alpha: float = 1.0 - (group['xi'] * (group['constant'] ** 2) / group['kappa'])
beta: float = 1.0 - alpha
zeta: float = group['constant'] / (group['constant'] + beta)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['momentum_buffer'] = p.clone()
self.apply_weight_decay(
p,
grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=False,
fixed_decay=False,
)
buf = state['momentum_buffer']
buf.mul_((1.0 / beta) - 1.0).add_(grad, alpha=-large_lr).add_(p).mul_(beta)
p.add_(grad, alpha=-group['lr']).mul_(zeta).add_(buf, alpha=1.0 - zeta)
return loss
|
(self)
|
714,819 |
pytorch_optimizer.optimizer.sgd
|
AccSGD
|
Accelerating Stochastic Gradient Descent For Least Squares Regression.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param kappa: float. ratio of long to short step.
:param xi: float. statistical advantage parameter.
:param constant: float. any small constant under 1.
:param weight_decay: float. weight decay.
|
class AccSGD(Optimizer, BaseOptimizer):
r"""Accelerating Stochastic Gradient Descent For Least Squares Regression.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param kappa: float. ratio of long to short step.
:param xi: float. statistical advantage parameter.
:param constant: float. any small constant under 1.
:param weight_decay: float. weight decay.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
kappa: float = 1000.0,
xi: float = 10.0,
constant: float = 0.7,
weight_decay: float = 0.0,
):
self.validate_learning_rate(lr)
self.validate_non_negative(kappa, 'kappa')
self.validate_non_negative(xi, 'xi')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_boundary(constant, boundary=1.0, bound_type='upper')
defaults: DEFAULTS = {
'lr': lr,
'kappa': kappa,
'xi': xi,
'constant': constant,
'weight_decay': weight_decay,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AccSGD'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['momentum_buffer'] = p.clone()
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
large_lr: float = group['lr'] * group['kappa'] / group['constant']
alpha: float = 1.0 - (group['xi'] * (group['constant'] ** 2) / group['kappa'])
beta: float = 1.0 - alpha
zeta: float = group['constant'] / (group['constant'] + beta)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['momentum_buffer'] = p.clone()
self.apply_weight_decay(
p,
grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=False,
fixed_decay=False,
)
buf = state['momentum_buffer']
buf.mul_((1.0 / beta) - 1.0).add_(grad, alpha=-large_lr).add_(p).mul_(beta)
p.add_(grad, alpha=-group['lr']).mul_(zeta).add_(buf, alpha=1.0 - zeta)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, kappa: float = 1000.0, xi: float = 10.0, constant: float = 0.7, weight_decay: float = 0.0)
|
714,821 |
pytorch_optimizer.optimizer.sgd
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
kappa: float = 1000.0,
xi: float = 10.0,
constant: float = 0.7,
weight_decay: float = 0.0,
):
self.validate_learning_rate(lr)
self.validate_non_negative(kappa, 'kappa')
self.validate_non_negative(xi, 'xi')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_boundary(constant, boundary=1.0, bound_type='upper')
defaults: DEFAULTS = {
'lr': lr,
'kappa': kappa,
'xi': xi,
'constant': constant,
'weight_decay': weight_decay,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, kappa: float = 1000.0, xi: float = 10.0, constant: float = 0.7, weight_decay: float = 0.0)
|
714,824 |
pytorch_optimizer.optimizer.sgd
|
__str__
| null |
def __str__(self) -> str:
return 'AccSGD'
|
(self) -> str
|
714,860 |
pytorch_optimizer.optimizer.adabelief
|
AdaBelief
|
Adapting Step-sizes by the Belief in Observed Gradients.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param rectify: bool. perform the rectified update similar to RAdam.
:param n_sma_threshold: int. SMA threshold (5 is recommended).
:param degenerated_to_sgd: bool. perform SGD update when variance of gradient is high.
:param ams_bound: bool. whether to use the AMSBound variant.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdaBelief(Optimizer, BaseOptimizer):
r"""Adapting Step-sizes by the Belief in Observed Gradients.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param rectify: bool. perform the rectified update similar to RAdam.
    :param n_sma_threshold: int. SMA threshold (5 is recommended).
:param degenerated_to_sgd: bool. perform SGD update when variance of gradient is high.
:param ams_bound: bool. whether to use the AMSBound variant.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
rectify: bool = False,
n_sma_threshold: int = 5,
degenerated_to_sgd: bool = True,
ams_bound: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-16,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.n_sma_threshold = n_sma_threshold
self.degenerated_to_sgd = degenerated_to_sgd
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'rectify': rectify,
'ams_bound': ams_bound,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdaBelief'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_var'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
if group['ams_bound']:
state['max_exp_avg_var'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
step_size, n_sma = self.get_rectify_step_size(
is_rectify=group['rectify'],
step=group['step'],
lr=group['lr'],
beta2=beta2,
n_sma_threshold=self.n_sma_threshold,
degenerated_to_sgd=self.degenerated_to_sgd,
)
step_size = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=step_size,
bias_correction1=bias_correction1,
)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_var'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
if group['ams_bound']:
state['max_exp_avg_var'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
grad_residual = grad - exp_avg
exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1.0 - beta2).add_(group['eps'])
de_nom = self.apply_ams_bound(
ams_bound=group['ams_bound'],
exp_avg_sq=exp_avg_var,
max_exp_avg_sq=state.get('max_exp_avg_var', None),
eps=group['eps'],
)
if not group['rectify']:
de_nom.div_(bias_correction2_sq)
p.addcdiv_(exp_avg, de_nom, value=-step_size)
continue
if n_sma >= self.n_sma_threshold:
p.addcdiv_(exp_avg, de_nom, value=-step_size)
elif step_size > 0:
p.add_(exp_avg, alpha=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, rectify: bool = False, n_sma_threshold: int = 5, degenerated_to_sgd: bool = True, ams_bound: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-16)
|
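A hedged usage sketch showing a few of the flags documented above (top-level import assumed; model and data are placeholders):

import torch

from pytorch_optimizer import AdaBelief  # assumed top-level export

model = torch.nn.Linear(10, 1)
optimizer = AdaBelief(
    model.parameters(),
    lr=1e-3,
    rectify=True,         # RAdam-style rectified update
    adanorm=True,         # AdaNorm variant; keeps an exp_grad_norm buffer per parameter
    weight_decouple=True,
    weight_decay=1e-2,
)

loss = torch.nn.functional.mse_loss(model(torch.randn(8, 10)), torch.randn(8, 1))
loss.backward()
optimizer.step()
optimizer.zero_grad()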
714,862 |
pytorch_optimizer.optimizer.adabelief
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
rectify: bool = False,
n_sma_threshold: int = 5,
degenerated_to_sgd: bool = True,
ams_bound: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-16,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.n_sma_threshold = n_sma_threshold
self.degenerated_to_sgd = degenerated_to_sgd
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'rectify': rectify,
'ams_bound': ams_bound,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, rectify: bool = False, n_sma_threshold: int = 5, degenerated_to_sgd: bool = True, ams_bound: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-16)
|
714,865 |
pytorch_optimizer.optimizer.adabelief
|
__str__
| null |
def __str__(self) -> str:
return 'AdaBelief'
|
(self) -> str
|
714,886 |
pytorch_optimizer.optimizer.adabelief
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_avg_var'] = torch.zeros_like(p)
            if group['adanorm']:
                state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
            if group['ams_bound']:
                state['max_exp_avg_var'] = torch.zeros_like(p)
|
(self)
|
714,901 |
pytorch_optimizer.optimizer.adabound
|
AdaBound
|
Adaptive Gradient Methods with Dynamic Bound of Learning Rate.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param final_lr: float. final learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param gamma: float. convergence speed of the bound functions.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ams_bound: bool. whether to use the AMSBound variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdaBound(Optimizer, BaseOptimizer):
r"""Adaptive Gradient Methods with Dynamic Bound of Learning Rate.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param final_lr: float. final learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param gamma: float. convergence speed of the bound functions.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ams_bound: bool. whether to use the AMSBound variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
final_lr: float = 1e-1,
betas: BETAS = (0.9, 0.999),
gamma: float = 1e-3,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
ams_bound: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'final_lr': final_lr,
'gamma': gamma,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
self.base_lrs: List[float] = [group['lr'] for group in self.param_groups]
def __str__(self) -> str:
return 'AdaBound'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group, base_lr in zip(self.param_groups, self.base_lrs):
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
final_lr: float = group['final_lr'] * group['lr'] / base_lr
lower_bound: float = final_lr * (1 - 1 / (group['gamma'] * group['step'] + 1))
upper_bound: float = final_lr * (1 + 1 / (group['gamma'] * group['step']))
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
de_nom = self.apply_ams_bound(
ams_bound=group['ams_bound'],
exp_avg_sq=exp_avg_sq,
max_exp_avg_sq=state.get('max_exp_avg_sq', None),
eps=group['eps'],
)
step_size = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['lr'] * bias_correction2_sq,
bias_correction1=bias_correction1,
)
step_size = torch.full_like(de_nom, fill_value=step_size)
step_size.div_(de_nom).clamp_(min=lower_bound, max=upper_bound).mul_(exp_avg)
p.add_(-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, final_lr: float = 0.1, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), gamma: float = 0.001, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, ams_bound: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
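Illustrative usage sketch for the AdaBound entry above (not part of the dataset record): the import path follows the package field, while the toy model, random data, and hyper-parameter values are assumptions.

import torch
import torch.nn.functional as F
from pytorch_optimizer.optimizer.adabound import AdaBound

model = torch.nn.Linear(10, 1)
optimizer = AdaBound(model.parameters(), lr=1e-3, final_lr=1e-1, gamma=1e-3)

x, y = torch.randn(32, 10), torch.randn(32, 1)
for _ in range(5):
    optimizer.zero_grad()
    loss = F.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()  # per-element step size is clamped into [lower_bound, upper_bound]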
714,903 |
pytorch_optimizer.optimizer.adabound
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
final_lr: float = 1e-1,
betas: BETAS = (0.9, 0.999),
gamma: float = 1e-3,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
ams_bound: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'final_lr': final_lr,
'gamma': gamma,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
self.base_lrs: List[float] = [group['lr'] for group in self.param_groups]
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, final_lr: float = 0.1, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), gamma: float = 0.001, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, ams_bound: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
714,906 |
pytorch_optimizer.optimizer.adabound
|
__str__
| null |
def __str__(self) -> str:
return 'AdaBound'
|
(self) -> str
|
714,927 |
pytorch_optimizer.optimizer.adabound
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_avg_sq'] = torch.zeros_like(p)
            if group['ams_bound']:
                state['max_exp_avg_sq'] = torch.zeros_like(p)
|
(self)
|
714,942 |
pytorch_optimizer.optimizer.adadelta
|
AdaDelta
|
An Adaptive Learning Rate Method.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param rho: float. coefficient used for computing a running average of squared gradients.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdaDelta(Optimizer, BaseOptimizer):
r"""An Adaptive Learning Rate Method.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param rho: float. coefficient used for computing a running average of squared gradients.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
rho: float = 0.9,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_range(rho, 'rho', 0.0, 1.0)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'rho': rho,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdaDelta'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['square_avg'] = torch.zeros_like(p)
state['acc_delta'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
rho: float = group['rho']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['square_avg'] = torch.zeros_like(p)
state['acc_delta'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
square_avg, acc_delta = state['square_avg'], state['acc_delta']
square_avg.mul_(rho).addcmul_(grad, grad, value=1.0 - rho)
std = square_avg.add(group['eps']).sqrt_()
delta = acc_delta.add(group['eps']).sqrt_().div_(std).mul_(grad)
acc_delta.mul_(rho).addcmul_(delta, delta, value=1.0 - rho)
p.add_(delta, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, rho: float = 0.9, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, eps: float = 1e-06)
|
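Illustrative usage sketch for the AdaDelta entry above (model and data are assumptions); note that lr defaults to 1.0 in this implementation.

import torch
import torch.nn.functional as F
from pytorch_optimizer.optimizer.adadelta import AdaDelta

model = torch.nn.Linear(10, 1)
optimizer = AdaDelta(model.parameters(), lr=1.0, rho=0.9)

x, y = torch.randn(32, 10), torch.randn(32, 1)
optimizer.zero_grad()
F.mse_loss(model(x), y).backward()
optimizer.step()  # update scale comes from the accumulated delta / squared-gradient statistics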
714,944 |
pytorch_optimizer.optimizer.adadelta
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
rho: float = 0.9,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_range(rho, 'rho', 0.0, 1.0)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'rho': rho,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, rho: float = 0.9, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, eps: float = 1e-06)
|
714,947 |
pytorch_optimizer.optimizer.adadelta
|
__str__
| null |
def __str__(self) -> str:
return 'AdaDelta'
|
(self) -> str
|
714,983 |
pytorch_optimizer.optimizer.adafactor
|
AdaFactor
|
Adaptive Learning Rates with Sublinear Memory Cost.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param decay_rate: float. coefficient used to compute running averages of square gradient.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param clip_threshold: float. threshold of root-mean-square of final gradient update.
:param ams_bound: bool. whether to use the AMSBound variant.
:param scale_parameter: bool. if true, learning rate is scaled by root-mean-square of parameter.
:param relative_step: bool. if true, time-dependent learning rate is computed instead of external learning rate.
:param warmup_init: bool. time-dependent learning rate computation depends on whether warm-up initialization
is being used.
:param eps1: float. term added to the denominator to improve numerical stability.
:param eps2: float. term added to the denominator to improve numerical stability.
|
class AdaFactor(Optimizer, BaseOptimizer):
r"""Adaptive Learning Rates with Sublinear Memory Cost.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param decay_rate: float. coefficient used to compute running averages of square gradient.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param clip_threshold: float. threshold of root-mean-square of final gradient update.
:param ams_bound: bool. whether to use the AMSBound variant.
:param scale_parameter: bool. if true, learning rate is scaled by root-mean-square of parameter.
:param relative_step: bool. if true, time-dependent learning rate is computed instead of external learning rate.
:param warmup_init: bool. time-dependent learning rate computation depends on whether warm-up initialization
is being used.
:param eps1: float. term added to the denominator to improve numerical stability.
:param eps2: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: Optional[float] = 1e-3,
betas: BETAS = (0.9, 0.999),
decay_rate: float = -0.8,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
clip_threshold: float = 1.0,
ams_bound: bool = False,
scale_parameter: bool = True,
relative_step: bool = True,
warmup_init: bool = False,
eps1: float = 1e-30,
eps2: float = 1e-3,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps1, 'eps1')
self.validate_non_negative(eps2, 'eps2')
self.decay_rate = decay_rate
self.clip_threshold = clip_threshold
self.eps1 = eps1
self.eps2 = eps2
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'scale_parameter': scale_parameter,
'relative_step': relative_step,
'warmup_init': warmup_init,
'eps1': eps1,
'eps2': eps2,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdaFactor'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
grad = p.grad
grad_shape: Tuple[int, ...] = grad.shape
factored: bool = self.get_options(grad_shape)
state['exp_avg'] = torch.zeros_like(p)
if factored:
state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1], dtype=grad.dtype, device=grad.device)
state['exp_avg_sq_col'] = torch.zeros(
grad_shape[:-2] + grad_shape[-1:], dtype=grad.dtype, device=grad.device
)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
if group['ams_bound']:
state['exp_avg_sq_hat'] = torch.zeros_like(grad)
state['RMS'] = 0.0
def get_lr(
self, lr: float, step: int, rms: float, relative_step: bool, warmup_init: bool, scale_parameter: bool
) -> float:
r"""Get AdaFactor learning rate."""
relative_step_size: float = lr
if relative_step:
min_step: float = 1e-6 * step if warmup_init else 1e-2
relative_step_size = min(min_step, 1.0 / math.sqrt(step))
param_scale: float = 1.0 if scale_parameter else max(self.eps2, rms)
return param_scale * relative_step_size
@staticmethod
def get_options(shape: Tuple[int, ...]) -> bool:
r"""Get `factored`."""
return len(shape) >= 2
@staticmethod
def get_rms(x: torch.Tensor) -> float:
r"""Get RMS."""
return x.norm(2) / math.sqrt(x.numel())
@staticmethod
def approximate_sq_grad(
exp_avg_sq_row: torch.Tensor,
exp_avg_sq_col: torch.Tensor,
output: torch.Tensor,
):
r"""Get approximation of EMA of squared gradient."""
r_factor: torch.Tensor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
c_factor: torch.Tensor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
torch.mul(r_factor, c_factor, out=output)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, _ = group['betas']
beta2_t: float = 1.0 - math.pow(group['step'], self.decay_rate)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
grad_shape: Tuple[int, ...] = grad.shape
factored: bool = self.get_options(grad_shape)
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
if factored:
state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1], dtype=grad.dtype, device=grad.device)
state['exp_avg_sq_col'] = torch.zeros(
grad_shape[:-2] + grad_shape[-1:], dtype=grad.dtype, device=grad.device
)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
if group['ams_bound']:
state['exp_avg_sq_hat'] = torch.zeros_like(grad)
state['RMS'] = 0.0
state['RMS'] = self.get_rms(p)
lr: float = self.get_lr(
lr=group['lr'],
step=group['step'],
rms=state['RMS'],
relative_step=group['relative_step'],
warmup_init=group['warmup_init'],
scale_parameter=group['scale_parameter'],
)
update = torch.mul(grad, grad).add_(self.eps1)
if factored:
exp_avg_sq_row, exp_avg_sq_col = state['exp_avg_sq_row'], state['exp_avg_sq_col']
exp_avg_sq_row.mul_(beta2_t).add_(update.mean(dim=-1), alpha=1.0 - beta2_t)
exp_avg_sq_col.mul_(beta2_t).add_(update.mean(dim=-2), alpha=1.0 - beta2_t)
self.approximate_sq_grad(exp_avg_sq_row, exp_avg_sq_col, update)
else:
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2_t).add_(update, alpha=1.0 - beta2_t)
torch.rsqrt(exp_avg_sq, out=update)
if group['ams_bound']:
exp_avg_sq_hat = state['exp_avg_sq_hat']
torch.max(exp_avg_sq_hat, 1 / update, out=exp_avg_sq_hat)
torch.rsqrt(exp_avg_sq_hat / beta2_t, out=update)
update.mul_(grad)
update.div_((self.get_rms(update) / self.clip_threshold).clamp_(min=1.0)).mul_(lr)
exp_avg = state['exp_avg']
exp_avg.mul_(beta1).add_(update, alpha=1.0 - beta1)
self.apply_weight_decay(
p=p,
grad=None,
lr=lr,
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
p.add_(-exp_avg)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: Optional[float] = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), decay_rate: float = -0.8, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, clip_threshold: float = 1.0, ams_bound: bool = False, scale_parameter: bool = True, relative_step: bool = True, warmup_init: bool = False, eps1: float = 1e-30, eps2: float = 0.001)
|
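Illustrative usage sketch for the AdaFactor entry above (toy model and data are assumptions). With the default relative_step=True the external lr is replaced by a time-dependent schedule, so the sketch passes relative_step=False to use lr directly.

import torch
import torch.nn.functional as F
from pytorch_optimizer.optimizer.adafactor import AdaFactor

model = torch.nn.Linear(10, 1)
optimizer = AdaFactor(model.parameters(), lr=1e-3, relative_step=False)

x, y = torch.randn(32, 10), torch.randn(32, 1)
optimizer.zero_grad()
F.mse_loss(model(x), y).backward()
optimizer.step()  # parameters with >= 2 dims use the factored row/column second-moment estimate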
714,985 |
pytorch_optimizer.optimizer.adafactor
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: Optional[float] = 1e-3,
betas: BETAS = (0.9, 0.999),
decay_rate: float = -0.8,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
clip_threshold: float = 1.0,
ams_bound: bool = False,
scale_parameter: bool = True,
relative_step: bool = True,
warmup_init: bool = False,
eps1: float = 1e-30,
eps2: float = 1e-3,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps1, 'eps1')
self.validate_non_negative(eps2, 'eps2')
self.decay_rate = decay_rate
self.clip_threshold = clip_threshold
self.eps1 = eps1
self.eps2 = eps2
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'scale_parameter': scale_parameter,
'relative_step': relative_step,
'warmup_init': warmup_init,
'eps1': eps1,
'eps2': eps2,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: Optional[float] = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), decay_rate: float = -0.8, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, clip_threshold: float = 1.0, ams_bound: bool = False, scale_parameter: bool = True, relative_step: bool = True, warmup_init: bool = False, eps1: float = 1e-30, eps2: float = 0.001)
|
714,988 |
pytorch_optimizer.optimizer.adafactor
|
__str__
| null |
def __str__(self) -> str:
return 'AdaFactor'
|
(self) -> str
|
714,998 |
pytorch_optimizer.optimizer.adafactor
|
approximate_sq_grad
|
Get approximation of EMA of squared gradient.
|
@staticmethod
def approximate_sq_grad(
exp_avg_sq_row: torch.Tensor,
exp_avg_sq_col: torch.Tensor,
output: torch.Tensor,
):
r"""Get approximation of EMA of squared gradient."""
r_factor: torch.Tensor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
c_factor: torch.Tensor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
torch.mul(r_factor, c_factor, out=output)
|
(exp_avg_sq_row: torch.Tensor, exp_avg_sq_col: torch.Tensor, output: torch.Tensor)
|
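A small check of the factored second-moment approximation above (illustrative only): for a constant squared-gradient matrix the row/column reconstruction matches the exact element-wise rsqrt.

import torch
from pytorch_optimizer.optimizer.adafactor import AdaFactor

v = torch.ones(2, 3)                        # stand-in for the EMA of grad ** 2
row, col = v.mean(dim=-1), v.mean(dim=-2)   # the factored statistics AdaFactor stores
out = torch.empty_like(v)
AdaFactor.approximate_sq_grad(row, col, out)
assert torch.allclose(out, v.rsqrt())       # both are all-ones for a constant input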
715,001 |
pytorch_optimizer.optimizer.adafactor
|
get_lr
|
Get AdaFactor learning rate.
|
def get_lr(
self, lr: float, step: int, rms: float, relative_step: bool, warmup_init: bool, scale_parameter: bool
) -> float:
r"""Get AdaFactor learning rate."""
relative_step_size: float = lr
if relative_step:
min_step: float = 1e-6 * step if warmup_init else 1e-2
relative_step_size = min(min_step, 1.0 / math.sqrt(step))
param_scale: float = 1.0 if scale_parameter else max(self.eps2, rms)
return param_scale * relative_step_size
|
(self, lr: float, step: int, rms: float, relative_step: bool, warmup_init: bool, scale_parameter: bool) -> float
|
715,002 |
pytorch_optimizer.optimizer.adafactor
|
get_options
|
Get `factored`.
|
@staticmethod
def get_options(shape: Tuple[int, ...]) -> bool:
r"""Get `factored`."""
return len(shape) >= 2
|
(shape: Tuple[int, ...]) -> bool
|
715,004 |
pytorch_optimizer.optimizer.adafactor
|
get_rms
|
Get RMS.
|
@staticmethod
def get_rms(x: torch.Tensor) -> float:
r"""Get RMS."""
return x.norm(2) / math.sqrt(x.numel())
|
(x: torch.Tensor) -> float
|
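A quick worked example of get_rms (illustrative): an all-ones tensor has a root-mean-square of 1, since its 2-norm equals the square root of its element count.

import torch
from pytorch_optimizer.optimizer.adafactor import AdaFactor

print(AdaFactor.get_rms(torch.ones(2, 3)))       # sqrt(6) / sqrt(6) -> 1.0
print(AdaFactor.get_rms(torch.full((4,), 2.0)))  # 4 / 2 -> 2.0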
715,013 |
pytorch_optimizer.optimizer.adafactor
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            grad = p.grad
            grad_shape: Tuple[int, ...] = grad.shape
            factored: bool = self.get_options(grad_shape)
            state['exp_avg'] = torch.zeros_like(p)
            if factored:
                state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1], dtype=grad.dtype, device=grad.device)
                state['exp_avg_sq_col'] = torch.zeros(
                    grad_shape[:-2] + grad_shape[-1:], dtype=grad.dtype, device=grad.device
                )
            else:
                state['exp_avg_sq'] = torch.zeros_like(grad)
            if group['ams_bound']:
                state['exp_avg_sq_hat'] = torch.zeros_like(grad)
            state['RMS'] = 0.0
|
(self)
|
715,028 |
pytorch_optimizer.optimizer.shampoo_utils
|
AdaGradGraft
|
Graft using AdaGrad. Essentially an implementation of AdaGrad with momentum.
:param var: torch.Tensor. variable.
:param diagonal_eps: float. diagonal epsilon.
|
class AdaGradGraft(SGDGraft):
r"""Graft using AdaGrad. Essentially an implementation of AdaGrad with momentum.
:param var: torch.Tensor. variable.
:param diagonal_eps: float. diagonal epsilon.
"""
def __init__(self, var: torch.Tensor, diagonal_eps: float):
super().__init__(var)
self.diagonal_eps = diagonal_eps
self.statistics: torch.Tensor = torch.zeros_like(var)
def add_statistics(self, grad: torch.Tensor, _):
r"""Add the statistics."""
self.statistics.add_(grad.pow(2))
def precondition_gradient(self, grad: torch.Tensor) -> torch.Tensor:
r"""Get preconditioned gradient."""
return grad / (torch.sqrt(self.statistics) + self.diagonal_eps)
|
(var: torch.Tensor, diagonal_eps: float)
|
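Illustrative sketch of the grafting helper above (not part of the record): accumulate squared gradients, then precondition a gradient AdaGrad-style; the tensors are toy values.

import torch
from pytorch_optimizer.optimizer.shampoo_utils import AdaGradGraft

var = torch.zeros(3)
graft = AdaGradGraft(var, diagonal_eps=1e-10)

grad = torch.tensor([1.0, 2.0, 3.0])
graft.add_statistics(grad, None)          # statistics += grad ** 2
print(graft.precondition_gradient(grad))  # grad / sqrt(statistics) -> roughly all ones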
715,029 |
pytorch_optimizer.optimizer.shampoo_utils
|
__init__
| null |
def __init__(self, var: torch.Tensor, diagonal_eps: float):
super().__init__(var)
self.diagonal_eps = diagonal_eps
self.statistics: torch.Tensor = torch.zeros_like(var)
|
(self, var: torch.Tensor, diagonal_eps: float)
|
715,030 |
pytorch_optimizer.optimizer.shampoo_utils
|
add_statistics
|
Add the statistics.
|
def add_statistics(self, grad: torch.Tensor, _):
r"""Add the statistics."""
self.statistics.add_(grad.pow(2))
|
(self, grad: torch.Tensor, _)
|
715,031 |
pytorch_optimizer.optimizer.shampoo_utils
|
precondition_gradient
|
Get preconditioned gradient.
|
def precondition_gradient(self, grad: torch.Tensor) -> torch.Tensor:
r"""Get preconditioned gradient."""
return grad / (torch.sqrt(self.statistics) + self.diagonal_eps)
|
(self, grad: torch.Tensor) -> torch.Tensor
|
715,032 |
pytorch_optimizer.optimizer.shampoo_utils
|
update_momentum
|
Update momentum.
|
def update_momentum(self, update: torch.Tensor, beta1: float) -> torch.Tensor:
r"""Update momentum."""
self.momentum.mul_(beta1).add_(update)
return self.momentum
|
(self, update: torch.Tensor, beta1: float) -> torch.Tensor
|
715,033 |
pytorch_optimizer.optimizer.adahessian
|
AdaHessian
|
An Adaptive Second Order Optimizer for Machine Learning.
Requires `loss.backward(create_graph=True)` in order to calculate hessians.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param hessian_power: float. exponent of the hessian trace.
:param update_period: int. number of steps after which to apply hessian approximation.
:param num_samples: int. times to sample `z` for the approximation of the hessian trace.
:param hessian_distribution: HUTCHINSON_G. type of distribution to initialize hessian.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdaHessian(Optimizer, BaseOptimizer):
r"""An Adaptive Second Order Optimizer for Machine Learning.
Requires `loss.backward(create_graph=True)` in order to calculate hessians.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param hessian_power: float. exponent of the hessian trace.
:param update_period: int. number of steps after which to apply hessian approximation.
:param num_samples: int. times to sample `z` for the approximation of the hessian trace.
:param hessian_distribution: HUTCHINSON_G. type of distribution to initialize hessian.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-1,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
hessian_power: float = 1.0,
update_period: int = 1,
num_samples: int = 1,
hessian_distribution: HUTCHINSON_G = 'rademacher',
adam_debias: bool = False,
eps: float = 1e-16,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.validate_range(hessian_power, 'Hessian Power', 0, 1, range_type='(]')
self.update_period = update_period
self.num_samples = num_samples
self.distribution = hessian_distribution
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'hessian_power': hessian_power,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdaHessian'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_hessian_diag_sq'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None, hessian: Optional[List[torch.Tensor]] = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
step: int = self.param_groups[0].get('step', 1)
if hessian is not None:
self.set_hessian(self.param_groups, self.state, hessian)
elif step % self.update_period == 0:
self.zero_hessian(self.param_groups, self.state)
self.compute_hutchinson_hessian(
param_groups=self.param_groups,
state=self.state,
num_samples=self.num_samples,
distribution=self.distribution,
)
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2: float = 1.0 - beta2 ** group['step']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if 'exp_avg' not in state:
state['exp_avg'] = torch.zeros_like(p)
state['exp_hessian_diag_sq'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
if 'hessian' in state and (group['step'] % self.update_period == 0 or hessian is not None):
exp_hessian_diag_sq.mul_(beta2).addcmul_(state['hessian'], state['hessian'], value=1.0 - beta2)
de_nom = (exp_hessian_diag_sq / bias_correction2).pow_(group['hessian_power'] / 2).add_(group['eps'])
step_size: float = self.apply_adam_debias(group['adam_debias'], group['lr'], bias_correction1)
p.addcdiv_(exp_avg, de_nom, value=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.1, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, hessian_power: float = 1.0, update_period: int = 1, num_samples: int = 1, hessian_distribution: Literal['gaussian', 'rademacher'] = 'rademacher', adam_debias: bool = False, eps: float = 1e-16)
|
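Illustrative usage sketch for AdaHessian (model and data are assumptions): per the docstring, backward must be called with create_graph=True so the Hutchinson estimate of the Hessian diagonal can be formed inside step().

import torch
import torch.nn.functional as F
from pytorch_optimizer.optimizer.adahessian import AdaHessian

model = torch.nn.Linear(10, 1)
optimizer = AdaHessian(model.parameters(), lr=1e-1, update_period=1)

x, y = torch.randn(16, 10), torch.randn(16, 1)
optimizer.zero_grad()
loss = F.mse_loss(model(x), y)
loss.backward(create_graph=True)  # keep the graph so second-order information is available
optimizer.step()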
715,035 |
pytorch_optimizer.optimizer.adahessian
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-1,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
hessian_power: float = 1.0,
update_period: int = 1,
num_samples: int = 1,
hessian_distribution: HUTCHINSON_G = 'rademacher',
adam_debias: bool = False,
eps: float = 1e-16,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.validate_range(hessian_power, 'Hessian Power', 0, 1, range_type='(]')
self.update_period = update_period
self.num_samples = num_samples
self.distribution = hessian_distribution
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'hessian_power': hessian_power,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.1, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, hessian_power: float = 1.0, update_period: int = 1, num_samples: int = 1, hessian_distribution: Literal['gaussian', 'rademacher'] = 'rademacher', adam_debias: bool = False, eps: float = 1e-16)
|
715,038 |
pytorch_optimizer.optimizer.adahessian
|
__str__
| null |
def __str__(self) -> str:
return 'AdaHessian'
|
(self) -> str
|
715,059 |
pytorch_optimizer.optimizer.adahessian
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_hessian_diag_sq'] = torch.zeros_like(p)
|
(self)
|
715,074 |
pytorch_optimizer.optimizer.adamax
|
AdaMax
|
An Adaptive and Momental Bound Method for Stochastic Learning.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdaMax(Optimizer, BaseOptimizer):
r"""An Adaptive and Momental Bound Method for Stochastic Learning.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdaMax'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_inf'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_inf'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
norm_buf = torch.cat(
(exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(group['eps']).unsqueeze_(0)),
dim=0,
)
torch.max(norm_buf, dim=0, keepdim=False, out=(exp_inf, exp_inf.new().long()))
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['lr'],
bias_correction1=bias_correction1,
)
p.addcdiv_(exp_avg, exp_inf, value=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
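Illustrative usage sketch for the AdaMax entry above (toy model, data, and hyper-parameters are assumptions).

import torch
import torch.nn.functional as F
from pytorch_optimizer.optimizer.adamax import AdaMax

model = torch.nn.Linear(10, 1)
optimizer = AdaMax(model.parameters(), lr=1e-3, betas=(0.9, 0.999))

x, y = torch.randn(32, 10), torch.randn(32, 1)
optimizer.zero_grad()
F.mse_loss(model(x), y).backward()
optimizer.step()  # exp_inf keeps a beta2-decayed element-wise max of past gradient magnitudes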
715,076 |
pytorch_optimizer.optimizer.adamax
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
715,079 |
pytorch_optimizer.optimizer.adamax
|
__str__
| null |
def __str__(self) -> str:
return 'AdaMax'
|
(self) -> str
|
715,100 |
pytorch_optimizer.optimizer.adamax
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_inf'] = torch.zeros_like(p)
            if group['adanorm']:
                state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
|
(self)
|
715,115 |
pytorch_optimizer.optimizer.adamod
|
AdaMod
|
An Adaptive and Momental Bound Method for Stochastic Learning.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
beta3 is for smoothing coefficient for adaptive learning rates.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdaMod(Optimizer, BaseOptimizer):
r"""An Adaptive and Momental Bound Method for Stochastic Learning.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
beta3 is for smoothing coefficient for adaptive learning rates.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.99, 0.9999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdaMod'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['exp_avg_lr'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2, beta3 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['exp_avg_lr'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
step_size = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['lr'] * bias_correction2_sq,
bias_correction1=bias_correction1,
)
step_size = torch.full_like(de_nom, fill_value=step_size)
step_size.div_(de_nom)
exp_avg_lr = state['exp_avg_lr']
exp_avg_lr.mul_(beta3).add_(step_size, alpha=1.0 - beta3)
torch.min(step_size, exp_avg_lr, out=step_size)
step_size.mul_(exp_avg)
p.add_(-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.99, 0.9999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
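Illustrative usage sketch for the AdaMod entry above (toy model and data are assumptions); the third beta smooths the per-parameter step sizes.

import torch
import torch.nn.functional as F
from pytorch_optimizer.optimizer.adamod import AdaMod

model = torch.nn.Linear(10, 1)
optimizer = AdaMod(model.parameters(), lr=1e-3, betas=(0.9, 0.99, 0.9999))

x, y = torch.randn(32, 10), torch.randn(32, 1)
optimizer.zero_grad()
F.mse_loss(model(x), y).backward()
optimizer.step()  # the applied step size is capped by its beta3-smoothed running average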
715,117 |
pytorch_optimizer.optimizer.adamod
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.99, 0.9999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.99, 0.9999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
715,120 |
pytorch_optimizer.optimizer.adamod
|
__str__
| null |
def __str__(self) -> str:
return 'AdaMod'
|
(self) -> str
|
715,141 |
pytorch_optimizer.optimizer.adamod
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_avg_sq'] = torch.zeros_like(p)
            state['exp_avg_lr'] = torch.zeros_like(p)
|
(self)
|
715,156 |
pytorch_optimizer.optimizer.adanorm
|
AdaNorm
|
Symbolic Discovery of Optimization Algorithms.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ams_bound: bool. whether to use the AMSBound variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdaNorm(Optimizer, BaseOptimizer):
r"""Symbolic Discovery of Optimization Algorithms.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ams_bound: bool. whether to use the AMSBound variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.99),
r: float = 0.95,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
ams_bound: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'r': r,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdaNorm'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_var'] = torch.zeros_like(p)
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
if group['ams_bound']:
state['max_exp_avg_var'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_var'] = torch.zeros_like(p)
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
if group['ams_bound']:
state['max_exp_avg_var'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=True,
exp_grad_norm=state['exp_grad_norm'],
r=group['r'],
)
exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_var.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
de_nom = self.apply_ams_bound(
ams_bound=group['ams_bound'],
exp_avg_sq=exp_avg_var,
max_exp_avg_sq=state.get('max_exp_avg_var', None),
eps=group['eps'],
)
de_nom.div_(bias_correction2_sq)
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['lr'],
bias_correction1=bias_correction1,
)
p.addcdiv_(exp_avg, de_nom, value=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.99), r: float = 0.95, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, ams_bound: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
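Illustrative usage sketch for the AdaNorm entry above (toy model and data are assumptions); r is the EMA factor of the gradient norm used to rescale the gradient.

import torch
import torch.nn.functional as F
from pytorch_optimizer.optimizer.adanorm import AdaNorm

model = torch.nn.Linear(10, 1)
optimizer = AdaNorm(model.parameters(), lr=1e-3, r=0.95)

x, y = torch.randn(32, 10), torch.randn(32, 1)
optimizer.zero_grad()
F.mse_loss(model(x), y).backward()
optimizer.step()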
715,158 |
pytorch_optimizer.optimizer.adanorm
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.99),
r: float = 0.95,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
ams_bound: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'r': r,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.99), r: float = 0.95, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, ams_bound: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
715,161 |
pytorch_optimizer.optimizer.adanorm
|
__str__
| null |
def __str__(self) -> str:
return 'AdaNorm'
|
(self) -> str
|
715,182 |
pytorch_optimizer.optimizer.adanorm
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_avg_var'] = torch.zeros_like(p)
            state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
            if group['ams_bound']:
                state['max_exp_avg_var'] = torch.zeros_like(p)
|
(self)
|
715,197 |
pytorch_optimizer.optimizer.adapnm
|
AdaPNM
|
Adam + Positive-Negative Momentum Optimizers.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. use weight_decouple.
:param fixed_decay: bool. fix weight decay.
:param ams_bound: bool. whether to use the ams_bound variant.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdaPNM(Optimizer, BaseOptimizer):
r"""Adam + Positive-Negative Momentum Optimizers.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. use weight_decouple.
:param fixed_decay: bool. fix weight decay.
:param ams_bound: bool. whether to use the ams_bound variant.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999, 1.0),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
ams_bound: bool = True,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdaPNM'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['neg_exp_avg'] = torch.zeros_like(p)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2, beta3 = group['betas']
noise_norm: float = math.sqrt((1 + beta3) ** 2 + beta3 ** 2) # fmt: skip
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['neg_exp_avg'] = torch.zeros_like(p)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
if group['step'] % 2 == 1:
exp_avg, neg_exp_avg = state['exp_avg'], state['neg_exp_avg']
else:
exp_avg, neg_exp_avg = state['neg_exp_avg'], state['exp_avg']
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg_sq = state['exp_avg_sq']
exp_avg.mul_(beta1 ** 2).add_(s_grad, alpha=1.0 - beta1 ** 2) # fmt: skip
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
de_nom = self.apply_ams_bound(
ams_bound=group['ams_bound'],
exp_avg_sq=exp_avg_sq,
max_exp_avg_sq=state.get('max_exp_avg_sq', None),
eps=group['eps'],
)
de_nom.div_(bias_correction2_sq)
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'], step_size=group['lr'], bias_correction1=bias_correction1
)
pn_momentum = exp_avg.mul(1.0 + beta3).add_(neg_exp_avg, alpha=-beta3).mul_(1.0 / noise_norm)
p.addcdiv_(pn_momentum, de_nom, value=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999, 1.0), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, ams_bound: bool = True, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
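A minimal usage sketch for the constructor documented above — the toy linear model, the synthetic batch, and the training loop are illustrative assumptions; the import path follows the pytorch_optimizer.optimizer.adapnm module shown in this entry and assumes the package is installed:

import torch
from pytorch_optimizer.optimizer.adapnm import AdaPNM

model = torch.nn.Linear(16, 1)  # toy model, for illustration only
optimizer = AdaPNM(model.parameters(), lr=1e-3, betas=(0.9, 0.999, 1.0), weight_decay=1e-2)

x, y = torch.randn(8, 16), torch.randn(8, 1)  # synthetic batch
for _ in range(3):
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()  # dense gradients; sparse gradients raise NoSparseGradientError
    optimizer.step()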
715,199 |
pytorch_optimizer.optimizer.adapnm
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999, 1.0),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
ams_bound: bool = True,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999, 1.0), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, ams_bound: bool = True, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
715,202 |
pytorch_optimizer.optimizer.adapnm
|
__str__
| null |
def __str__(self) -> str:
return 'AdaPNM'
|
(self) -> str
|
715,223 |
pytorch_optimizer.optimizer.adapnm
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_avg_sq'] = torch.zeros_like(p)
            state['neg_exp_avg'] = torch.zeros_like(p)
            if group['ams_bound']:
                state['max_exp_avg_sq'] = torch.zeros_like(p)
            if group['adanorm']:
                state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
|
(self)
|
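Restating, as a sketch in my own notation, the update performed by the step method in the AdaPNM class code above (the exp_avg and neg_exp_avg buffers are swapped every other step, so the current buffer was last touched two steps earlier; the exact placement of eps inside apply_ams_bound is not reproduced in this entry):

m_t = \beta_1^2\, m_{t-2} + (1-\beta_1^2)\, g_t, \qquad
v_t = \beta_2\, v_{t-1} + (1-\beta_2)\, g_t^2,

\theta_{t+1} = \theta_t - \frac{\mathrm{lr}}{1-\beta_1^{t}} \cdot
\frac{(1+\beta_3)\, m_t - \beta_3\, m_{t-1}}{\sqrt{(1+\beta_3)^2 + \beta_3^2}\;\sqrt{\hat v_t}},

where \hat v_t is the bias-corrected (optionally AMS-bounded) second moment, and the 1/(1-\beta_1^{t}) factor is dropped when adam_debias is enabled.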
715,238 |
pytorch_optimizer.optimizer.adashift
|
AdaShift
|
Decorrelation and Convergence of Adaptive Learning Rate Methods.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param keep_num: int. number of gradients used to compute first moment estimation.
:param reduce_func: Optional[Callable]. function applied to squared gradients to further reduce the correlation.
If None, no function is applied.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdaShift(Optimizer, BaseOptimizer):
r"""Decorrelation and Convergence of Adaptive Learning Rate Methods.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param keep_num: int. number of gradients used to compute first moment estimation.
:param reduce_func: Optional[Callable]. function applied to squared gradients to further reduce the correlation.
If None, no function is applied.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
keep_num: int = 10,
reduce_func: Optional[Callable] = torch.max,
eps: float = 1e-10,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_positive(keep_num, 'keep_num')
self.validate_non_negative(eps, 'eps')
self.reduce_func: Callable = reduce_func if reduce_func is not None else lambda x: x
defaults: DEFAULTS = {'lr': lr, 'betas': betas, 'keep_num': keep_num, 'eps': eps}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdaShift'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['grad_queue'] = deque([p.grad.clone()], maxlen=group['keep_num'])
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
exp_weight_sum: int = sum(beta1 ** i for i in range(group['keep_num'])) # fmt: skip
first_grad_weight: float = beta1 ** (group['keep_num'] - 1) / exp_weight_sum
last_grad_weight: float = 1.0 / exp_weight_sum
bias_correction: float = 1.0 - beta2 ** (group['step'] - group['keep_num'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['grad_queue'] = deque([grad.clone()], maxlen=group['keep_num'])
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
grad_queue = state['grad_queue']
grad_queue.append(grad.clone())
if len(grad_queue) != group['keep_num']:
continue
offset_grad = grad_queue[0]
exp_avg = state['exp_avg']
exp_avg.sub_(offset_grad, alpha=first_grad_weight).mul_(beta1).add_(grad, alpha=last_grad_weight)
reduced_grad_sq = self.reduce_func(offset_grad.mul_(offset_grad))
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2).add_(reduced_grad_sq, alpha=1.0 - beta2)
de_nom = exp_avg_sq.div(bias_correction).sqrt_().add_(group['eps'])
p.addcdiv_(exp_avg, de_nom, value=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), keep_num: int = 10, reduce_func: Optional[Callable] = torch.max, eps: float = 1e-10)
|
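A minimal usage sketch for the constructor documented above — the toy model, synthetic batch, and loop length are illustrative assumptions; the import path follows the pytorch_optimizer.optimizer.adashift module shown in this entry:

import torch
from pytorch_optimizer.optimizer.adashift import AdaShift

model = torch.nn.Linear(16, 1)  # toy model, for illustration only
optimizer = AdaShift(model.parameters(), lr=1e-3, keep_num=10, reduce_func=torch.max)

x, y = torch.randn(8, 16), torch.randn(8, 1)  # synthetic batch
for _ in range(20):               # parameters only start moving once the internal
    optimizer.zero_grad()         # gradient queue holds keep_num gradients
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()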
715,240 |
pytorch_optimizer.optimizer.adashift
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
keep_num: int = 10,
reduce_func: Optional[Callable] = torch.max,
eps: float = 1e-10,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_positive(keep_num, 'keep_num')
self.validate_non_negative(eps, 'eps')
self.reduce_func: Callable = reduce_func if reduce_func is not None else lambda x: x
defaults: DEFAULTS = {'lr': lr, 'betas': betas, 'keep_num': keep_num, 'eps': eps}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), keep_num: int = 10, reduce_func: Optional[Callable] = torch.max, eps: float = 1e-10)
|
715,243 |
pytorch_optimizer.optimizer.adashift
|
__str__
| null |
def __str__(self) -> str:
return 'AdaShift'
|
(self) -> str
|
715,264 |
pytorch_optimizer.optimizer.adashift
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['grad_queue'] = deque([p.grad.clone()], maxlen=group['keep_num'])
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_avg_sq'] = torch.zeros_like(p)
|
(self)
|
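In steady state (once the gradient queue holds keep_num entries), the incremental update in the step method shown in the AdaShift class code above maintains a normalized, truncated exponential average of the most recent n = keep_num gradients for the first moment, while the second moment only ever sees the gradient leaving that window, reduced by reduce_func — a sketch in my own notation, glossing over the exact off-by-one of the queue indexing:

W = \sum_{i=0}^{n-1}\beta_1^{i}, \qquad
m_t = \frac{1}{W}\sum_{i=0}^{n-1}\beta_1^{i}\, g_{t-i}, \qquad
v_t = \beta_2\, v_{t-1} + (1-\beta_2)\,\phi\big(g_{t-n}^{2}\big),

\theta_{t+1} = \theta_t - \mathrm{lr}\cdot\frac{m_t}{\sqrt{v_t/(1-\beta_2^{\,t-n})}+\epsilon},

where \phi denotes reduce_func; decoupling the gradients used by m_t and v_t is what removes the correlation between the two moments.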
715,279 |
pytorch_optimizer.optimizer.adasmooth
|
AdaSmooth
|
An Adaptive Learning Rate Method based on Effective Ratio.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdaSmooth(Optimizer, BaseOptimizer):
r"""An Adaptive Learning Rate Method based on Effective Ratio.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.5, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdaSmooth'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['prev_param'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
state['n'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['prev_param'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
state['n'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
prev_param = state['prev_param']
p_diff = p - prev_param
s, n = state['s'], state['n']
s.add_(p_diff)
n.add_(p_diff.abs())
c = s.sum().abs_().div_(n.sum()) # e_t
c.mul_(beta2 - beta1).add_(1.0 - beta2)
c_p2 = c.pow(2)
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(1.0 - c_p2).addcmul_(grad, grad, value=c_p2)
step_size = torch.full_like(exp_avg_sq, fill_value=group['lr'])
step_size.div_((exp_avg_sq + group['eps']).sqrt()).mul_(grad)
p.add_(-step_size)
state['prev_param'].copy_(p)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.5, 0.99), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, eps: float = 1e-06)
|
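A minimal usage sketch for the constructor documented above — the toy model and synthetic data are illustrative assumptions; the import path follows the pytorch_optimizer.optimizer.adasmooth module shown in this entry:

import torch
from pytorch_optimizer.optimizer.adasmooth import AdaSmooth

model = torch.nn.Linear(16, 1)  # toy model, for illustration only
optimizer = AdaSmooth(model.parameters(), lr=1e-3, betas=(0.5, 0.99))

x, y = torch.randn(8, 16), torch.randn(8, 1)  # synthetic batch
for _ in range(3):
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()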
715,281 |
pytorch_optimizer.optimizer.adasmooth
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.5, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.5, 0.99), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, eps: float = 1e-06)
|
715,284 |
pytorch_optimizer.optimizer.adasmooth
|
__str__
| null |
def __str__(self) -> str:
return 'AdaSmooth'
|
(self) -> str
|
715,305 |
pytorch_optimizer.optimizer.adasmooth
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['prev_param'] = torch.zeros_like(p)
            state['s'] = torch.zeros_like(p)
            state['n'] = torch.zeros_like(p)
            state['exp_avg_sq'] = torch.zeros_like(p)
|
(self)
|
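A restatement, per parameter tensor, of what the step method shown in the AdaSmooth class code above computes: the scalar effective ratio e_t becomes a smoothing constant c_t whose square drives the running average of squared gradients (notation is mine; k indexes tensor elements, \Delta\theta_\tau = \theta_\tau - \theta_{\tau-1}, and the s and n accumulators are never reset in the code shown):

e_t = \frac{\big|\sum_{k}\sum_{\tau\le t}\Delta\theta_{\tau,k}\big|}{\sum_{k}\sum_{\tau\le t}\big|\Delta\theta_{\tau,k}\big|}, \qquad
c_t = (\beta_2-\beta_1)\,e_t + (1-\beta_2),

v_t = (1-c_t^{2})\,v_{t-1} + c_t^{2}\,g_t^{2}, \qquad
\theta_{t+1} = \theta_t - \mathrm{lr}\cdot\frac{g_t}{\sqrt{v_t+\epsilon}}.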
715,320 |
pytorch_optimizer.optimizer.adai
|
Adai
|
Disentangling the Effects of Adaptive Learning Rate and Momentum.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param stable_weight_decay: bool. perform stable weight decay.
:param dampening: float. dampening for momentum. where dampening < 1, it will show some adaptive-moment behavior.
:param use_gc: bool. use gradient centralization.
:param eps: float. term added to the denominator to improve numerical stability.
|
class Adai(Optimizer, BaseOptimizer):
r"""Disentangling the Effects of Adaptive Learning Rate and Momentum.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param stable_weight_decay: bool. perform stable weight decay.
:param dampening: float. dampening for momentum. where dampening < 1, it will show some adaptive-moment behavior.
:param use_gc: bool. use gradient centralization.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.1, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
stable_weight_decay: bool = False,
dampening: float = 1.0,
use_gc: bool = False,
eps: float = 1e-3,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.use_gc = use_gc
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'stable_weight_decay': stable_weight_decay,
'dampening': dampening,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Adai'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['beta1_prod'] = torch.ones_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
param_size: int = 0
exp_avg_sq_hat_sum: float = 0.0
for group in self.param_groups:
_, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
param_size += p.numel()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['beta1_prod'] = torch.ones_like(p)
state['step'] += 1
if self.use_gc:
centralize_gradient(grad, gc_conv_only=False)
bias_correction2: float = 1.0 - beta2 ** state['step']
if not group['stable_weight_decay'] and group['weight_decay'] > 0.0:
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
exp_avg_sq_hat_sum += exp_avg_sq.sum() / bias_correction2
if param_size == 0:
raise ZeroParameterSizeError()
exp_avg_sq_hat_mean = exp_avg_sq_hat_sum / param_size
for group in self.param_groups:
beta0, beta2 = group['betas']
beta0_dp: float = math.pow(beta0, 1.0 - group['dampening'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
if group['stable_weight_decay'] and group['weight_decay'] > 0.0:
self.apply_weight_decay(
p=p,
grad=grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
bias_correction2: float = 1.0 - beta2 ** state['step']
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg_sq_hat = exp_avg_sq / bias_correction2
beta1 = (
1.0
- (exp_avg_sq_hat / exp_avg_sq_hat_mean).pow_(1.0 / (3.0 - 2.0 * group['dampening'])).mul_(beta0)
).clamp_(0.0, 1.0 - group['eps'])
beta3 = (1.0 - beta1).pow_(group['dampening'])
beta1_prod = state['beta1_prod']
beta1_prod.mul_(beta1)
exp_avg.mul_(beta1).addcmul_(beta3, grad)
exp_avg_hat = exp_avg.div(1.0 - beta1_prod).mul_(beta0_dp)
p.add_(exp_avg_hat, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.1, 0.99), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, stable_weight_decay: bool = False, dampening: float = 1.0, use_gc: bool = False, eps: float = 0.001)
|
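A minimal usage sketch for the constructor documented above — the toy model and synthetic data are illustrative assumptions; the import path follows the pytorch_optimizer.optimizer.adai module shown in this entry:

import torch
from pytorch_optimizer.optimizer.adai import Adai

model = torch.nn.Linear(16, 1)  # toy model, for illustration only
optimizer = Adai(model.parameters(), lr=1e-3, betas=(0.1, 0.99), dampening=1.0)

x, y = torch.randn(8, 16), torch.randn(8, 1)  # synthetic batch
for _ in range(3):
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()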
715,322 |
pytorch_optimizer.optimizer.adai
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.1, 0.99),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
stable_weight_decay: bool = False,
dampening: float = 1.0,
use_gc: bool = False,
eps: float = 1e-3,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.use_gc = use_gc
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'stable_weight_decay': stable_weight_decay,
'dampening': dampening,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.1, 0.99), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, stable_weight_decay: bool = False, dampening: float = 1.0, use_gc: bool = False, eps: float = 0.001)
|
715,325 |
pytorch_optimizer.optimizer.adai
|
__str__
| null |
def __str__(self) -> str:
return 'Adai'
|
(self) -> str
|
715,346 |
pytorch_optimizer.optimizer.adai
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        for p in group['params']:
            state = self.state[p]
            state['step'] = 0
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_avg_sq'] = torch.zeros_like(p)
            state['beta1_prod'] = torch.ones_like(p)
|
(self)
|
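The second loop of the step method shown in the Adai class code above turns the global mean of the bias-corrected second moment into an element-wise momentum coefficient — a sketch in my own notation, with \beta_0 the first element of betas, \lambda = dampening, and \bar v_t the mean of \hat v_t over every parameter being optimized:

\hat v_{t,i} = \frac{v_{t,i}}{1-\beta_2^{t}}, \qquad
\beta_{1,t,i} = \operatorname{clip}\!\Big(1-\beta_0\big(\hat v_{t,i}/\bar v_t\big)^{\frac{1}{3-2\lambda}},\,0,\,1-\epsilon\Big),

m_{t,i} = \beta_{1,t,i}\,m_{t-1,i} + (1-\beta_{1,t,i})^{\lambda}\,g_{t,i}, \qquad
\theta_{t+1,i} = \theta_{t,i} - \mathrm{lr}\cdot\beta_0^{\,1-\lambda}\,\frac{m_{t,i}}{1-\prod_{\tau\le t}\beta_{1,\tau,i}}.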
715,361 |
pytorch_optimizer.optimizer.adamp
|
AdamP
|
Slowing Down the Slowdown for Momentum Optimizers on Scale-invariant Weights.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param delta: float. threshold that determines whether a set of parameters is scale invariant or not.
:param wd_ratio: float. relative weight decay applied on scale-invariant parameters compared to that applied
on scale-variant parameters.
:param use_gc: bool. use gradient centralization.
:param nesterov: bool. enables Nesterov momentum.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class AdamP(Optimizer, BaseOptimizer):
r"""Slowing Down the Slowdown for Momentum Optimizers on Scale-invariant Weights.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param delta: float. threshold that determines whether a set of parameters is scale invariant or not.
:param wd_ratio: float. relative weight decay applied on scale-invariant parameters compared to that applied
on scale-variant parameters.
:param use_gc: bool. use gradient centralization.
:param nesterov: bool. enables Nesterov momentum.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
delta: float = 0.1,
wd_ratio: float = 0.1,
use_gc: bool = False,
nesterov: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_range(wd_ratio, 'wd_ratio', 0.0, 1.0)
self.validate_non_negative(eps, 'eps')
self.use_gc = use_gc
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'delta': delta,
'wd_ratio': wd_ratio,
'nesterov': nesterov,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'AdamP'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
if self.use_gc:
centralize_gradient(grad, gc_conv_only=False)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
inv_de_nom = exp_avg_sq.rsqrt().add_(group['eps']).mul_(bias_correction2_sq)
perturb = exp_avg.clone()
if group['nesterov']:
perturb.mul_(beta1).addcmul_(grad, inv_de_nom, value=1.0 - beta1)
else:
perturb.mul_(inv_de_nom)
wd_ratio: float = 1.0
if len(p.shape) > 1:
perturb, wd_ratio = projection(
p,
grad,
perturb,
group['delta'],
group['wd_ratio'],
group['eps'],
)
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
ratio=wd_ratio,
)
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['lr'],
bias_correction1=bias_correction1,
)
p.add_(perturb, alpha=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, delta: float = 0.1, wd_ratio: float = 0.1, use_gc: bool = False, nesterov: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
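A minimal usage sketch for the constructor documented above — the toy model and synthetic data are illustrative assumptions; the import path follows the pytorch_optimizer.optimizer.adamp module shown in this entry:

import torch
from pytorch_optimizer.optimizer.adamp import AdamP

model = torch.nn.Linear(16, 1)  # toy model, for illustration only
optimizer = AdamP(model.parameters(), lr=1e-3, betas=(0.9, 0.999),
                  weight_decay=1e-2, delta=0.1, wd_ratio=0.1)

x, y = torch.randn(8, 16), torch.randn(8, 1)  # synthetic batch
for _ in range(3):
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()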
715,363 |
pytorch_optimizer.optimizer.adamp
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
delta: float = 0.1,
wd_ratio: float = 0.1,
use_gc: bool = False,
nesterov: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_range(wd_ratio, 'wd_ratio', 0.0, 1.0)
self.validate_non_negative(eps, 'eps')
self.use_gc = use_gc
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'delta': delta,
'wd_ratio': wd_ratio,
'nesterov': nesterov,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, delta: float = 0.1, wd_ratio: float = 0.1, use_gc: bool = False, nesterov: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
715,366 |
pytorch_optimizer.optimizer.adamp
|
__str__
| null |
def __str__(self) -> str:
return 'AdamP'
|
(self) -> str
|
715,387 |
pytorch_optimizer.optimizer.adamp
|
reset
| null |
@torch.no_grad()
def reset(self):
    for group in self.param_groups:
        group['step'] = 0
        for p in group['params']:
            state = self.state[p]
            state['exp_avg'] = torch.zeros_like(p)
            state['exp_avg_sq'] = torch.zeros_like(p)
            if group['adanorm']:
                state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
|
(self)
|
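For the non-Nesterov branch, the step method shown in the AdamP class code above applies (in my notation) the update below. The projection helper it calls for multi-dimensional parameters is not reproduced in this entry, so treat its description as an assumption: per the AdamP method, it removes the component of the perturbation along the weight direction for parameters judged scale-invariant via the delta threshold, and shrinks their weight decay by wd_ratio.

\theta_{t+1} = \theta_t - \frac{\mathrm{lr}}{1-\beta_1^{t}}\;
\Pi\!\Big(m_t\odot\big(v_t^{-1/2}+\epsilon\big)\sqrt{1-\beta_2^{\,t}}\Big),

where \Pi is the identity for one-dimensional parameters, and the 1/(1-\beta_1^{t}) factor is dropped when adam_debias is enabled.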