file_path (string, length 20-202) | content (string, length 9-3.85M) | size (int64, 9-3.85M) | lang (string, 9 classes) | avg_line_length (float64, 3.33-100) | max_line_length (int64, 8-993) | alphanum_fraction (float64, 0.26-0.93)
---|---|---|---|---|---|---|
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_tf14/network_builder.py
|
import tensorflow as tf
import numpy as np
from rl_games.algos_tf14 import networks
from rl_games.common import object_factory
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
class NetworkBuilder:
def __init__(self, **kwargs):
self.activations_factory = object_factory.ObjectFactory()
self.activations_factory.register_builder('relu', lambda **kwargs : tf.nn.relu)
self.activations_factory.register_builder('tanh', lambda **kwargs : tf.nn.tanh)
self.activations_factory.register_builder('sigmoid', lambda **kwargs : tf.nn.sigmoid)
self.activations_factory.register_builder('elu', lambda **kwargs : tf.nn.elu)
self.activations_factory.register_builder('selu', lambda **kwargs : tf.nn.selu)
self.activations_factory.register_builder('softplus', lambda **kwargs : tf.nn.softplus)
self.activations_factory.register_builder('None', lambda **kwargs : None)
self.init_factory = object_factory.ObjectFactory()
self.init_factory.register_builder('normc_initializer', lambda **kwargs : normc_initializer(**kwargs))
self.init_factory.register_builder('const_initializer', lambda **kwargs : tf.constant_initializer(**kwargs))
self.init_factory.register_builder('orthogonal_initializer', lambda **kwargs : tf.orthogonal_initializer(**kwargs))
self.init_factory.register_builder('glorot_normal_initializer', lambda **kwargs : tf.glorot_normal_initializer(**kwargs))
self.init_factory.register_builder('glorot_uniform_initializer', lambda **kwargs : tf.glorot_uniform_initializer(**kwargs))
self.init_factory.register_builder('variance_scaling_initializer', lambda **kwargs : tf.variance_scaling_initializer(**kwargs))
self.init_factory.register_builder('random_uniform_initializer', lambda **kwargs : tf.random_uniform_initializer(**kwargs))
self.init_factory.register_builder('None', lambda **kwargs : None)
self.regularizer_factory = object_factory.ObjectFactory()
self.regularizer_factory.register_builder('l1_regularizer', lambda **kwargs : tf.contrib.layers.l1_regularizer(**kwargs))
self.regularizer_factory.register_builder('l2_regularizer', lambda **kwargs : tf.contrib.layers.l2_regularizer(**kwargs))
self.regularizer_factory.register_builder('l1l2_regularizer', lambda **kwargs : tf.contrib.layers.l1l2_regularizer(**kwargs))
self.regularizer_factory.register_builder('None', lambda **kwargs : None)
def load(self, params):
pass
def build(self, name, **kwargs):
pass
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
def _noisy_dense(self, inputs, units, activation, kernel_initializer, kernel_regularizer, name):
return networks.noisy_dense(inputs, units, name, True, activation)
def _build_mlp(self,
name,
input,
units,
activation,
initializer,
regularizer,
norm_func_name = None,
dense_func = tf.layers.dense,
is_train=True):
out = input
ind = 0
for unit in units:
ind += 1
out = dense_func(out, units=unit,
activation=self.activations_factory.create(activation),
kernel_initializer = self.init_factory.create(**initializer),
kernel_regularizer = self.regularizer_factory.create(**regularizer),
#bias_initializer=tf.random_uniform_initializer(-0.1, 0.1),
name=name + str(ind))
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, training=is_train)
return out
def _build_lstm(self, name, input, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.float32, [batch_num])
states_ph = tf.placeholder(tf.float32, [games_num, 2*units])
lstm_out, lstm_state, initial_state = networks.openai_lstm(name, input, dones_ph=dones_ph, states_ph=states_ph, units=units, env_num=games_num, batch_num=batch_num)
return lstm_out, lstm_state, initial_state, dones_ph, states_ph
def _build_lstm2(self, name, inputs, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [games_num, 2*units])
hidden = tf.concat((inputs[0], inputs[1]), axis=1)
lstm_out, lstm_state, initial_state = networks.openai_lstm(name, hidden, dones_ph=dones_ph, states_ph=states_ph, units=units, env_num=games_num, batch_num=batch_num)
#lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
return lstm_out, lstm_state, initial_state, dones_ph, states_ph
def _build_lstm_sep(self, name, inputs, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.bool, [batch_num], name='lstm_masks')
states_ph = tf.placeholder(tf.float32, [games_num, 4*units], name='lstm_states')
statesa, statesc = tf.split(states_ph, 2, axis=1)
a_out, lstm_statea, initial_statea = networks.openai_lstm(name +'a', inputs[0], dones_ph=dones_ph, states_ph=statesa, units=units, env_num=games_num, batch_num=batch_num)
c_out, lstm_statec, initial_statec = networks.openai_lstm(name + 'c', inputs[1], dones_ph=dones_ph, states_ph=statesc, units=units, env_num=games_num, batch_num=batch_num)
lstm_state = tf.concat([lstm_statea, lstm_statec], axis=1)
initial_state = np.concatenate([initial_statea, initial_statec], axis=1)
#lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
return a_out, c_out, lstm_state, initial_state, dones_ph, states_ph
def _build_conv(self, ctype, **kwargs):
print('conv_name:', ctype)
if ctype == 'conv2d':
return self._build_cnn(**kwargs)
if ctype == 'conv1d':
return self._build_cnn1d(**kwargs)
def _build_cnn(self, name, input, convs, activation, initializer, regularizer, norm_func_name=None, is_train=True):
out = input
ind = 0
for conv in convs:
print(out.shape.as_list())
ind += 1
config = conv.copy()
config['filters'] = conv['filters']
config['padding'] = conv['padding']
config['kernel_size'] = [conv['kernel_size']] * 2
config['strides'] = [conv['strides']] * 2
config['activation'] = self.activations_factory.create(activation)
config['kernel_initializer'] = self.init_factory.create(**initializer)
config['kernel_regularizer'] = self.regularizer_factory.create(**regularizer)
config['name'] = name + str(ind)
out = tf.layers.conv2d(inputs=out, **config)
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, name='bn_'+ config['name'], training=is_train)
return out
def _build_cnn1d(self, name, input, convs, activation, initializer, regularizer, norm_func_name=None, is_train=True):
out = input
ind = 0
print('_build_cnn1d')
for conv in convs:
ind += 1
config = conv.copy()
config['activation'] = self.activations_factory.create(activation)
config['kernel_initializer'] = self.init_factory.create(**initializer)
config['kernel_regularizer'] = self.regularizer_factory.create(**regularizer)
config['name'] = name + str(ind)
#config['bias_initializer'] = tf.random_uniform_initializer,
# bias_initializer=tf.random_uniform_initializer(-0.1, 0.1)
out = tf.layers.conv1d(inputs=out, **config)
print('shapes of layer_' + str(ind), str(out.get_shape().as_list()))
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, training=is_train)
return out
class A2CBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.separate = params['separate']
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_discrete = 'discrete' in params['space']
self.is_continuous = 'continuous' in params['space']
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
self.has_lstm = 'lstm' in params
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
if self.has_lstm:
self.lstm_units = params['lstm']['units']
self.concated = params['lstm']['concated']
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
batch_num = kwargs.pop('batch_num', 1)
games_num = kwargs.pop('games_num', 1)
is_train = kwargs.pop('is_train', True)
with tf.variable_scope(name, reuse=reuse):
actor_input = critic_input = input
if self.has_cnn:
cnn_args = {
'name' :'actor_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
actor_input = self._build_conv(**cnn_args)
actor_input = tf.contrib.layers.flatten(actor_input)
critic_input = actor_input
if self.separate:
cnn_args['name'] = 'critic_cnn'
critic_input = self._build_conv( **cnn_args)
critic_input = tf.contrib.layers.flatten(critic_input)
mlp_args = {
'name' :'actor_fc',
'input' : actor_input,
'units' :self.units,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out_actor = self._build_mlp(**mlp_args)
if self.separate:
mlp_args['name'] = 'critic_fc'
mlp_args['input'] = critic_input
out_critic = self._build_mlp(**mlp_args)
if self.has_lstm:
if self.concated:
out_actor, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm2('lstm', [out_actor, out_critic], self.lstm_units, batch_num, games_num)
out_critic = out_actor
else:
out_actor, out_critic, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm_sep('lstm_', [out_actor, out_critic], self.lstm_units, batch_num, games_num)
else:
if self.has_lstm:
out_actor, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm('lstm', out_actor, self.lstm_units, batch_num, games_num)
out_critic = out_actor
value = tf.layers.dense(out_critic, units = 1, kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.value_activation), name='value')
if self.is_continuous:
mu = tf.layers.dense(out_actor, units = actions_num, activation=self.activations_factory.create(self.space_config['mu_activation']),
kernel_initializer = self.init_factory.create(**self.space_config['mu_init']), name='mu')
if self.space_config['fixed_sigma']:
sigma_out = tf.get_variable(name='sigma_out', shape=(actions_num), initializer=self.init_factory.create(**self.space_config['sigma_init']), trainable=True)
else:
sigma_out = tf.layers.dense(out_actor, units = actions_num, kernel_initializer=self.init_factory.create(**self.space_config['sigma_init']), activation=self.activations_factory.create(self.space_config['sigma_activation']), name='sigma_out')
if self.has_lstm:
return mu, mu * 0 + sigma_out, value, states_ph, dones_ph, lstm_state, initial_state
return mu, mu * 0 + sigma_out, value
if self.is_discrete:
logits = tf.layers.dense(inputs=out_actor, units=actions_num, name='logits', kernel_initializer = self.init_factory.create(**self.initializer))
if self.has_lstm:
return logits, value, states_ph, dones_ph, lstm_state, initial_state
return logits, value
class DQNBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_dueling = params['dueling']
self.atoms = params['atoms']
self.is_noisy = params['noisy']
self.normalization = params.get('normalization', None)
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
is_train = kwargs.pop('is_train', True)
if self.is_noisy:
dense_layer = self._noisy_dense
else:
dense_layer = tf.layers.dense
with tf.variable_scope(name, reuse=reuse):
out = input
if self.has_cnn:
cnn_args = {
'name' :'dqn_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out = self._build_conv(**cnn_args)
out = tf.contrib.layers.flatten(out)
mlp_args = {
'name' :'dqn_mlp',
'input' : out,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train,
'dense_func' : dense_layer
}
if self.is_dueling:
if len(self.units) > 1:
mlp_args['units'] = self.units[:-1]
out = self._build_mlp(**mlp_args)
hidden_value = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_val')
hidden_advantage = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_adv')
value = dense_layer(inputs=hidden_value, units=self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), activation=tf.identity, kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='value')
advantage = dense_layer(inputs=hidden_advantage, units= actions_num * self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='advantage')
advantage = tf.reshape(advantage, shape = [-1, actions_num, self.atoms])
value = tf.reshape(value, shape = [-1, 1, self.atoms])
q_values = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
else:
mlp_args['units'] = self.units
out = self._build_mlp(**mlp_args)  # use the shared mlp_args so dense_func and normalization settings apply
q_values = dense_layer(inputs=out, units=actions_num *self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='q_vals')
q_values = tf.reshape(q_values, shape = [-1, actions_num, self.atoms])
if self.atoms == 1:
return tf.squeeze(q_values)
else:
return q_values
| 18,263 | Python | 51.034188 | 301 | 0.592345 |
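A minimal usage sketch for `A2CBuilder` above, assuming TensorFlow 1.x and the `ObjectFactory.create(name, **kwargs)` dispatch from `rl_games.common.object_factory`; the observation size, action count, and initializer settings are illustrative placeholders, not values taken from any config in this dump.

```python
import tensorflow as tf
from rl_games.algos_tf14.network_builder import A2CBuilder

# Illustrative parameters; keys follow A2CBuilder.load() above (no 'cnn' and no 'lstm' section).
params = {
    'separate': True,
    'value_activation': 'None',
    'normalization': None,
    'space': {
        'continuous': {
            'mu_activation': 'None',
            'sigma_activation': 'None',
            'mu_init': {'name': 'normc_initializer', 'std': 0.01},
            'sigma_init': {'name': 'const_initializer', 'value': 0.0},
            'fixed_sigma': True,
        }
    },
    'mlp': {
        'units': [256, 128],
        'activation': 'relu',
        'initializer': {'name': 'normc_initializer', 'std': 1.0},
        'regularizer': {'name': 'None'},
    },
}

builder = A2CBuilder()
builder.load(params)

obs_ph = tf.placeholder(tf.float32, (None, 24), name='obs')  # 24-dim observation, example value
# Without an 'lstm' section, build() returns (mu, sigma, value) for the continuous space.
mu, sigma, value = builder('agent', inputs=obs_ph, actions_num=4, reuse=False)
```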
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_tf14/a2c_continuous.py
|
from rl_games.common import tr_helpers, vecenv
from rl_games.algos_tf14 import networks
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
import collections
import time
from collections import deque, OrderedDict
from tensorboardX import SummaryWriter
import gym
import ray
from datetime import datetime
def swap_and_flatten01(arr):
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
#(-1, 1) -> (low, high)
def rescale_actions(low, high, action):
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
#(horizon_length, actions_num)
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma):
c1 = np.log(p1_sigma/p0_sigma + 1e-5) # log(sigma1/sigma0) for KL(p0||p1), matching policy_kl_tf below
c2 = (np.square(p0_sigma) + np.square(p1_mu - p0_mu))/(2.0 *(np.square(p1_sigma) + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = np.mean(np.sum(kl, axis = -1)) # returning mean between all steps of sum between all actions
return kl
def policy_kl_tf(p0_mu, p0_sigma, p1_mu, p1_sigma):
c1 = tf.log(p1_sigma/p0_sigma + 1e-5)
c2 = (tf.square(p0_sigma) + tf.square(p1_mu - p0_mu))/(2.0 * (tf.square(p1_sigma) + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = tf.reduce_mean(tf.reduce_sum(kl, axis=-1)) # returning mean between all steps of sum between all actions
return kl
class A2CAgent:
def __init__(self, sess, base_name, observation_space, action_space, config):
self.name = base_name
self.actions_low = action_space.low
self.actions_high = action_space.high
self.env_name = config['env_name']
self.ppo = config['ppo']
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
self.e_clip = config['e_clip']
self.clip_value = config['clip_value']
self.network = config['network']
self.rewards_shaper = config['reward_shaper']
self.num_actors = config['num_actors']
self.env_config = config.get('env_config', {})
self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
self.num_agents = self.vec_env.get_number_of_agents()
self.horizon_length = config['horizon_length']
self.normalize_advantage = config['normalize_advantage']
self.config = config
self.state_shape = observation_space.shape
self.critic_coef = config['critic_coef']
self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
self.sess = sess
self.grad_norm = config['grad_norm']
self.gamma = self.config['gamma']
self.tau = self.config['tau']
self.normalize_input = self.config['normalize_input']
self.seq_len = self.config['seq_length']
self.dones = np.asarray([False]*self.num_actors, dtype=np.bool)
self.current_rewards = np.asarray([0]*self.num_actors, dtype=np.float32)
self.current_lengths = np.asarray([0]*self.num_actors, dtype=np.float32)
self.game_rewards = deque([], maxlen=100)
self.game_lengths = deque([], maxlen=100)
self.obs_ph = tf.placeholder('float32', (None, ) + self.state_shape, name = 'obs')
self.target_obs_ph = tf.placeholder('float32', (None, ) + self.state_shape, name = 'target_obs')
self.actions_num = action_space.shape[0]
self.actions_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'actions')
self.old_mu_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'old_mu_ph')
self.old_sigma_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'old_sigma_ph')
self.old_neglogp_actions_ph = tf.placeholder('float32', (None, ), name = 'old_logpactions')
self.rewards_ph = tf.placeholder('float32', (None,), name = 'rewards')
self.old_values_ph = tf.placeholder('float32', (None,), name = 'old_values')
self.advantages_ph = tf.placeholder('float32', (None,), name = 'advantages')
self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
self.epoch_num = tf.Variable(tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
self.current_lr = self.learning_rate_ph
self.bounds_loss_coef = config.get('bounds_loss_coef', None)
if self.is_adaptive_lr:
self.kl_threshold = config['kl_threshold']
if self.is_polynom_decay_lr:
self.lr_multiplier = tf.train.polynomial_decay(1.0, global_step=self.epoch_num, decay_steps=config['max_epochs'], end_learning_rate=0.001, power=config.get('decay_power', 1.0))
if self.is_exp_decay_lr:
self.lr_multiplier = tf.train.exponential_decay(1.0, global_step=self.epoch_num, decay_steps=config['max_epochs'], decay_rate = config['decay_rate'])
self.input_obs = self.obs_ph
self.input_target_obs = self.target_obs_ph
if observation_space.dtype == np.uint8:
self.input_obs = tf.to_float(self.input_obs) / 255.0
self.input_target_obs = tf.to_float(self.input_target_obs) / 255.0
if self.normalize_input:
self.moving_mean_std = MovingMeanStd(shape = observation_space.shape, epsilon = 1e-5, decay = 0.99)
self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=True)
self.input_target_obs = self.moving_mean_std.normalize(self.input_target_obs, train=False)
games_num = self.config['minibatch_size'] // self.seq_len # it is used only for current rnn implementation
self.train_dict = {
'name' : 'agent',
'inputs' : self.input_obs,
'batch_num' : self.config['minibatch_size'],
'games_num' : games_num,
'actions_num' : self.actions_num,
'prev_actions_ph' : self.actions_ph,
}
self.run_dict = {
'name' : 'agent',
'inputs' : self.input_target_obs,
'batch_num' : self.num_actors,
'games_num' : self.num_actors,
'actions_num' : self.actions_num,
'prev_actions_ph' : None,
}
self.states = None
if self.network.is_rnn():
self.neglogp_actions ,self.state_values, self.action, self.entropy, self.mu, self.sigma, self.states_ph, self.masks_ph, self.lstm_state, self.initial_state = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.target_mu, self.target_sigma, self.target_states_ph, self.target_masks_ph, self.target_lstm_state, self.target_initial_state = self.network(self.run_dict, reuse=True)
self.states = self.target_initial_state
else:
self.neglogp_actions ,self.state_values, self.action, self.entropy, self.mu, self.sigma = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.target_mu, self.target_sigma = self.network(self.run_dict, reuse=True)
curr_e_clip = self.e_clip * self.lr_multiplier
if (self.ppo):
self.prob_ratio = tf.exp(self.old_neglogp_actions_ph - self.neglogp_actions)
self.prob_ratio = tf.clip_by_value(self.prob_ratio, 0.0, 16.0)
self.pg_loss_unclipped = -tf.multiply(self.advantages_ph, self.prob_ratio)
self.pg_loss_clipped = -tf.multiply(self.advantages_ph, tf.clip_by_value(self.prob_ratio, 1.- curr_e_clip, 1.+ curr_e_clip))
self.actor_loss = tf.reduce_mean(tf.maximum(self.pg_loss_unclipped, self.pg_loss_clipped))
else:
self.actor_loss = tf.reduce_mean(self.neglogp_actions * self.advantages_ph)
self.c_loss = (tf.squeeze(self.state_values) - self.rewards_ph)**2
if self.clip_value:
self.cliped_values = self.old_values_ph + tf.clip_by_value(tf.squeeze(self.state_values) - self.old_values_ph, -curr_e_clip, curr_e_clip)
self.c_loss_clipped = tf.square(self.cliped_values - self.rewards_ph)
self.critic_loss = tf.reduce_mean(tf.maximum(self.c_loss, self.c_loss_clipped))
else:
self.critic_loss = tf.reduce_mean(self.c_loss)
self._calc_kl_dist()
self.loss = self.actor_loss + 0.5 * self.critic_coef * self.critic_loss - self.config['entropy_coef'] * self.entropy
self._apply_bound_loss()
self.reg_loss = tf.losses.get_regularization_loss()
self.loss += self.reg_loss
self.train_step = tf.train.AdamOptimizer(self.current_lr * self.lr_multiplier)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
grads = tf.gradients(self.loss, self.weights)
if self.config['truncate_grads']:
grads, _ = tf.clip_by_global_norm(grads, self.grad_norm)
grads = list(zip(grads, self.weights))
self.train_op = self.train_step.apply_gradients(grads)
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
def _calc_kl_dist(self):
self.kl_dist = policy_kl_tf(self.mu, self.sigma, self.old_mu_ph, self.old_sigma_ph)
if self.is_adaptive_lr:
self.current_lr = tf.where(self.kl_dist > (2.0 * self.kl_threshold), tf.maximum(self.current_lr / 1.5, 1e-6), self.current_lr)
self.current_lr = tf.where(self.kl_dist < (0.5 * self.kl_threshold), tf.minimum(self.current_lr * 1.5, 1e-2), self.current_lr)
def _apply_bound_loss(self):
if self.bounds_loss_coef:
soft_bound = 1.1
mu_loss_high = tf.square(tf.maximum(0.0, self.mu - soft_bound))
mu_loss_low = tf.square(tf.maximum(0.0, -soft_bound - self.mu))
self.bounds_loss = tf.reduce_sum(mu_loss_high + mu_loss_low, axis=1)
self.loss += self.bounds_loss * self.bounds_loss_coef
else:
self.bounds_loss = None
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
def get_action_values(self, obs):
run_ops = [self.target_action, self.target_state_values, self.target_neglogp, self.target_mu, self.target_sigma]
if self.network.is_rnn():
run_ops.append(self.target_lstm_state)
return self.sess.run(run_ops, {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return (*self.sess.run(run_ops, {self.target_obs_ph : obs}), None)
def get_values(self, obs):
if self.network.is_rnn():
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs})
def play_steps(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs, mb_mus, mb_sigmas = [],[],[],[],[],[],[],[]
mb_states = []
epinfos = []
# For n in range number of steps
for _ in range(self.horizon_length):
if self.network.is_rnn():
mb_states.append(self.states)
actions, values, neglogpacs, mu, sigma, self.states = self.get_action_values(self.obs)
#actions = np.squeeze(actions)
values = np.squeeze(values)
neglogpacs = np.squeeze(neglogpacs)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones.copy())
mb_mus.append(mu)
mb_sigmas.append(sigma)
self.obs[:], rewards, self.dones, infos = self.vec_env.step(rescale_actions(self.actions_low, self.actions_high, np.clip(actions, -1.0, 1.0)))
self.current_rewards += rewards
self.current_lengths += 1
for reward, length, done in zip(self.current_rewards, self.current_lengths, self.dones):
if done:
self.game_rewards.append(reward)
self.game_lengths.append(length)
shaped_rewards = self.rewards_shaper(rewards)
epinfos.append(infos)
mb_rewards.append(shaped_rewards)
self.current_rewards = self.current_rewards * (1.0 - self.dones)
self.current_lengths = self.current_lengths * (1.0 - self.dones)
#using openai baseline approach
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions, dtype=np.float32)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_mus = np.asarray(mb_mus, dtype=np.float32)
mb_sigmas = np.asarray(mb_sigmas, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
mb_states = np.asarray(mb_states, dtype=np.float32)
last_values = self.get_values(self.obs)
last_values = np.squeeze(last_values)
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.horizon_length)):
if t == self.horizon_length - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.tau * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
if self.network.is_rnn():
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_mus, mb_sigmas, mb_states )), epinfos)
else:
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_mus, mb_sigmas)), None, epinfos)
return result
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def train(self):
max_epochs = self.config.get('max_epochs', 1e6)
self.obs = self.vec_env.reset()
batch_size = self.horizon_length * self.num_actors * self.num_agents
minibatch_size = self.config['minibatch_size']
mini_epochs_num = self.config['mini_epochs']
num_minibatches = batch_size // minibatch_size
last_lr = self.config['learning_rate']
self.last_mean_rewards = -100500
epoch_num = 0
frame = 0
update_time = 0
play_time = 0
start_time = time.time()
total_time = 0
while True:
play_time_start = time.time()
epoch_num = self.update_epoch()
frame += batch_size
obses, returns, dones, actions, values, neglogpacs, mus, sigmas, lstm_states, _ = self.play_steps()
advantages = returns - values
if self.normalize_advantage:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
a_losses = []
c_losses = []
b_losses = []
entropies = []
kls = []
play_time_end = time.time()
play_time = play_time_end - play_time_start
update_time_start = time.time()
if self.network.is_rnn():
total_games = batch_size // self.seq_len
num_games_batch = minibatch_size // self.seq_len
game_indexes = np.arange(total_games)
flat_indexes = np.arange(total_games * self.seq_len).reshape(total_games, self.seq_len)
lstm_states = lstm_states[::self.seq_len]
for _ in range(0, mini_epochs_num):
np.random.shuffle(game_indexes)
for i in range(0, num_minibatches):
batch = range(i * num_games_batch, (i + 1) * num_games_batch)
mb_indexes = game_indexes[batch]
mbatch = flat_indexes[mb_indexes].ravel()
dict = {}
dict[self.old_values_ph] = values[mbatch]
dict[self.old_neglogp_actions_ph] = neglogpacs[mbatch]
dict[self.advantages_ph] = advantages[mbatch]
dict[self.rewards_ph] = returns[mbatch]
dict[self.actions_ph] = actions[mbatch]
dict[self.obs_ph] = obses[mbatch]
dict[self.old_mu_ph] = mus[mbatch]
dict[self.old_sigma_ph] = sigmas[mbatch]
dict[self.masks_ph] = dones[mbatch]
dict[self.states_ph] = lstm_states[mb_indexes]
dict[self.learning_rate_ph] = last_lr
run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_dist, self.current_lr, self.mu, self.sigma, self.lr_multiplier]
if self.bounds_loss is not None:
run_ops.append(self.bounds_loss)
run_ops.append(self.train_op)
run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
res_dict = self.sess.run(run_ops, dict)
a_loss = res_dict[0]
c_loss = res_dict[1]
entropy = res_dict[2]
kl = res_dict[3]
last_lr = res_dict[4]
cmu = res_dict[5]
csigma = res_dict[6]
lr_mul = res_dict[7]
if self.bounds_loss is not None:
b_loss = res_dict[8]
b_losses.append(b_loss)
mus[mbatch] = cmu
sigmas[mbatch] = csigma
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
else:
for _ in range(0, mini_epochs_num):
permutation = np.random.permutation(batch_size)
obses = obses[permutation]
returns = returns[permutation]
actions = actions[permutation]
values = values[permutation]
neglogpacs = neglogpacs[permutation]
advantages = advantages[permutation]
mus = mus[permutation]
sigmas = sigmas[permutation]
for i in range(0, num_minibatches):
batch = range(i * minibatch_size, (i + 1) * minibatch_size)
dict = {self.obs_ph: obses[batch], self.actions_ph : actions[batch], self.rewards_ph : returns[batch],
self.advantages_ph : advantages[batch], self.old_neglogp_actions_ph : neglogpacs[batch], self.old_values_ph : values[batch]}
dict[self.old_mu_ph] = mus[batch]
dict[self.old_sigma_ph] = sigmas[batch]
dict[self.learning_rate_ph] = last_lr
run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_dist, self.current_lr, self.mu, self.sigma, self.lr_multiplier]
if self.bounds_loss is not None:
run_ops.append(self.bounds_loss)
run_ops.append(self.train_op)
run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
res_dict = self.sess.run(run_ops, dict)
a_loss = res_dict[0]
c_loss = res_dict[1]
entropy = res_dict[2]
kl = res_dict[3]
last_lr = res_dict[4]
cmu = res_dict[5]
csigma = res_dict[6]
lr_mul = res_dict[7]
if self.bounds_loss is not None:
b_loss = res_dict[8]
b_losses.append(b_loss)
mus[batch] = cmu
sigmas[batch] = csigma
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
update_time_end = time.time()
update_time = update_time_end - update_time_start
sum_time = update_time + play_time
total_time = update_time_end - start_time
if self.rank == 0:
scaled_time = sum_time # self.num_agents *
scaled_play_time = play_time # self.num_agents *
if self.print_stats:
fps_step = batch_size / scaled_play_time
fps_total = batch_size / scaled_time
print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')
# performance
self.writer.add_scalar('performance/total_fps', batch_size / sum_time, frame)
self.writer.add_scalar('performance/step_fps', batch_size / play_time, frame)
self.writer.add_scalar('performance/play_time', play_time, frame)
self.writer.add_scalar('performance/update_time', update_time, frame)
# losses
self.writer.add_scalar('losses/a_loss', np.mean(a_losses), frame)
self.writer.add_scalar('losses/c_loss', np.mean(c_losses), frame)
if len(b_losses) > 0:
self.writer.add_scalar('losses/bounds_loss', np.mean(b_losses), frame)
self.writer.add_scalar('losses/entropy', np.mean(entropies), frame)
# info
self.writer.add_scalar('info/last_lr', last_lr * lr_mul, frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/e_clip', self.e_clip * lr_mul, frame)
self.writer.add_scalar('info/kl', np.mean(kls), frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
if len(self.game_rewards) > 0:
mean_rewards = np.mean(self.game_rewards)
mean_lengths = np.mean(self.game_lengths)
self.writer.add_scalar('rewards/frame', mean_rewards, frame)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/frame', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if mean_rewards > self.last_mean_rewards:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards
self.save("./nn/" + self.name)
if self.last_mean_rewards > self.config['score_to_win']:
self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
return self.last_mean_rewards, epoch_num
if epoch_num > max_epochs:
print('MAX EPOCHS NUM!')
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
return self.last_mean_rewards, epoch_num
update_time = 0
| 24,499 | Python | 48.295775 | 253 | 0.561982 |
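The adaptive learning-rate schedule in `_calc_kl_dist` above is driven by the diagonal-Gaussian KL implemented by `policy_kl` / `policy_kl_tf`. A small standalone NumPy check of that formula (an illustrative copy, not an import from rl_games):

```python
import numpy as np

def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma):
    # KL(p0 || p1) for diagonal Gaussians, summed over action dims, averaged over the batch.
    c1 = np.log(p1_sigma / p0_sigma + 1e-5)
    c2 = (np.square(p0_sigma) + np.square(p1_mu - p0_mu)) / (2.0 * (np.square(p1_sigma) + 1e-5))
    kl = c1 + c2 - 0.5
    return np.mean(np.sum(kl, axis=-1))

mu = np.zeros((4, 2), dtype=np.float32)       # batch of 4, 2 action dimensions
sigma = np.ones((4, 2), dtype=np.float32)
print(policy_kl(mu, sigma, mu, sigma))        # ~0 for identical policies
print(policy_kl(mu, sigma, mu + 0.1, sigma))  # ~0.01: 0.1**2 / 2 per dim, summed over 2 dims
```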
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ppo_cartpole_masked_velocity_rnn.yaml
|
#Cartpole without velocities lstm test
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [64, 64]
activation: relu
normalization: 'layer_norm'
norm_only_first_layer: True
initializer:
name: default
regularizer:
name: None
rnn:
name: 'lstm'
units: 64
layers: 1
before_mlp: False
concat_input: True
layer_norm: True
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: cartpole_vel_info
score_to_win: 500
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: CartPoleMaskedVelocity-v1
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 4
| 1,117 | YAML | 17.327869 | 39 | 0.598926 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ppg_walker.yaml
|
params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128,64]
d2rl: False
activation: relu
initializer:
name: default
scale: 2
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
name: walker_ppg
score_to_win: 290
grad_norm: 0.5
entropy_coef: 0 #-0.005
truncate_grads: False
env_name: BipedalWalker-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 1
critic_coef: 2
schedule_type: 'standard'
lr_schedule: adaptive
kl_threshold: 0.004
normalize_input: False
bounds_loss_coef: 0.0005
max_epochs: 10000
normalize_value: True
#weight_decay: 0.0001
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 1024
mini_epochs: 6
player:
render: True
determenistic: True
games_num: 200
| 1,536 | YAML | 20.347222 | 56 | 0.558594 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ppo_flex_humanoid_torch.yaml
|
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [256,128,64]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: 'nn/humanoid_torch.pth'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'humanoid_torch'
score_to_win : 20000
grad_norm : 0.5
entropy_coef : 0.0
truncate_grads : True
env_name : FlexHumanoid
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 32
minibatch_size : 4096
mini_epochs : 4
critic_coef : 1
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : False
normalize_value : True
bounds_loss_coef: 0.000
max_epochs: 12000
| 1,468 | YAML | 18.851351 | 39 | 0.547684 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ppo_reacher.yaml
|
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128]
activation: relu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn1:
name: lstm
units: 64
layers: 1
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
name: walker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: ReacherPyBulletEnv-v0
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: none
kl_threshold: 0.008
normalize_input: True
seq_length: 16
bounds_loss_coef: 0.00
max_epochs: 10000
weight_decay: 0.0001
player:
render: True
games_num: 200
experiment_config1:
start_exp: 0
start_sub_exp: 0
experiments:
- exp:
- path: config.bounds_loss_coef
value: [0.5]
| 1,593 | YAML | 18.925 | 56 | 0.549278 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ppo_revenge_rnd.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: actor_critic
separate: False
value_shape: 2
space:
discrete:
cnn:
type: conv2d
activation: elu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 512]
activation: elu
regularizer:
name: 'None'
initializer:
name: default
config:
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.999
tau: 0.9
learning_rate: 1e-4
name: atari
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.002
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.1
clip_value: True
num_actors: 32
horizon_length: 512
minibatch_size: 4096
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
seq_length: 8
#lr_schedule: adaptive
# kl_threshold: 0.008
# bounds_loss_coef: 0.5
# max_epochs: 5000
env_config:
name: MontezumaRevengeNoFrameskip-v4
rnd_config:
scale_value: 1.0
episodic: True
episode_length: 256
gamma: 0.99
mini_epochs: 2
minibatch_size: 1024
learning_rate: 1e-4
network:
name: rnd_curiosity
cnn:
type: conv2d
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnd:
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
net:
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
rnd:
units: [512,512, 512]
net:
units: [512]
activation: elu
regularizer:
name: 'None'
initializer:
name: default
scale: 2
| 3,072 | YAML | 21.762963 | 42 | 0.427083 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ppo_flex_humanoid_torch_rnn.yaml
|
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
normalization: 'layer_norm'
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.01
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [256,128]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn:
name: lstm
units: 64
layers: 1
before_mlp: False
load_checkpoint: True
load_path: 'nn/humanoid_torch_rnn.pth'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 8e-4
name : 'humanoid_torch_rnn'
score_to_win : 20000
grad_norm : 5
entropy_coef : 0
truncate_grads : True
env_name : FlexHumanoid
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 256
minibatch_size : 8192
mini_epochs : 4
critic_coef : 1
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
seq_length: 16
bounds_loss_coef: 0.000
weight_decay: 0.001
max_epochs: 6000
| 1,608 | YAML | 18.621951 | 40 | 0.54291 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ppo_cartpole.yaml
|
#Cartpole MLP
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [32, 32]
activation: relu
initializer:
name: default
regularizer:
name: None
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: cartpole_vel_info
score_to_win: 500
grad_norm: 1.0
entropy_coef: 0.01
truncate_grads: True
env_name: CartPole-v1
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 32
minibatch_size: 64
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
device: 'cuda:0'
| 878 | YAML | 16.235294 | 29 | 0.592255 |
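All of these YAML files share the `params.algo / model / network / config` layout consumed by the agents above. A short sketch of reading the CartPole config with PyYAML; the file path is a hypothetical checkout-relative location:

```python
import yaml

# Hypothetical path; adjust to wherever rl_games is checked out.
with open('rl_games/configs/ppo_cartpole.yaml') as f:
    params = yaml.safe_load(f)['params']

print(params['algo']['name'])             # a2c_discrete
print(params['network']['mlp']['units'])  # [32, 32]
cfg = params['config']
print(cfg['env_name'], cfg['minibatch_size'], cfg['horizon_length'])  # CartPole-v1 64 32
```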
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ppo_continuous_lstm.yaml
|
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_lstm_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: normc_initializer
std: 0.01
sigma_init:
name: const_initializer
value: 0.0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: relu
initializer:
name: normc_initializer
std: 1
regularizer:
name: 'None'
lstm:
units: 128
concated: False
load_checkpoint: False
load_path: 'nn/runBipedalWalkerHardcore-v2'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: walker_lstm
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.000
truncate_grads: True
env_name: BipedalWalkerHardcore-v2
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.5
max_epochs: 5000
| 1,334 | YAML | 19.227272 | 45 | 0.561469 |
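For the recurrent configs, `A2CAgent` above derives the number of sequences per minibatch as `minibatch_size // seq_length`. A quick sketch with the numbers from this file, assuming a single-agent environment (`num_agents = 1`):

```python
# Values from ppo_continuous_lstm.yaml above.
minibatch_size = 2048
seq_length = 8
horizon_length = 512
num_actors = 16

batch_size = horizon_length * num_actors        # 8192 transitions collected per update
games_num = minibatch_size // seq_length        # 256 sequences fed to the LSTM per minibatch
num_minibatches = batch_size // minibatch_size  # 4 minibatches per mini-epoch
print(batch_size, games_num, num_minibatches)
```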
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/carracing_ppo.yaml
|
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
load_checkpoint: False
load_path: 'nn/runCarRacing-v0'
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: racing
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.000
truncate_grads: True
env_name: CarRacing-v0
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
normalize_value: True
#lr_schedule: adaptive
# kl_threshold: 0.008
bounds_loss_coef: 0.001
# max_epochs: 5000
player:
render: True
deterministic: True
| 1,684 | YAML | 18.593023 | 33 | 0.541568 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/rainbow_dqn_breakout.yaml
|
params:
algo:
name: dqn
model:
name: dqn
load_checkpoint: False
load_path: 'nn/breakoutep=3638750.0rew=201.75'
network:
name: dqn
dueling: True
atoms: 51
noisy: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 'valid'
- filters: 64
kernel_size: 4
strides: 2
padding: 'valid'
- filters: 64
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
gamma : 0.99
learning_rate : 0.0001
steps_per_epoch : 4
batch_size : 32
epsilon : 0.00
min_epsilon : 0.00
epsilon_decay_frames : 1000000
num_epochs_to_copy : 10000
name : 'breakout'
env_name: BreakoutNoFrameskip-v4
is_double : True
score_to_win : 600
num_steps_fill_buffer : 100000
replay_buffer_type : 'prioritized'
replay_buffer_size : 1000000
priority_beta : 0.4
priority_alpha : 0.6
beta_decay_frames : 1000000
max_beta : 1
horizon_length : 3
episodes_to_log : 100
lives_reward : 5
atoms_num : 51
v_min : -10
v_max : 10
games_to_track : 100
lr_schedule : None
max_epochs: 10000000
| 1,525 | YAML | 19.346666 | 48 | 0.550164 |
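The `atoms_num` / `v_min` / `v_max` triple above parameterizes the distributional value head built by `DQNBuilder` at the top of this dump. Assuming the standard evenly spaced C51-style support (the support construction itself lives in the DQN agent, which is not shown here), the implied return atoms are:

```python
import numpy as np

atoms_num, v_min, v_max = 51, -10.0, 10.0
support = np.linspace(v_min, v_max, atoms_num)  # 51 return values the categorical head predicts over
delta_z = (v_max - v_min) / (atoms_num - 1)     # 0.4 spacing between adjacent atoms
print(support[:3], support[-1], delta_z)        # [-10.  -9.6 -9.2] 10.0 0.4
```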
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ppo_smac.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/sc2smac'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 6h_vs_8z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 6h_vs_8z
frames: 2
random_invalid_step: False
| 979 | YAML | 17.148148 | 32 | 0.581205 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ppo_multiwalker.yaml
|
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128, 64]
d2rl: False
activation: relu
initializer:
name: default
load_checkpoint: False
load_path: './nn/multiwalker.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 1e-4
name: multiwalker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: multiwalker_env
ppo: True
e_clip: 0.2
use_experimental_cv: False
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 3072 #768 #3072 #1536
mini_epochs: 4
critic_coef: 1
schedule_type: 'standard'
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
normalize_value: True
bounds_loss_coef: 0.0001
max_epochs: 10000
weight_decay: 0.0000
player:
render: True
games_num: 200
env_config:
central_value: True
use_prev_actions: True
apply_agent_ids: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 3e-4
clip_value: False
normalize_input: True
truncate_grads: False
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256, 128]
activation: elu
initializer:
name: default
| 1,881 | YAML | 20.632184 | 43 | 0.549176 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/dqn.yaml
|
params:
algo:
name: dqn
model:
name: dqn
load_checkpoint: False
load_path: path
network:
name: dqn
dueling: True
atoms: 1
noisy: False
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 'valid'
- filters: 64
kernel_size: 4
strides: 2
padding: 'valid'
- filters: 64
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 0.1
gamma : 0.99
learning_rate : 0.0005
steps_per_epoch : 4
batch_size : 128
epsilon : 0.90
min_epsilon : 0.02
epsilon_decay_frames : 100000
num_epochs_to_copy : 10000
name : 'pong_dddqn_config1'
env_name: PongNoFrameskip-v4
is_double : True
score_to_win : 20.9
num_steps_fill_buffer : 10000
replay_buffer_type : 'normal'
replay_buffer_size : 100000
priority_beta : 0.4
priority_alpha : 0.6
beta_decay_frames : 100000
max_beta : 1
horizon_length : 3
episodes_to_log : 10
lives_reward : 1
atoms_num : 1
games_to_track : 20
lr_schedule : polynom_decay
max_epochs: 100000
experiment_config:
start_exp: 0
start_sub_exp: 3
experiments:
# - exp:
# - path: config.learning_rate
# value: [0.0005, 0.0002]
- exp:
- path: network.initializer
value:
- name: variance_scaling_initializer
scale: 2
- name: glorot_normal_initializer
- name: glorot_uniform_initializer
- name: orthogonal_initializer
gain: 1.41421356237
- path: network.cnn.initializer
value:
- name: variance_scaling_initializer
scale: 2
- name: glorot_normal_initializer
- name: glorot_uniform_initializer
- name: orthogonal_initializer
gain: 1.41421356237
| 2,195 | YAML | 20.742574 | 46 | 0.553531 |
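The `experiment_config` block above sweeps `network.initializer` and `network.cnn.initializer` over names that must be registered in `NetworkBuilder.init_factory` (network_builder.py, first entry in this dump). A quick consistency check of those names:

```python
# Initializer names registered by NetworkBuilder.__init__ vs. the ones swept in dqn.yaml above.
registered = {
    'normc_initializer', 'const_initializer', 'orthogonal_initializer',
    'glorot_normal_initializer', 'glorot_uniform_initializer',
    'variance_scaling_initializer', 'random_uniform_initializer', 'None',
}
swept = {
    'variance_scaling_initializer', 'glorot_normal_initializer',
    'glorot_uniform_initializer', 'orthogonal_initializer',
}
print(swept <= registered)  # True: every swept initializer has a registered builder
```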
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/test/test_discrete.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
discrete:
mlp:
units: [32,32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: False
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: True
multi_discrete_space: False
multi_head_value: False
player:
games_num: 100
determenistic: True
| 1,207 | YAML | 17.584615 | 33 | 0.589892 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/atari/ppo_space_invaders_resnet.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/invaders_resnet.pth'
network:
name: resnet_actor_critic
separate: False
value_shape: 1
space:
discrete:
cnn:
conv_depths: [16, 32, 32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: default
rnn:
name: lstm
units: 256
layers: 1
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
name: invaders_resnet
score_to_win: 100000
grad_norm: 1.5
entropy_coef: 0.001
truncate_grads: True
env_name: 'atari_gym' #'openai_gym' #'PongNoFrameskip-v4' #
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: none
kl_threshold: 0.01
normalize_input: False
seq_length: 4
max_epochs: 200000
env_config:
skip: 3
name: 'SpaceInvadersNoFrameskip-v4'
episode_life: False
player:
render: True
games_num: 10
n_game_life: 1
determenistic: True
| 1,416 | YAML | 17.166666 | 63 | 0.565678 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/atari/ppo_pacman_torch.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/pacman_ff.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
config:
reward_shaper:
#min_val: -1
#max_val: 1
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
name: pacman_ff_no_normalize
score_to_win: 50000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: 'atari_gym'
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: linear
schedule_entropy: True
normalize_input: False
normalize_value: True
max_epochs: 20000
env_config:
skip: 4
name: 'MsPacmanNoFrameskip-v4'
episode_life: True
player:
render: True
games_num: 10
n_game_life: 3
determenistic: True
render_sleep: 0.05
| 1,692 | YAML | 18.686046 | 39 | 0.543144 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ma/ppo_slime_self_play.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/slime_pvp.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [128,64]
activation: elu
initializer:
name: default
regularizer:
name: 'None'
config:
name: slime_pvp2
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 2e-4
score_to_win: 100
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: slime_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
games_to_track: 500
self_play_config:
update_score: 1
games_to_check: 200
check_scores : False
env_config:
name: SlimeVolleyDiscrete-v0
#neg_scale: 1 #0.5
self_play: True
config_path: 'rl_games/configs/ma/ppo_slime_self_play.yaml'
player:
render: True
games_num: 200
n_game_life: 1
determenistic: True
device_name: 'cpu'
| 1,294 | YAML | 18.328358 | 65 | 0.59119 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ma/ppo_connect4_self_play.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/connect4.pth'
network:
name: actor_critic
separate: False
normalization: batch_norm
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
config:
name: connect4_3
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 2e-4
score_to_win: 100
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: connect4_env
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
games_to_track: 1000
use_action_masks: True
weight_decay: 0.001
self_play_config:
update_score: 0.1
games_to_check: 100
env_update_num: 8
env_config:
name: connect_four_v0
self_play: True
is_human: False
random_agent: False
config_path: 'rl_games/configs/ma/ppo_connect4_self_play.yaml'
| 1,735 |
YAML
| 19.915662 | 68 | 0.563689 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ma/ppo_slime_v0.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [128,64]
activation: elu
initializer:
name: default
regularizer:
name: 'None'
config:
name: slime
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: slime_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: False
ignore_dead_batches : False
env_config:
name: SlimeVolleyDiscrete-v0
player:
render: True
games_num: 200
n_game_life: 1
determenistic: True
| 1,093 |
YAML
| 16.645161 | 34 | 0.590119 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/ma/ppo_connect4_self_play_resnet.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/connect4_rn.pth'
network:
name: connect4net
blocks: 5
config:
name: connect4_rn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 2e-4
score_to_win: 100
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: connect4_env
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 4
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
games_to_track: 1000
use_action_masks: True
weight_decay: 0.001
self_play_config:
update_score: 0.1
games_to_check: 100
env_update_num: 4
env_config:
name: connect_four_v0
self_play: True
is_human: True
random_agent: False
config_path: 'rl_games/configs/ma/ppo_connect4_self_play_resnet.yaml'
| 1,052 |
YAML
| 19.25 | 75 | 0.613118 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/minigrid/minigrid_rnn.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
#normalization: 'layer_norm'
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 16
kernel_size: 8
strides: 4
padding: 0
- filters: 32
kernel_size: 4
strides: 2
padding: 0
mlp:
units: [128]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
rnn:
name: 'lstm'
units: 128
layers: 1
before_mlp: True
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: minigrid_env_rnn
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: minigrid_env
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
env_config:
#action_bonus: True
#state_bonus : True
name: MiniGrid-MemoryS7-v0
fully_obs: False
player:
games_num: 100
render: True
determenistic: False
| 1,629 |
YAML
| 18.404762 | 39 | 0.553714 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/configs/procgen/ppo_coinrun.yaml
|
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: resnet_actor_critic
separate: False
value_shape: 1
space:
discrete:
cnn:
conv_depths: [16, 32, 32]
activation: elu
initializer:
name: default
regularizer:
name: 'None'
mlp:
units: [512]
activation: elu
regularizer:
name: 'None'
initializer:
name: default
rnn1:
name: lstm
units: 256
layers: 1
config:
reward_shaper:
max_val: 10
normalize_advantage: True
gamma: 0.999
tau: 0.95
learning_rate: 1e-4
name: atari
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: 'openai_gym' #'openai_gym' #'PongNoFrameskip-v4' #
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 3
critic_coef: 1
lr_schedule: polynom_decay
kl_threshold: 0.01
normalize_input: False
seq_length: 4
max_epochs: 2000
env_config:
name: "procgen:procgen-coinrun-v0"
procgen: True
frames: 4
num_levels: 1000
start_level: 323
limit_steps: True
distribution_mode: 'easy'
| 1,372 |
YAML
| 18.069444 | 64 | 0.561953 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/distributed/hvd_wrapper.py
|
import torch
import horovod.torch as hvd
import os
class HorovodWrapper:
def __init__(self):
hvd.init()
self.rank = hvd.rank()
self.rank_size = hvd.size()
print('Starting horovod with rank: {0}, size: {1}'.format(self.rank, self.rank_size))
#self.device_name = 'cpu'
self.device_name = 'cuda:' + str(self.rank)
def update_algo_config(self, config):
config['device'] = self.device_name
if self.rank != 0:
config['print_stats'] = False
config['lr_schedule'] = None
return config
def setup_algo(self, algo):
hvd.broadcast_parameters(algo.model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(algo.optimizer, root_rank=0)
algo.optimizer = hvd.DistributedOptimizer(algo.optimizer, named_parameters=algo.model.named_parameters())
self.sync_stats(algo)
if algo.has_central_value:
hvd.broadcast_optimizer_state(algo.central_value_net.optimizer, root_rank=0)
hvd.broadcast_parameters(algo.central_value_net.state_dict(), root_rank=0)
algo.central_value_net.optimizer = hvd.DistributedOptimizer(algo.central_value_net.optimizer, named_parameters=algo.central_value_net.model.named_parameters())
def sync_stats(self, algo):
stats_dict = algo.get_stats_weights()
for k,v in stats_dict.items():
for in_k, in_v in v.items():
in_v.data = hvd.allreduce(in_v, name=k + in_k)
algo.curr_frames = hvd.allreduce(torch.tensor(algo.curr_frames), average=False).item()
def broadcast_value(self, val, name):
hvd.broadcast_parameters({name: val}, root_rank=0)
def is_root(self):
return self.rank == 0
    def average_stats(self, stats_dict):
        res_dict = {}
        for k, v in stats_dict.items():
            # allreduce each stat across ranks and return the averaged dict
            res_dict[k] = self.average_value(v, k)
        return res_dict
def average_value(self, val, name):
avg_tensor = hvd.allreduce(val, name=name)
return avg_tensor
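# Hedged usage sketch (not part of the original file): this mirrors how A2CBase in
# rl_games/common/a2c_common.py uses the wrapper when config['multi_gpu'] is enabled.
# `algo` is assumed to be a fully built trainer exposing model, optimizer and curr_frames.
def _example_multi_gpu_wiring(algo, config):
    wrapper = HorovodWrapper()                    # hvd.init() and pin this rank to cuda:<rank>
    config = wrapper.update_algo_config(config)   # mute stats printing / lr schedule on non-root ranks
    wrapper.setup_algo(algo)                      # broadcast weights, wrap the optimizer(s); also syncs stats once
    return wrapper, config                        # trainers then call wrapper.sync_stats(algo) each epoch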
| 2,030 |
Python
| 35.927272 | 171 | 0.625123 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/interval_summary_writer.py
|
import time
class IntervalSummaryWriter:
"""
Summary writer wrapper designed to reduce the size of tf.events files.
It will prevent the learner from writing the summaries more often than a specified interval, i.e. if the
current interval is 20 seconds and we wrote our last summary for a particular summary key at 01:00, all summaries
until 01:20 for that key will be ignored.
The interval is adaptive: it will approach 1/200th of the total training time, but no less than interval_sec_min
and no greater than interval_sec_max.
This was created to facilitate really big training runs, such as with Population-Based training, where summary
folders reached tens of gigabytes.
"""
def __init__(self, summary_writer, cfg):
self.experiment_start = time.time()
# prevents noisy summaries when experiments are restarted
self.defer_summaries_sec = cfg.get('defer_summaries_sec', 5)
self.interval_sec_min = cfg.get('summaries_interval_sec_min', 5)
self.interval_sec_max = cfg.get('summaries_interval_sec_max', 300)
self.last_interval = self.interval_sec_min
# interval between summaries will be close to this fraction of the total training time,
# i.e. for a run that lasted 200 minutes we write one summary every minute.
self.summaries_relative_step = 1.0 / 200
self.writer = summary_writer
self.last_write_for_tag = dict()
def _calc_interval(self):
"""Write summaries more often in the beginning of the run."""
if self.last_interval >= self.interval_sec_max:
return self.last_interval
seconds_since_start = time.time() - self.experiment_start
interval = seconds_since_start * self.summaries_relative_step
interval = min(interval, self.interval_sec_max)
interval = max(interval, self.interval_sec_min)
self.last_interval = interval
return interval
def add_scalar(self, tag, value, step, *args, **kwargs):
if step == 0:
# removes faulty summaries that appear after the experiment restart
# print('Skip summaries with step=0')
return
seconds_since_start = time.time() - self.experiment_start
if seconds_since_start < self.defer_summaries_sec:
return
last_write = self.last_write_for_tag.get(tag, 0)
seconds_since_last_write = time.time() - last_write
interval = self._calc_interval()
if seconds_since_last_write >= interval:
self.writer.add_scalar(tag, value, step, *args, **kwargs)
self.last_write_for_tag[tag] = time.time()
def __getattr__(self, attr):
return getattr(self.writer, attr)
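# Hedged usage sketch (not part of the original file): the wrapper simply sits in front of a
# tensorboardX SummaryWriter, which is how a2c_common.py uses it; 'runs/example' is a placeholder
# log directory and the cfg keys are the same ones read in __init__ above.
def _example_interval_writer():
    from tensorboardX import SummaryWriter
    cfg = {'defer_summaries_sec': 5, 'summaries_interval_sec_min': 5, 'summaries_interval_sec_max': 300}
    writer = IntervalSummaryWriter(SummaryWriter('runs/example'), cfg)
    for step in range(1, 1000):
        writer.add_scalar('rewards/step', float(step), step)  # most calls are dropped by the interval check
    writer.flush()  # unknown attributes (flush, close, ...) are forwarded to the wrapped writer
    return writer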
| 2,750 |
Python
| 40.681818 | 117 | 0.663273 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/a2c_common.py
|
import os
from rl_games.common import tr_helpers
from rl_games.common import vecenv
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.algos_torch.moving_mean_std import MovingMeanStd
from rl_games.algos_torch.self_play_manager import SelfPlayManager
from rl_games.algos_torch import torch_ext
from rl_games.common import schedulers
from rl_games.common.experience import ExperienceBuffer
from rl_games.common.interval_summary_writer import IntervalSummaryWriter
import numpy as np
import collections
import time
from collections import deque, OrderedDict
import gym
from datetime import datetime
from tensorboardX import SummaryWriter
import torch
from torch import nn
from time import sleep
def swap_and_flatten01(arr):
"""
swap and then flatten axes 0 and 1
"""
if arr is None:
return arr
s = arr.size()
return arr.transpose(0, 1).reshape(s[0] * s[1], *s[2:])
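# Added sketch (not in the original file): a (horizon, num_envs, *dims) rollout tensor becomes
# (horizon * num_envs, *dims), which is how play_steps() flattens the experience buffer.
def _example_swap_and_flatten():
    rollout = torch.zeros(8, 4, 3)        # horizon=8, num_envs=4, obs_dim=3
    flat = swap_and_flatten01(rollout)
    assert flat.shape == (32, 3)
    return flat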
def rescale_actions(low, high, action):
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
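# Added sketch (not in the original file): maps normalized policy outputs in [-1, 1] onto the
# environment's action bounds, as ContinuousA2CBase.preprocess_actions does after clamping.
def _example_rescale_actions():
    low = torch.tensor([-2.0, 0.0])
    high = torch.tensor([2.0, 1.0])
    normalized = torch.tensor([0.0, -1.0])          # midpoint and lower bound in normalized space
    return rescale_actions(low, high, normalized)   # -> tensor([0., 0.])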
class A2CBase:
def __init__(self, base_name, config):
pbt_str = ''
if config.get('population_based_training', False):
# in PBT, make sure experiment name contains a unique id of the policy within a population
pbt_str = f'_pbt_{config["pbt_idx"]:02d}'
# This helps in PBT when we need to restart an experiment with the exact same name, rather than
# generating a new name with the timestamp every time.
full_experiment_name = config.get('full_experiment_name', None)
if full_experiment_name:
print(f'Exact experiment name requested from command line: {full_experiment_name}')
self.experiment_name = full_experiment_name
else:
self.experiment_name = config['name'] + pbt_str + datetime.now().strftime("_%d-%H-%M-%S")
self.config = config
self.algo_observer = config['features']['observer']
self.algo_observer.before_init(base_name, config, self.experiment_name)
self.multi_gpu = config.get('multi_gpu', False)
self.rank = 0
self.rank_size = 1
if self.multi_gpu:
from rl_games.distributed.hvd_wrapper import HorovodWrapper
self.hvd = HorovodWrapper()
self.config = self.hvd.update_algo_config(config)
self.rank = self.hvd.rank
self.rank_size = self.hvd.rank_size
self.network_path = config.get('network_path', "./nn/")
self.log_path = config.get('log_path', "runs/")
self.env_config = config.get('env_config', {})
self.num_actors = config['num_actors']
self.env_name = config['env_name']
self.env_info = config.get('env_info')
if self.env_info is None:
self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
self.env_info = self.vec_env.get_env_info()
self.ppo_device = config.get('device', 'cuda:0')
print('Env info:')
print(self.env_info)
self.value_size = self.env_info.get('value_size',1)
self.observation_space = self.env_info['observation_space']
self.weight_decay = config.get('weight_decay', 0.0)
self.use_action_masks = config.get('use_action_masks', False)
self.is_train = config.get('is_train', True)
self.central_value_config = self.config.get('central_value_config', None)
self.has_central_value = self.central_value_config is not None
self.truncate_grads = self.config.get('truncate_grads', False)
if self.has_central_value:
self.state_space = self.env_info.get('state_space', None)
if isinstance(self.state_space,gym.spaces.Dict):
self.state_shape = {}
for k,v in self.state_space.spaces.items():
self.state_shape[k] = v.shape
else:
self.state_shape = self.state_space.shape
self.self_play_config = self.config.get('self_play_config', None)
self.has_self_play_config = self.self_play_config is not None
self.self_play = config.get('self_play', False)
self.save_freq = config.get('save_frequency', 0)
self.save_best_after = config.get('save_best_after', 100)
self.print_stats = config.get('print_stats', True)
self.rnn_states = None
self.name = base_name
self.ppo = config['ppo']
self.max_epochs = self.config.get('max_epochs', 1e6)
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.linear_lr = config['lr_schedule'] == 'linear'
self.schedule_type = config.get('schedule_type', 'legacy')
if self.is_adaptive_lr:
self.kl_threshold = config['kl_threshold']
self.scheduler = schedulers.AdaptiveScheduler(self.kl_threshold)
elif self.linear_lr:
self.scheduler = schedulers.LinearScheduler(float(config['learning_rate']),
max_steps=self.max_epochs,
apply_to_entropy=config.get('schedule_entropy', False),
start_entropy_coef=config.get('entropy_coef'))
else:
self.scheduler = schedulers.IdentityScheduler()
self.e_clip = config['e_clip']
self.clip_value = config['clip_value']
self.network = config['network']
self.rewards_shaper = config['reward_shaper']
self.num_agents = self.env_info.get('agents', 1)
# self.horizon_length = config['horizon_length']
self.horizon_length = config['steps_num']
self.seq_len = self.config.get('seq_length', 4)
self.normalize_advantage = config['normalize_advantage']
self.normalize_input = self.config['normalize_input']
self.normalize_value = self.config.get('normalize_value', False)
self.truncate_grads = self.config.get('truncate_grads', False)
self.has_phasic_policy_gradients = False
if isinstance(self.observation_space,gym.spaces.Dict):
self.obs_shape = {}
for k,v in self.observation_space.spaces.items():
self.obs_shape[k] = v.shape
else:
self.obs_shape = self.observation_space.shape
self.critic_coef = config['critic_coef']
self.grad_norm = config['grad_norm']
self.gamma = self.config['gamma']
self.tau = self.config['tau']
self.games_to_track = self.config.get('games_to_track', 100)
self.game_rewards = torch_ext.AverageMeter(self.value_size, self.games_to_track).to(self.ppo_device)
self.game_lengths = torch_ext.AverageMeter(1, self.games_to_track).to(self.ppo_device)
self.obs = None
self.games_num = self.config['minibatch_size'] // self.seq_len # it is used only for current rnn implementation
self.batch_size = self.horizon_length * self.num_actors * self.num_agents
self.batch_size_envs = self.horizon_length * self.num_actors
self.minibatch_size = self.config['minibatch_size']
self.mini_epochs_num = self.config['mini_epochs']
self.num_minibatches = self.batch_size // self.minibatch_size
assert(self.batch_size % self.minibatch_size == 0)
self.mixed_precision = self.config.get('mixed_precision', False)
self.scaler = torch.cuda.amp.GradScaler(enabled=self.mixed_precision)
self.last_lr = self.config['learning_rate']
self.frame = 0
self.update_time = 0
self.mean_rewards = self.last_mean_rewards = -100500
self.play_time = 0
self.epoch_num = 0
# allows us to specify a folder where all experiments will reside
self.train_dir = config.get('train_dir', 'train_dir')
# a folder inside of train_dir containing everything related to a particular experiment
# self.experiment_dir = os.path.join(self.train_dir, self.experiment_name)
self.experiment_dir = config.get('logdir', './')
# folders inside <train_dir>/<experiment_dir> for a specific purpose
self.nn_dir = os.path.join(self.experiment_dir, 'nn')
self.summaries_dir = os.path.join(self.experiment_dir, 'runs')
os.makedirs(self.train_dir, exist_ok=True)
os.makedirs(self.experiment_dir, exist_ok=True)
os.makedirs(self.nn_dir, exist_ok=True)
os.makedirs(self.summaries_dir, exist_ok=True)
self.entropy_coef = self.config['entropy_coef']
if self.rank == 0:
writer = SummaryWriter(self.summaries_dir)
self.writer = IntervalSummaryWriter(writer, self.config)
else:
self.writer = None
self.value_bootstrap = self.config.get('value_bootstrap')
if self.normalize_value:
self.value_mean_std = RunningMeanStd((1,)).to(self.ppo_device)
self.is_tensor_obses = False
self.last_rnn_indices = None
self.last_state_indices = None
#self_play
if self.has_self_play_config:
print('Initializing SelfPlay Manager')
self.self_play_manager = SelfPlayManager(self.self_play_config, self.writer)
# features
self.algo_observer = config['features']['observer']
self.soft_aug = config['features'].get('soft_augmentation', None)
self.has_soft_aug = self.soft_aug is not None
# soft augmentation not yet supported
assert not self.has_soft_aug
def write_stats(self, total_time, epoch_num, step_time, play_time, update_time, a_losses, c_losses, entropies, kls, last_lr, lr_mul, frame, scaled_time, scaled_play_time, curr_frames):
# do we need scaled time?
self.writer.add_scalar('performance/step_inference_rl_update_fps', curr_frames / scaled_time, frame)
self.writer.add_scalar('performance/step_inference_fps', curr_frames / scaled_play_time, frame)
self.writer.add_scalar('performance/step_fps', curr_frames / step_time, frame)
self.writer.add_scalar('performance/rl_update_time', update_time, frame)
self.writer.add_scalar('performance/step_inference_time', play_time, frame)
self.writer.add_scalar('performance/step_time', step_time, frame)
self.writer.add_scalar('losses/a_loss', torch_ext.mean_list(a_losses).item(), frame)
self.writer.add_scalar('losses/c_loss', torch_ext.mean_list(c_losses).item(), frame)
self.writer.add_scalar('losses/entropy', torch_ext.mean_list(entropies).item(), frame)
self.writer.add_scalar('info/last_lr', last_lr * lr_mul, frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/e_clip', self.e_clip * lr_mul, frame)
self.writer.add_scalar('info/kl', torch_ext.mean_list(kls).item(), frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
self.algo_observer.after_print_stats(frame, epoch_num, total_time)
def set_eval(self):
self.model.eval()
if self.normalize_input:
self.running_mean_std.eval()
if self.normalize_value:
self.value_mean_std.eval()
def set_train(self):
self.model.train()
if self.normalize_input:
self.running_mean_std.train()
if self.normalize_value:
self.value_mean_std.train()
def update_lr(self, lr):
if self.multi_gpu:
lr_tensor = torch.tensor([lr])
self.hvd.broadcast_value(lr_tensor, 'learning_rate')
lr = lr_tensor.item()
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
#if self.has_central_value:
# self.central_value_net.update_lr(lr)
def get_action_values(self, obs):
processed_obs = self._preproc_obs(obs['obs'])
self.model.eval()
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : processed_obs,
'rnn_states' : self.rnn_states
}
with torch.no_grad():
res_dict = self.model(input_dict)
if self.has_central_value:
states = obs['states']
input_dict = {
'is_train': False,
'states' : states,
#'actions' : res_dict['action'],
#'rnn_states' : self.rnn_states
}
value = self.get_central_value(input_dict)
res_dict['values'] = value
if self.normalize_value:
res_dict['values'] = self.value_mean_std(res_dict['values'], True)
return res_dict
def get_values(self, obs):
with torch.no_grad():
if self.has_central_value:
states = obs['states']
self.central_value_net.eval()
input_dict = {
'is_train': False,
'states' : states,
'actions' : None,
'is_done': self.dones,
}
value = self.get_central_value(input_dict)
else:
self.model.eval()
processed_obs = self._preproc_obs(obs['obs'])
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : processed_obs,
'rnn_states' : self.rnn_states
}
result = self.model(input_dict)
value = result['values']
if self.normalize_value:
value = self.value_mean_std(value, True)
return value
@property
def device(self):
return self.ppo_device
def reset_envs(self):
self.obs = self.env_reset()
def init_tensors(self):
batch_size = self.num_agents * self.num_actors
algo_info = {
'num_actors' : self.num_actors,
'horizon_length' : self.horizon_length,
'has_central_value' : self.has_central_value,
'use_action_masks' : self.use_action_masks
}
self.experience_buffer = ExperienceBuffer(self.env_info, algo_info, self.ppo_device)
val_shape = (self.horizon_length, batch_size, self.value_size)
current_rewards_shape = (batch_size, self.value_size)
self.current_rewards = torch.zeros(current_rewards_shape, dtype=torch.float32, device=self.ppo_device)
self.current_lengths = torch.zeros(batch_size, dtype=torch.float32, device=self.ppo_device)
self.dones = torch.ones((batch_size,), dtype=torch.uint8, device=self.ppo_device)
if self.is_rnn:
self.rnn_states = self.model.get_default_rnn_state()
self.rnn_states = [s.to(self.ppo_device) for s in self.rnn_states]
batch_size = self.num_agents * self.num_actors
num_seqs = self.horizon_length * batch_size // self.seq_len
assert((self.horizon_length * batch_size // self.num_minibatches) % self.seq_len == 0)
self.mb_rnn_states = [torch.zeros((s.size()[0], num_seqs, s.size()[2]), dtype = torch.float32, device=self.ppo_device) for s in self.rnn_states]
def init_rnn_from_model(self, model):
self.is_rnn = self.model.is_rnn()
def init_rnn_step(self, batch_size, mb_rnn_states):
mb_rnn_states = self.mb_rnn_states
mb_rnn_masks = torch.zeros(self.horizon_length*batch_size, dtype = torch.float32, device=self.ppo_device)
steps_mask = torch.arange(0, batch_size * self.horizon_length, self.horizon_length, dtype=torch.long, device=self.ppo_device)
play_mask = torch.arange(0, batch_size, 1, dtype=torch.long, device=self.ppo_device)
steps_state = torch.arange(0, batch_size * self.horizon_length//self.seq_len, self.horizon_length//self.seq_len, dtype=torch.long, device=self.ppo_device)
indices = torch.zeros((batch_size), dtype = torch.long, device=self.ppo_device)
return mb_rnn_masks, indices, steps_mask, steps_state, play_mask, mb_rnn_states
def process_rnn_indices(self, mb_rnn_masks, indices, steps_mask, steps_state, mb_rnn_states):
seq_indices = None
if indices.max().item() >= self.horizon_length:
return seq_indices, True
mb_rnn_masks[indices + steps_mask] = 1
seq_indices = indices % self.seq_len
state_indices = (seq_indices == 0).nonzero(as_tuple=False)
state_pos = indices // self.seq_len
rnn_indices = state_pos[state_indices] + steps_state[state_indices]
for s, mb_s in zip(self.rnn_states, mb_rnn_states):
mb_s[:, rnn_indices, :] = s[:, state_indices, :]
self.last_rnn_indices = rnn_indices
self.last_state_indices = state_indices
return seq_indices, False
def process_rnn_dones(self, all_done_indices, indices, seq_indices):
if len(all_done_indices) > 0:
shifts = self.seq_len - 1 - seq_indices[all_done_indices]
indices[all_done_indices] += shifts
for s in self.rnn_states:
s[:,all_done_indices,:] = s[:,all_done_indices,:] * 0.0
indices += 1
def cast_obs(self, obs):
if isinstance(obs, torch.Tensor):
self.is_tensor_obses = True
elif isinstance(obs, np.ndarray):
assert(self.observation_space.dtype != np.int8)
if self.observation_space.dtype == np.uint8:
obs = torch.ByteTensor(obs).to(self.ppo_device)
else:
obs = torch.FloatTensor(obs).to(self.ppo_device)
return obs
def obs_to_tensors(self, obs):
obs_is_dict = isinstance(obs, dict)
if obs_is_dict:
upd_obs = {}
for key, value in obs.items():
upd_obs[key] = self._obs_to_tensors_internal(value)
else:
upd_obs = self.cast_obs(obs)
if not obs_is_dict or 'obs' not in obs:
upd_obs = {'obs' : upd_obs}
return upd_obs
def _obs_to_tensors_internal(self, obs):
if isinstance(obs, dict):
upd_obs = {}
for key, value in obs.items():
upd_obs[key] = self._obs_to_tensors_internal(value)
else:
upd_obs = self.cast_obs(obs)
return upd_obs
def preprocess_actions(self, actions):
if not self.is_tensor_obses:
actions = actions.cpu().numpy()
return actions
def env_step(self, actions):
actions = self.preprocess_actions(actions)
obs, rewards, dones, infos = self.vec_env.step(actions)
if self.is_tensor_obses:
if self.value_size == 1:
rewards = rewards.unsqueeze(1)
return self.obs_to_tensors(obs), rewards.to(self.ppo_device), dones.to(self.ppo_device), infos
else:
if self.value_size == 1:
rewards = np.expand_dims(rewards, axis=1)
return self.obs_to_tensors(obs), torch.from_numpy(rewards).to(self.ppo_device).float(), torch.from_numpy(dones).to(self.ppo_device), infos
def env_reset(self):
obs = self.vec_env.reset()
obs = self.obs_to_tensors(obs)
return obs
def discount_values(self, fdones, last_extrinsic_values, mb_fdones, mb_extrinsic_values, mb_rewards):
lastgaelam = 0
mb_advs = torch.zeros_like(mb_rewards)
for t in reversed(range(self.horizon_length)):
if t == self.horizon_length - 1:
nextnonterminal = 1.0 - fdones
nextvalues = last_extrinsic_values
else:
nextnonterminal = 1.0 - mb_fdones[t+1]
nextvalues = mb_extrinsic_values[t+1]
nextnonterminal = nextnonterminal.unsqueeze(1)
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_extrinsic_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.tau * nextnonterminal * lastgaelam
return mb_advs
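    # Added worked sketch (not in the original file) of the GAE recursion above, with made-up
    # numbers: gamma=0.99, tau=0.95, one env, horizon 2, rewards r=[1, 1], values V=[0.5, 0.6],
    # bootstrap value V_last=0.7 and no terminations:
    #   delta_1 = 1 + 0.99*0.7 - 0.6 = 1.093   ->  adv_1 = 1.093
    #   delta_0 = 1 + 0.99*0.6 - 0.5 = 1.094   ->  adv_0 = 1.094 + 0.99*0.95*1.093 ~= 2.122
    # play_steps() then stores adv + V as 'returns'.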
def discount_values_masks(self, fdones, last_extrinsic_values, mb_fdones, mb_extrinsic_values, mb_rewards, mb_masks):
lastgaelam = 0
mb_advs = torch.zeros_like(mb_rewards)
for t in reversed(range(self.horizon_length)):
if t == self.horizon_length - 1:
nextnonterminal = 1.0 - fdones
nextvalues = last_extrinsic_values
else:
nextnonterminal = 1.0 - mb_fdones[t+1]
nextvalues = mb_extrinsic_values[t+1]
nextnonterminal = nextnonterminal.unsqueeze(1)
masks_t = mb_masks[t].unsqueeze(1)
delta = (mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_extrinsic_values[t])
mb_advs[t] = lastgaelam = (delta + self.gamma * self.tau * nextnonterminal * lastgaelam) * masks_t
return mb_advs
def clear_stats(self):
batch_size = self.num_agents * self.num_actors
self.game_rewards.clear()
self.game_lengths.clear()
self.mean_rewards = self.last_mean_rewards = -100500
self.algo_observer.after_clear_stats()
def update_epoch(self):
pass
def train(self):
pass
def prepare_dataset(self, batch_dict):
pass
def train_epoch(self):
self.vec_env.set_train_info(self.frame)
def train_actor_critic(self, obs_dict, opt_step=True):
pass
def calc_gradients(self):
pass
def get_central_value(self, obs_dict):
return self.central_value_net.get_value(obs_dict)
def train_central_value(self):
return self.central_value_net.train_net()
def get_full_state_weights(self):
state = self.get_weights()
state['epoch'] = self.epoch_num
state['optimizer'] = self.optimizer.state_dict()
if self.has_central_value:
state['assymetric_vf_nets'] = self.central_value_net.state_dict()
state['frame'] = self.frame
# This is actually the best reward ever achieved. last_mean_rewards is perhaps not the best variable name
# We save it to the checkpoint to prevent overriding the "best ever" checkpoint upon experiment restart
state['last_mean_rewards'] = self.last_mean_rewards
env_state = self.vec_env.get_env_state()
state['env_state'] = env_state
return state
def set_full_state_weights(self, weights):
self.set_weights(weights)
self.epoch_num = weights['epoch']
if self.has_central_value:
self.central_value_net.load_state_dict(weights['assymetric_vf_nets'])
self.optimizer.load_state_dict(weights['optimizer'])
self.frame = weights.get('frame', 0)
self.last_mean_rewards = weights.get('last_mean_rewards', -100500)
env_state = weights.get('env_state', None)
self.vec_env.set_env_state(env_state)
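    # Hedged sketch (not in the original file) of the checkpoint round trip defined by the two
    # methods above; 'checkpoints/agent.pth' is a placeholder path and plain torch.save/torch.load
    # are used here instead of whatever helper the surrounding trainer normally calls.
    def _example_checkpoint_roundtrip(self, path='checkpoints/agent.pth'):
        torch.save(self.get_full_state_weights(), path)
        state = torch.load(path, map_location=self.ppo_device)
        self.set_full_state_weights(state)  # restores model, optimizer, epoch, frame and env state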
def get_weights(self):
state = self.get_stats_weights()
state['model'] = self.model.state_dict()
return state
def get_stats_weights(self):
state = {}
if self.normalize_input:
state['running_mean_std'] = self.running_mean_std.state_dict()
if self.normalize_value:
state['reward_mean_std'] = self.value_mean_std.state_dict()
if self.has_central_value:
state['assymetric_vf_mean_std'] = self.central_value_net.get_stats_weights()
if self.mixed_precision:
state['scaler'] = self.scaler.state_dict()
return state
def set_stats_weights(self, weights):
if self.normalize_input:
self.running_mean_std.load_state_dict(weights['running_mean_std'])
if self.normalize_value:
self.value_mean_std.load_state_dict(weights['reward_mean_std'])
if self.has_central_value:
self.central_value_net.set_stats_weights(weights['assymetric_vf_mean_std'])
if self.mixed_precision and 'scaler' in weights:
self.scaler.load_state_dict(weights['scaler'])
def set_weights(self, weights):
self.model.load_state_dict(weights['model'])
self.set_stats_weights(weights)
def _preproc_obs(self, obs_batch):
if type(obs_batch) is dict:
for k,v in obs_batch.items():
obs_batch[k] = self._preproc_obs(v)
else:
if obs_batch.dtype == torch.uint8:
obs_batch = obs_batch.float() / 255.0
if self.normalize_input:
obs_batch = self.running_mean_std(obs_batch)
return obs_batch
def play_steps(self):
epinfos = []
update_list = self.update_list
step_time = 0.0
for n in range(self.horizon_length):
if self.use_action_masks:
masks = self.vec_env.get_action_masks()
res_dict = self.get_masked_action_values(self.obs, masks)
else:
res_dict = self.get_action_values(self.obs)
self.experience_buffer.update_data('obses', n, self.obs['obs'])
self.experience_buffer.update_data('dones', n, self.dones)
for k in update_list:
self.experience_buffer.update_data(k, n, res_dict[k])
if self.has_central_value:
self.experience_buffer.update_data('states', n, self.obs['states'])
step_time_start = time.time()
self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions'])
step_time_end = time.time()
step_time += (step_time_end - step_time_start)
shaped_rewards = self.rewards_shaper(rewards)
if self.value_bootstrap and 'time_outs' in infos:
shaped_rewards += self.gamma * res_dict['values'] * self.cast_obs(infos['time_outs']).unsqueeze(1).float()
self.experience_buffer.update_data('rewards', n, shaped_rewards)
self.current_rewards += rewards
self.current_lengths += 1
all_done_indices = self.dones.nonzero(as_tuple=False)
done_indices = all_done_indices[::self.num_agents]
self.game_rewards.update(self.current_rewards[done_indices])
self.game_lengths.update(self.current_lengths[done_indices])
self.algo_observer.process_infos(infos, done_indices)
not_dones = 1.0 - self.dones.float()
self.current_rewards = self.current_rewards * not_dones.unsqueeze(1)
self.current_lengths = self.current_lengths * not_dones
last_values = self.get_values(self.obs)
fdones = self.dones.float()
mb_fdones = self.experience_buffer.tensor_dict['dones'].float()
mb_values = self.experience_buffer.tensor_dict['values']
mb_rewards = self.experience_buffer.tensor_dict['rewards']
mb_advs = self.discount_values(fdones, last_values, mb_fdones, mb_values, mb_rewards)
mb_returns = mb_advs + mb_values
batch_dict = self.experience_buffer.get_transformed_list(swap_and_flatten01, self.tensor_list)
batch_dict['returns'] = swap_and_flatten01(mb_returns)
batch_dict['played_frames'] = self.batch_size
batch_dict['step_time'] = step_time
return batch_dict
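    # Added note (not in the original file): the dict returned above carries the flattened rollout,
    # i.e. the tensors in self.tensor_list ('actions', 'neglogpacs', 'values', 'obses', 'states',
    # 'dones', plus 'mus'/'sigmas' in the continuous case) together with 'returns', 'played_frames'
    # and 'step_time'; prepare_dataset() consumes it to build the PPO dataset.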
def play_steps_rnn(self):
mb_rnn_states = []
epinfos = []
self.experience_buffer.tensor_dict['values'].fill_(0)
self.experience_buffer.tensor_dict['rewards'].fill_(0)
self.experience_buffer.tensor_dict['dones'].fill_(1)
step_time = 0.0
update_list = self.update_list
batch_size = self.num_agents * self.num_actors
mb_rnn_masks = None
mb_rnn_masks, indices, steps_mask, steps_state, play_mask, mb_rnn_states = self.init_rnn_step(batch_size, mb_rnn_states)
for n in range(self.horizon_length):
seq_indices, full_tensor = self.process_rnn_indices(mb_rnn_masks, indices, steps_mask, steps_state, mb_rnn_states)
if full_tensor:
break
if self.has_central_value:
self.central_value_net.pre_step_rnn(self.last_rnn_indices, self.last_state_indices)
if self.use_action_masks:
masks = self.vec_env.get_action_masks()
res_dict = self.get_masked_action_values(self.obs, masks)
else:
res_dict = self.get_action_values(self.obs)
self.rnn_states = res_dict['rnn_states']
self.experience_buffer.update_data_rnn('obses', indices, play_mask, self.obs['obs'])
self.experience_buffer.update_data_rnn('dones', indices, play_mask, self.dones.byte())
for k in update_list:
self.experience_buffer.update_data_rnn(k, indices, play_mask, res_dict[k])
if self.has_central_value:
self.experience_buffer.update_data_rnn('states', indices[::self.num_agents] ,play_mask[::self.num_agents]//self.num_agents, self.obs['states'])
step_time_start = time.time()
self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions'])
step_time_end = time.time()
step_time += (step_time_end - step_time_start)
shaped_rewards = self.rewards_shaper(rewards)
if self.value_bootstrap and 'time_outs' in infos:
shaped_rewards += self.gamma * res_dict['values'] * self.cast_obs(infos['time_outs']).unsqueeze(1).float()
self.experience_buffer.update_data_rnn('rewards', indices, play_mask, shaped_rewards)
self.current_rewards += rewards
self.current_lengths += 1
all_done_indices = self.dones.nonzero(as_tuple=False)
done_indices = all_done_indices[::self.num_agents]
self.process_rnn_dones(all_done_indices, indices, seq_indices)
if self.has_central_value:
self.central_value_net.post_step_rnn(all_done_indices)
self.algo_observer.process_infos(infos, done_indices)
fdones = self.dones.float()
not_dones = 1.0 - self.dones.float()
self.game_rewards.update(self.current_rewards[done_indices])
self.game_lengths.update(self.current_lengths[done_indices])
self.current_rewards = self.current_rewards * not_dones.unsqueeze(1)
self.current_lengths = self.current_lengths * not_dones
last_values = self.get_values(self.obs)
fdones = self.dones.float()
mb_fdones = self.experience_buffer.tensor_dict['dones'].float()
mb_values = self.experience_buffer.tensor_dict['values']
mb_rewards = self.experience_buffer.tensor_dict['rewards']
non_finished = (indices != self.horizon_length).nonzero(as_tuple=False)
ind_to_fill = indices[non_finished]
mb_fdones[ind_to_fill,non_finished] = fdones[non_finished]
mb_values[ind_to_fill,non_finished] = last_values[non_finished]
fdones[non_finished] = 1.0
last_values[non_finished] = 0
mb_advs = self.discount_values_masks(fdones, last_values, mb_fdones, mb_values, mb_rewards, mb_rnn_masks.view(-1,self.horizon_length).transpose(0,1))
mb_returns = mb_advs + mb_values
batch_dict = self.experience_buffer.get_transformed_list(swap_and_flatten01, self.tensor_list)
batch_dict['returns'] = swap_and_flatten01(mb_returns)
batch_dict['rnn_states'] = mb_rnn_states
batch_dict['rnn_masks'] = mb_rnn_masks
batch_dict['played_frames'] = n * self.num_actors * self.num_agents
batch_dict['step_time'] = step_time
return batch_dict
class DiscreteA2CBase(A2CBase):
def __init__(self, base_name, config):
A2CBase.__init__(self, base_name, config)
batch_size = self.num_agents * self.num_actors
action_space = self.env_info['action_space']
if type(action_space) is gym.spaces.Discrete:
self.actions_shape = (self.horizon_length, batch_size)
self.actions_num = action_space.n
self.is_multi_discrete = False
if type(action_space) is gym.spaces.Tuple:
self.actions_shape = (self.horizon_length, batch_size, len(action_space))
self.actions_num = [action.n for action in action_space]
self.is_multi_discrete = True
self.is_discrete = True
def init_tensors(self):
A2CBase.init_tensors(self)
self.update_list = ['actions', 'neglogpacs', 'values']
if self.use_action_masks:
self.update_list += ['action_masks']
self.tensor_list = self.update_list + ['obses', 'states', 'dones']
def train_epoch(self):
super().train_epoch()
self.set_eval()
play_time_start = time.time()
with torch.no_grad():
if self.is_rnn:
batch_dict = self.play_steps_rnn()
else:
batch_dict = self.play_steps()
self.set_train()
play_time_end = time.time()
update_time_start = time.time()
rnn_masks = batch_dict.get('rnn_masks', None)
self.curr_frames = batch_dict.pop('played_frames')
self.prepare_dataset(batch_dict)
self.algo_observer.after_steps()
a_losses = []
c_losses = []
entropies = []
kls = []
if self.has_central_value:
self.train_central_value()
if self.is_rnn:
print('non masked rnn obs ratio: ', rnn_masks.sum().item() / (rnn_masks.nelement()))
for _ in range(0, self.mini_epochs_num):
ep_kls = []
for i in range(len(self.dataset)):
a_loss, c_loss, entropy, kl, last_lr, lr_mul = self.train_actor_critic(self.dataset[i])
a_losses.append(a_loss)
c_losses.append(c_loss)
ep_kls.append(kl)
entropies.append(entropy)
av_kls = torch_ext.mean_list(ep_kls)
if self.multi_gpu:
av_kls = self.hvd.average_value(av_kls, 'ep_kls')
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item())
self.update_lr(self.last_lr)
kls.append(av_kls)
if self.has_phasic_policy_gradients:
self.ppg_aux_loss.train_net(self)
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
return batch_dict['step_time'], play_time, update_time, total_time, a_losses, c_losses, entropies, kls, last_lr, lr_mul
def prepare_dataset(self, batch_dict):
rnn_masks = batch_dict.get('rnn_masks', None)
obses = batch_dict['obses']
returns = batch_dict['returns']
values = batch_dict['values']
actions = batch_dict['actions']
neglogpacs = batch_dict['neglogpacs']
rnn_states = batch_dict.get('rnn_states', None)
advantages = returns - values
if self.normalize_value:
values = self.value_mean_std(values)
returns = self.value_mean_std(returns)
advantages = torch.sum(advantages, axis=1)
if self.normalize_advantage:
if self.is_rnn:
advantages = torch_ext.normalization_with_masks(advantages, rnn_masks)
else:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
dataset_dict = {}
dataset_dict['old_values'] = values
dataset_dict['old_logp_actions'] = neglogpacs
dataset_dict['advantages'] = advantages
dataset_dict['returns'] = returns
dataset_dict['actions'] = actions
dataset_dict['obs'] = obses
dataset_dict['rnn_states'] = rnn_states
dataset_dict['rnn_masks'] = rnn_masks
if self.use_action_masks:
dataset_dict['action_masks'] = batch_dict['action_masks']
self.dataset.update_values_dict(dataset_dict)
if self.has_central_value:
dataset_dict = {}
dataset_dict['old_values'] = values
dataset_dict['advantages'] = advantages
dataset_dict['returns'] = returns
dataset_dict['actions'] = actions
dataset_dict['obs'] = batch_dict['states']
dataset_dict['rnn_masks'] = rnn_masks
self.central_value_net.update_dataset(dataset_dict)
def train(self):
self.init_tensors()
self.mean_rewards = self.last_mean_rewards = -100500
start_time = time.time()
total_time = 0
rep_count = 0
# self.frame = 0 # loading from checkpoint
self.obs = self.env_reset()
if self.multi_gpu:
self.hvd.setup_algo(self)
while True:
epoch_num = self.update_epoch()
step_time, play_time, update_time, sum_time, a_losses, c_losses, entropies, kls, last_lr, lr_mul = self.train_epoch()
# cleaning memory to optimize space
self.dataset.update_values_dict(None)
if self.multi_gpu:
self.hvd.sync_stats(self)
            total_time += sum_time
            if self.rank == 0:
                scaled_time = sum_time #self.num_agents * sum_time
                scaled_play_time = play_time #self.num_agents * play_time
                curr_frames = self.curr_frames
                self.frame += curr_frames
frame = self.frame
self.write_stats(total_time, epoch_num, step_time, play_time, update_time, a_losses, c_losses, entropies, kls, last_lr, lr_mul, frame, scaled_time, scaled_play_time, curr_frames)
if self.has_soft_aug:
self.writer.add_scalar('losses/aug_loss', np.mean(aug_losses), frame)
self.algo_observer.after_print_stats(frame, epoch_num, total_time)
if self.game_rewards.current_size > 0:
mean_rewards = self.game_rewards.get_mean()
mean_lengths = self.game_lengths.get_mean()
self.mean_rewards = mean_rewards[0]
for i in range(self.value_size):
rewards_name = 'rewards' if i == 0 else 'rewards{0}'.format(i)
self.writer.add_scalar(rewards_name + '/step'.format(i), mean_rewards[i], frame)
self.writer.add_scalar(rewards_name + '/iter'.format(i), mean_rewards[i], epoch_num)
self.writer.add_scalar(rewards_name + '/time'.format(i), mean_rewards[i], total_time)
self.writer.add_scalar('episode_lengths/step', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/iter', mean_lengths, epoch_num)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if self.has_self_play_config:
self.self_play_manager.update(self)
# removed equal signs (i.e. "rew=") from the checkpoint name since it messes with hydra CLI parsing
checkpoint_name = self.config['name'] + 'ep' + str(epoch_num) + 'rew' + str(mean_rewards)
if self.save_freq > 0:
                        if (epoch_num % self.save_freq == 0) and (mean_rewards[0] <= self.last_mean_rewards):
self.save(os.path.join(self.nn_dir, 'last_' + checkpoint_name))
if mean_rewards[0] > self.last_mean_rewards and epoch_num >= self.save_best_after:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards[0]
self.save(os.path.join(self.nn_dir, self.config['name']))
if self.last_mean_rewards > self.config['score_to_win']:
print('Network won!')
self.save(os.path.join(self.nn_dir, checkpoint_name))
return self.last_mean_rewards, epoch_num
if epoch_num > self.max_epochs:
self.save(os.path.join(self.nn_dir, 'last_' + checkpoint_name))
print('MAX EPOCHS NUM!')
return self.last_mean_rewards, epoch_num
update_time = 0
if self.print_stats:
fps_step = curr_frames / step_time
fps_step_inference = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
print(f'fps step: {fps_step:.1f} fps step and policy inference: {fps_step_inference:.1f} fps total: {fps_total:.1f}')
class ContinuousA2CBase(A2CBase):
def __init__(self, base_name, config):
A2CBase.__init__(self, base_name, config)
self.is_discrete = False
action_space = self.env_info['action_space']
self.actions_num = action_space.shape[0]
self.bounds_loss_coef = config.get('bounds_loss_coef', None)
# todo introduce device instead of cuda()
self.actions_low = torch.from_numpy(action_space.low.copy()).float().to(self.ppo_device)
self.actions_high = torch.from_numpy(action_space.high.copy()).float().to(self.ppo_device)
def preprocess_actions(self, actions):
clamped_actions = torch.clamp(actions, -1.0, 1.0)
rescaled_actions = rescale_actions(self.actions_low, self.actions_high, clamped_actions)
if not self.is_tensor_obses:
rescaled_actions = rescaled_actions.cpu().numpy()
return rescaled_actions
def init_tensors(self):
A2CBase.init_tensors(self)
self.update_list = ['actions', 'neglogpacs', 'values', 'mus', 'sigmas']
self.tensor_list = self.update_list + ['obses', 'states', 'dones']
def train_epoch(self):
super().train_epoch()
self.set_eval()
play_time_start = time.time()
with torch.no_grad():
if self.is_rnn:
batch_dict = self.play_steps_rnn()
else:
batch_dict = self.play_steps()
play_time_end = time.time()
update_time_start = time.time()
rnn_masks = batch_dict.get('rnn_masks', None)
self.set_train()
self.curr_frames = batch_dict.pop('played_frames')
self.prepare_dataset(batch_dict)
self.algo_observer.after_steps()
if self.has_central_value:
self.train_central_value()
a_losses = []
c_losses = []
b_losses = []
entropies = []
kls = []
if self.is_rnn:
frames_mask_ratio = rnn_masks.sum().item() / (rnn_masks.nelement())
print(frames_mask_ratio)
for _ in range(0, self.mini_epochs_num):
ep_kls = []
for i in range(len(self.dataset)):
a_loss, c_loss, entropy, kl, last_lr, lr_mul, cmu, csigma, b_loss = self.train_actor_critic(self.dataset[i])
a_losses.append(a_loss)
c_losses.append(c_loss)
ep_kls.append(kl)
entropies.append(entropy)
if self.bounds_loss_coef is not None:
b_losses.append(b_loss)
self.dataset.update_mu_sigma(cmu, csigma)
if self.schedule_type == 'legacy':
if self.multi_gpu:
kl = self.hvd.average_value(kl, 'ep_kls')
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0,kl.item())
self.update_lr(self.last_lr)
av_kls = torch_ext.mean_list(ep_kls)
if self.schedule_type == 'standard':
if self.multi_gpu:
av_kls = self.hvd.average_value(av_kls, 'ep_kls')
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0,av_kls.item())
self.update_lr(self.last_lr)
kls.append(av_kls)
if self.schedule_type == 'standard_epoch':
if self.multi_gpu:
av_kls = self.hvd.average_value(torch_ext.mean_list(kls), 'ep_kls')
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0,av_kls.item())
self.update_lr(self.last_lr)
if self.has_phasic_policy_gradients:
self.ppg_aux_loss.train_net(self)
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
return batch_dict['step_time'], play_time, update_time, total_time, a_losses, c_losses, b_losses, entropies, kls, last_lr, lr_mul
def prepare_dataset(self, batch_dict):
obses = batch_dict['obses']
returns = batch_dict['returns']
dones = batch_dict['dones']
values = batch_dict['values']
actions = batch_dict['actions']
neglogpacs = batch_dict['neglogpacs']
mus = batch_dict['mus']
sigmas = batch_dict['sigmas']
rnn_states = batch_dict.get('rnn_states', None)
rnn_masks = batch_dict.get('rnn_masks', None)
advantages = returns - values
if self.normalize_value:
values = self.value_mean_std(values)
returns = self.value_mean_std(returns)
advantages = torch.sum(advantages, axis=1)
if self.normalize_advantage:
if self.is_rnn:
advantages = torch_ext.normalization_with_masks(advantages, rnn_masks)
else:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
dataset_dict = {}
dataset_dict['old_values'] = values
dataset_dict['old_logp_actions'] = neglogpacs
dataset_dict['advantages'] = advantages
dataset_dict['returns'] = returns
dataset_dict['actions'] = actions
dataset_dict['obs'] = obses
dataset_dict['rnn_states'] = rnn_states
dataset_dict['rnn_masks'] = rnn_masks
dataset_dict['mu'] = mus
dataset_dict['sigma'] = sigmas
self.dataset.update_values_dict(dataset_dict)
if self.has_central_value:
dataset_dict = {}
dataset_dict['old_values'] = values
dataset_dict['advantages'] = advantages
dataset_dict['returns'] = returns
dataset_dict['actions'] = actions
dataset_dict['obs'] = batch_dict['states']
dataset_dict['rnn_masks'] = rnn_masks
self.central_value_net.update_dataset(dataset_dict)
def train(self):
self.init_tensors()
self.last_mean_rewards = -100500
start_time = time.time()
total_time = 0
rep_count = 0
self.obs = self.env_reset()
self.curr_frames = self.batch_size_envs
if self.multi_gpu:
self.hvd.setup_algo(self)
self.snr_ppo = []
while True:
epoch_num = self.update_epoch()
step_time, play_time, update_time, sum_time, a_losses, c_losses, b_losses, entropies, kls, last_lr, lr_mul = self.train_epoch()
total_time += sum_time
frame = self.frame
# cleaning memory to optimize space
self.dataset.update_values_dict(None)
if self.multi_gpu:
self.hvd.sync_stats(self)
if self.rank == 0:
# do we need scaled_time?
scaled_time = sum_time #self.num_agents * sum_time
scaled_play_time = play_time #self.num_agents * play_time
curr_frames = self.curr_frames
self.frame += curr_frames
self.write_stats(total_time, epoch_num, step_time, play_time, update_time, a_losses, c_losses, entropies, kls, last_lr, lr_mul, frame, scaled_time, scaled_play_time, curr_frames)
if len(b_losses) > 0:
self.writer.add_scalar('losses/bounds_loss', torch_ext.mean_list(b_losses).item(), frame)
if self.has_soft_aug:
self.writer.add_scalar('losses/aug_loss', np.mean(aug_losses), frame)
mean_rewards = [0]
mean_lengths = 0
self.writer.add_scalar('snr/pol_grad', np.mean(self.snr_ppo), frame)
self.snr_ppo = []
if self.game_rewards.current_size > 0:
mean_rewards = self.game_rewards.get_mean()
mean_lengths = self.game_lengths.get_mean()
self.mean_rewards = mean_rewards[0]
for i in range(self.value_size):
rewards_name = 'rewards' if i == 0 else 'rewards{0}'.format(i)
self.writer.add_scalar(rewards_name + '/step'.format(i), mean_rewards[i], frame)
self.writer.add_scalar(rewards_name + '/iter'.format(i), mean_rewards[i], epoch_num)
self.writer.add_scalar(rewards_name + '/time'.format(i), mean_rewards[i], total_time)
self.writer.add_scalar('episode_lengths/step', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/iter', mean_lengths, epoch_num)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if self.has_self_play_config:
self.self_play_manager.update(self)
checkpoint_name = self.config['name'] + 'ep' + str(epoch_num) + 'rew' + str(mean_rewards)
if self.save_freq > 0:
if (epoch_num % self.save_freq == 0) and (mean_rewards[0] <= self.last_mean_rewards):
self.save(os.path.join(self.nn_dir, 'last_' + checkpoint_name))
if mean_rewards[0] > self.last_mean_rewards and epoch_num >= self.save_best_after:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards[0]
self.save(os.path.join(self.nn_dir, self.config['name']))
if self.last_mean_rewards > self.config['score_to_win']:
print('Network won!')
self.save(os.path.join(self.nn_dir, checkpoint_name))
return self.last_mean_rewards, epoch_num
if epoch_num > self.max_epochs:
self.save(os.path.join(self.nn_dir, 'last_' + self.config['name'] + 'ep' + str(epoch_num) + 'rew' + str(mean_rewards)))
print('MAX EPOCHS NUM!')
return self.last_mean_rewards, epoch_num
update_time = 0
if self.print_stats:
fps_step = curr_frames / step_time
fps_step_inference = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
# print(f'fps step: {fps_step:.1f} fps step and policy inference: {fps_step_inference:.1f} fps total: {fps_total:.1f} mean reward: {mean_rewards[0]:.2f} mean lengths: {mean_lengths:.1f}')
print(f'epoch: {epoch_num} fps step: {fps_step:.1f} fps total: {fps_total:.1f} mean reward: {mean_rewards[0]:.2f} mean lengths: {mean_lengths:.1f}')
| 51,685 |
Python
| 41.928571 | 208 | 0.585354 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/experiment.py
|
import copy
import yaml
class Experiment:
def __init__(self, config, experiment_config):
self.config = copy.deepcopy(config)
self.best_config = copy.deepcopy(self.config)
self.experiment_config = experiment_config
self.best_results = -100500, 0
self.use_best_prev_result = self.experiment_config.get('use_best_prev_result', True)
self.experiments = self.experiment_config['experiments']
self.last_exp_idx = self.experiment_config.get('start_exp', 0)
self.sub_idx = self.experiment_config.get('start_sub_exp', 0)
self.done = False
self.results = {}
self.create_config()
def _set_parameter(self, config, path, value):
keys = path.split('.')
sub_conf = config
for key in keys[:-1]:
sub_conf = sub_conf[key]
print('set:' + str(keys) + ':' + str(value))
sub_conf[keys[-1]] = value
def set_results(self, rewards, epochs):
self.results[(self.last_exp_idx, self.sub_idx)] = rewards, epochs
if self.best_results[0] < rewards:
self.best_results = rewards, epochs
def create_config(self):
if self.done:
self.current_config = None
return
self.current_config = copy.deepcopy(self.config)
self.current_config['config']['name'] += '_' + str(self.last_exp_idx) + '_' + str(self.sub_idx)
print('Experiment name: ' + self.current_config['config']['name'])
for key in self.experiments[self.last_exp_idx]['exp']:
self._set_parameter(self.current_config, key['path'], key['value'][self.sub_idx])
with open('data.yml', 'w') as outfile:
yaml.dump(self.current_config, outfile, default_flow_style=False)
def get_next_config(self):
config = self.current_config
max_vals = len(self.experiments[0]['exp'][0]['value'])
self.sub_idx += 1
if self.sub_idx >= max_vals:
self.sub_idx = 0
self.last_exp_idx += 1
if self.last_exp_idx >= len(self.experiments):
self.done = True
else:
self.last_exp_idx += 1
self.create_config()
return config
#def __iter__(self):
# print('__iter__')
# return self
def __next__(self):
print('__next__')
res = self.get_next_config()
if res is not None:
yield res
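# Hedged sketch (not in the original file) of the experiment_config structure this class walks:
# _set_parameter follows the dotted 'path' into the nested config and 'value' holds one entry per
# sub-experiment. The concrete keys below are illustrative only.
def _example_experiment_sweep():
    base_config = {'config': {'name': 'test', 'learning_rate': 3e-4}}
    experiment_config = {
        'experiments': [
            {'exp': [{'path': 'config.learning_rate', 'value': [1e-4, 3e-4, 1e-3]}]},
        ],
    }
    exp = Experiment(base_config, experiment_config)  # writes the first variant to data.yml
    first = exp.get_next_config()                     # config with learning_rate == 1e-4
    exp.set_results(rewards=100.0, epochs=10)         # records the result and tracks the best one
    return first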
| 2,457 |
Python
| 33.619718 | 103 | 0.564103 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/schedulers.py
|
class RLScheduler:
def __init__(self):
pass
def update(self,current_lr, entropy_coef, epoch, frames, **kwargs):
pass
class IdentityScheduler(RLScheduler):
def __init__(self):
super().__init__()
def update(self, current_lr, entropy_coef, epoch, frames, kl_dist, **kwargs):
return current_lr, entropy_coef
class AdaptiveScheduler(RLScheduler):
def __init__(self, kl_threshold = 0.008):
super().__init__()
self.min_lr = 1e-6
self.max_lr = 1e-2
self.kl_threshold = kl_threshold
def update(self, current_lr, entropy_coef, epoch, frames, kl_dist, **kwargs):
lr = current_lr
if kl_dist > (2.0 * self.kl_threshold):
lr = max(current_lr / 1.5, self.min_lr)
if kl_dist < (0.5 * self.kl_threshold):
lr = min(current_lr * 1.5, self.max_lr)
return lr, entropy_coef
class LinearScheduler(RLScheduler):
def __init__(self, start_lr, min_lr=1e-6, max_steps = 1000000, use_epochs=True, apply_to_entropy=False, **kwargs):
super().__init__()
self.start_lr = start_lr
self.min_lr = min_lr
self.max_steps = max_steps
self.use_epochs = use_epochs
self.apply_to_entropy = apply_to_entropy
if apply_to_entropy:
self.start_entropy_coef = kwargs.pop('start_entropy_coef', 0.01)
self.min_entropy_coef = kwargs.pop('min_entropy_coef', 0.0001)
def update(self, current_lr, entropy_coef, epoch, frames, kl_dist, **kwargs):
if self.use_epochs:
steps = epoch
else:
steps = frames
mul = max(0, self.max_steps - steps)/self.max_steps
lr = self.min_lr + (self.start_lr - self.min_lr) * mul
if self.apply_to_entropy:
entropy_coef = self.min_entropy_coef + (self.start_entropy_coef - self.min_entropy_coef) * mul
return lr, entropy_coef
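# Hedged usage sketch (not in the original file): this mirrors how a2c_common.py calls a
# scheduler once per mini-epoch, feeding back the measured KL divergence; the KL values are made up.
def _example_adaptive_schedule():
    scheduler = AdaptiveScheduler(kl_threshold=0.008)
    lr, entropy_coef = 3e-4, 0.01
    for epoch, kl in enumerate([0.02, 0.001, 0.008]):  # too large, too small, on target
        lr, entropy_coef = scheduler.update(lr, entropy_coef, epoch, frames=0, kl_dist=kl)
    return lr  # shrunk to 2e-4, regrown to 3e-4, then left unchanged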
| 1,948 |
Python
| 33.803571 | 118 | 0.582136 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/wrappers.py
|
import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
from copy import copy
class InfoWrapper(gym.Wrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
self.reward = 0
def reset(self, **kwargs):
self.reward = 0
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
self.reward += reward
if done:
info['scores'] = self.reward
return observation, reward, done, info
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on True game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class EpisodeStackedEnv(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.max_stacked_steps = 1000
self.current_steps=0
def step(self, action):
obs, reward, done, info = self.env.step(action)
if reward == 0:
self.current_steps += 1
else:
self.current_steps = 0
if self.current_steps == self.max_stacked_steps:
self.current_steps = 0
print('max_stacked_steps!')
done = True
reward = -1
obs = self.env.reset()
return obs, reward, done, info
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env,skip=4, use_max = True):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
self.use_max = use_max
# most recent raw observations (for max pooling across time steps)
if self.use_max:
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
else:
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.float32)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if self.use_max:
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
else:
self._obs_buffer[0] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
if self.use_max:
max_frame = self._obs_buffer.max(axis=0)
else:
max_frame = self._obs_buffer[0]
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = width
self.height = height
self.grayscale = grayscale
if self.grayscale:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
else:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 3), dtype=np.uint8)
def observation(self, frame):
import cv2
if self.grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
if self.grayscale:
frame = np.expand_dims(frame, -1)
return frame
class FrameStack(gym.Wrapper):
def __init__(self, env, k, flat = False):
"""
Stack k last frames.
        The LazyFrames return below is commented out, so stacked frames are returned as a regular numpy array.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.flat = flat
self.frames = deque([], maxlen=k)
observation_space = env.observation_space
self.shp = shp = observation_space.shape
#TODO: remove consts -1 and 1
if flat:
self.observation_space = spaces.Box(low=-1, high=1, shape=(shp[:-1] + (shp[-1] * k,)), dtype=observation_space.dtype)
else:
if len(shp) == 1:
self.observation_space = spaces.Box(low=-1, high=1, shape=(k, shp[0]), dtype=observation_space.dtype)
else:
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
if self.flat:
return np.squeeze(self.frames).flatten()
else:
if len(self.shp) == 1:
res = np.concatenate([f[..., np.newaxis] for f in self.frames], axis=-1)
#print('shape:', np.shape(res))
#print('shape:', np.shape(np.transpose(res)))
return np.transpose(res)
else:
return np.concatenate(self.frames, axis=-1)
#return LazyFrames(list(self.frames))
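# Illustrative usage sketch, not part of the original module: stacking 4 warped Atari frames
# yields an 84x84x4 observation. The env id below is only an example; any image env with an
# (H, W, C) observation behaves the same way.
def _frame_stack_shape_example():
    env = FrameStack(WarpFrame(gym.make('PongNoFrameskip-v4')), 4)
    ob = env.reset()
    # WarpFrame produces (84, 84, 1); FrameStack concatenates k frames along the last axis.
    assert ob.shape == (84, 84, 4)
    return ob.shape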
class BatchedFrameStack(gym.Wrapper):
def __init__(self, env, k, transpose = False, flatten = False):
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
self.shp = shp = env.observation_space.shape
self.transpose = transpose
self.flatten = flatten
if transpose:
assert(not flatten)
self.observation_space = spaces.Box(low=0, high=1, shape=(shp[0], k), dtype=env.observation_space.dtype)
else:
if flatten:
self.observation_space = spaces.Box(low=0, high=1, shape=(k *shp[0],), dtype=env.observation_space.dtype)
else:
self.observation_space = spaces.Box(low=0, high=1, shape=(k, shp[0]), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
if self.transpose:
frames = np.transpose(self.frames, (1, 2, 0))
else:
if self.flatten:
frames = np.array(self.frames)
shape = np.shape(frames)
                frames = np.transpose(self.frames, (1, 0, 2))
                frames = np.reshape(frames, (shape[1], shape[0] * shape[2]))
else:
frames = np.transpose(self.frames, (1, 0, 2))
return frames
class BatchedFrameStackWithStates(gym.Wrapper):
def __init__(self, env, k, transpose = False, flatten = False):
gym.Wrapper.__init__(self, env)
self.k = k
self.obses = deque([], maxlen=k)
self.states = deque([], maxlen=k)
self.shp = shp = env.observation_space.shape
self.state_shp = state_shp = env.state_space.shape
self.transpose = transpose
self.flatten = flatten
if transpose:
assert(not flatten)
self.observation_space = spaces.Box(low=0, high=1, shape=(shp[0], k), dtype=env.observation_space.dtype)
self.state_space = spaces.Box(low=0, high=1, shape=(state_shp[0], k), dtype=env.observation_space.dtype)
else:
if flatten:
self.observation_space = spaces.Box(low=0, high=1, shape=(k*shp[0],), dtype=env.observation_space.dtype)
self.state_space = spaces.Box(low=0, high=1, shape=(k*state_shp[0],), dtype=env.observation_space.dtype)
else:
self.observation_space = spaces.Box(low=0, high=1, shape=(k, shp[0]), dtype=env.observation_space.dtype)
self.state_space = spaces.Box(low=0, high=1, shape=(k, state_shp[0]), dtype=env.observation_space.dtype)
def reset(self):
obs_dict = self.env.reset()
ob = obs_dict["obs"]
state = obs_dict["state"]
for _ in range(self.k):
self.obses.append(ob)
self.states.append(state)
return self._get_ob()
def step(self, action):
obs_dict, reward, done, info = self.env.step(action)
ob = obs_dict["obs"]
state = obs_dict["state"]
self.obses.append(ob)
self.states.append(state)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.obses) == self.k
obses = self.process_data(self.obses)
states = self.process_data(self.states)
return {"obs": obses, "state" : states}
def process_data(self, data):
if len(np.shape(data)) < 3:
return np.array(data)
if self.transpose:
obses = np.transpose(data, (1, 2, 0))
else:
if self.flatten:
obses = np.array(data)
shape = np.shape(obses)
                obses = np.transpose(data, (1, 0, 2))
                obses = np.reshape(obses, (shape[1], shape[0] * shape[2]))
else:
obses = np.transpose(data, (1, 0, 2))
return obses
class ProcgenStack(gym.Wrapper):
def __init__(self, env, k = 2, greyscale=True):
gym.Wrapper.__init__(self, env)
self.k = k
self.curr_frame = 0
self.frames = deque([], maxlen=k)
self.greyscale=greyscale
self.prev_frame = None
shp = env.observation_space.shape
if greyscale:
shape = (shp[:-1] + (shp[-1] + k - 1,))
else:
shape = (shp[:-1] + (shp[-1] * k,))
self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)
def reset(self):
import cv2
frames = self.env.reset()
self.frames.append(frames)
if self.greyscale:
self.prev_frame = np.expand_dims(cv2.cvtColor(frames, cv2.COLOR_RGB2GRAY), axis=-1)
for _ in range(self.k-1):
self.frames.append(self.prev_frame)
else:
for _ in range(self.k-1):
self.frames.append(frames)
return self._get_ob()
def step(self, action):
import cv2
frames, reward, done, info = self.env.step(action)
if self.greyscale:
self.frames[self.k-1] = self.prev_frame
self.prev_frame = np.expand_dims(cv2.cvtColor(frames, cv2.COLOR_RGB2GRAY), axis=-1)
self.frames.append(frames)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
stacked_frames = np.concatenate(self.frames, axis=-1)
return stacked_frames
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
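# Minimal sketch, not part of the original module: LazyFrames defers the concatenation until
# the object is actually converted to an array, so identical underlying frames are not copied
# once per stacked observation.
def _lazy_frames_example():
    frames = [np.zeros((84, 84, 1), dtype=np.uint8) for _ in range(4)]
    lazy = LazyFrames(frames)
    stacked = np.array(lazy)   # the concatenation happens here, on demand
    return stacked.shape       # (84, 84, 4)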
class ReallyDoneWrapper(gym.Wrapper):
def __init__(self, env):
"""
        Make it work with the video monitor to record the whole game video instead of one life.
"""
self.old_env = env
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
old_lives = self.env.unwrapped.ale.lives()
obs, reward, done, info = self.env.step(action)
lives = self.env.unwrapped.ale.lives()
if done:
return obs, reward, done, info
if old_lives > lives:
print('lives:', lives)
obs, _, done, _ = self.env.step(1)
done = lives == 0
return obs, reward, done, info
class AllowBacktracking(gym.Wrapper):
"""
Use deltas in max(X) as the reward, rather than deltas
in X. This way, agents are not discouraged too heavily
from exploring backwards if there is no way to advance
head-on in the level.
"""
def __init__(self, env):
super(AllowBacktracking, self).__init__(env)
self._cur_x = 0
self._max_x = 0
def reset(self, **kwargs): # pylint: disable=E0202
self._cur_x = 0
self._max_x = 0
return self.env.reset(**kwargs)
def step(self, action): # pylint: disable=E0202
obs, rew, done, info = self.env.step(action)
self._cur_x += rew
rew = max(0, self._cur_x - self._max_x)
self._max_x = max(self._max_x, self._cur_x)
return obs, rew, done, info
def unwrap(env):
if hasattr(env, "unwrapped"):
return env.unwrapped
elif hasattr(env, "env"):
return unwrap(env.env)
elif hasattr(env, "leg_env"):
return unwrap(env.leg_env)
else:
return env
class StickyActionEnv(gym.Wrapper):
def __init__(self, env, p=0.25):
super(StickyActionEnv, self).__init__(env)
self.p = p
self.last_action = 0
def reset(self):
self.last_action = 0
return self.env.reset()
def step(self, action):
if self.unwrapped.np_random.uniform() < self.p:
action = self.last_action
self.last_action = action
obs, reward, done, info = self.env.step(action)
return obs, reward, done, info
class MontezumaInfoWrapper(gym.Wrapper):
def __init__(self, env, room_address):
super(MontezumaInfoWrapper, self).__init__(env)
self.room_address = room_address
self.visited_rooms = set()
def get_current_room(self):
ram = unwrap(self.env).ale.getRAM()
assert len(ram) == 128
return int(ram[self.room_address])
def step(self, action):
obs, rew, done, info = self.env.step(action)
self.visited_rooms.add(self.get_current_room())
if done:
if 'scores' not in info:
info['scores'] = {}
info['scores'].update(visited_rooms=copy(self.visited_rooms))
self.visited_rooms.clear()
return obs, rew, done, info
def reset(self):
return self.env.reset()
class TimeLimit(gym.Wrapper):
"""
    A slightly modified version of OpenAI's original TimeLimit wrapper.
    The main difference is that we always send True or False in infos['time_outs'].
"""
def __init__(self, env, max_episode_steps=None):
super(TimeLimit, self).__init__(env)
self.concat_infos = True
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
def step(self, action):
assert self._elapsed_steps is not None, "Cannot call env.step() before calling reset()"
observation, reward, done, info = self.env.step(action)
self._elapsed_steps += 1
info['time_outs'] = False
if self._elapsed_steps >= self._max_episode_steps:
info['time_outs'] = True
done = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class MaskVelocityWrapper(gym.ObservationWrapper):
"""
Gym environment observation wrapper used to mask velocity terms in
    observations. The intention is to make the MDP partially observable.
"""
def __init__(self, env, name):
super(MaskVelocityWrapper, self).__init__(env)
if name == "CartPole-v1":
self.mask = np.array([1., 0., 1., 0.])
elif name == "Pendulum-v0":
self.mask = np.array([1., 1., 0.])
elif name == "LunarLander-v2":
self.mask = np.array([1., 1., 0., 0., 1., 0., 1., 1,])
elif name == "LunarLanderContinuous-v2":
self.mask = np.array([1., 1., 0., 0., 1., 0., 1., 1,])
else:
raise NotImplementedError
def observation(self, observation):
return observation * self.mask
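# Small sketch, not part of the original module: for CartPole the mask zeroes the cart and
# pole velocities (indices 1 and 3), leaving only the positions observable.
def _mask_velocity_example():
    env = MaskVelocityWrapper(gym.make('CartPole-v1'), 'CartPole-v1')
    obs = env.reset()
    return obs  # velocity entries are always 0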
def make_atari(env_id, timelimit=True, noop_max=0, skip=4, sticky=False, directory=None):
env = gym.make(env_id)
if 'Montezuma' in env_id:
env = MontezumaInfoWrapper(env, room_address=3 if 'Montezuma' in env_id else 1)
env = StickyActionEnv(env)
env = InfoWrapper(env)
    if directory is not None:
env = gym.wrappers.Monitor(env,directory=directory,force=True)
if sticky:
env = StickyActionEnv(env)
if not timelimit:
env = env.env
#assert 'NoFrameskip' in env.spec.id
if noop_max > 0:
env = NoopResetEnv(env, noop_max=noop_max)
env = MaxAndSkipEnv(env, skip=skip)
#env = EpisodeStackedEnv(env)
return env
def wrap_deepmind(env, episode_life=False, clip_rewards=True, frame_stack=True, scale =False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
def wrap_carracing(env, clip_rewards=True, frame_stack=True, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
def make_car_racing(env_id, skip=4):
env = make_atari(env_id, noop_max=0, skip=skip)
return wrap_carracing(env, clip_rewards=False)
def make_atari_deepmind(env_id, noop_max=30, skip=4, sticky=False, episode_life=True):
env = make_atari(env_id, noop_max=noop_max, skip=skip, sticky=sticky)
return wrap_deepmind(env, episode_life=episode_life, clip_rewards=False)
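# Illustrative sketch, not part of the original module: make_atari_deepmind above is roughly
# equivalent to composing the individual wrappers by hand, which is handy when only part of
# the preprocessing is wanted. The env id is only an example.
def _manual_atari_pipeline(env_id='BreakoutNoFrameskip-v4'):
    env = gym.make(env_id)
    env = InfoWrapper(env)
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    env = FrameStack(env, 4)
    return env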
| 22,702 |
Python
| 33.927692 | 134 | 0.576337 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/rollouts.py
|
'''
TODO: move play_steps here
'''
class Rollout:
def __init__(self, gamma):
self.gamma = gamma
def play_steps(self, env, max_steps_count = 1):
pass
class DiscretePpoRollout(Rollout):
def __init__(self, gamma, lam):
        super(DiscretePpoRollout, self).__init__(gamma)
self.lam = lam
def play_steps(self, env, max_steps_count = 1):
pass
| 382 |
Python
| 18.149999 | 51 | 0.578534 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/common_losses.py
|
from torch import nn
import torch
def critic_loss(value_preds_batch, values, curr_e_clip, return_batch, clip_value):
if clip_value:
value_pred_clipped = value_preds_batch + \
(values - value_preds_batch).clamp(-curr_e_clip, curr_e_clip)
value_losses = (values - return_batch)**2
value_losses_clipped = (value_pred_clipped - return_batch)**2
c_loss = torch.max(value_losses,
value_losses_clipped)
else:
c_loss = (return_batch - values)**2
return c_loss
def actor_loss(old_action_log_probs_batch, action_log_probs, advantage, is_ppo, curr_e_clip):
if is_ppo:
ratio = torch.exp(old_action_log_probs_batch - action_log_probs)
surr1 = advantage * ratio
surr2 = advantage * torch.clamp(ratio, 1.0 - curr_e_clip,
1.0 + curr_e_clip)
a_loss = torch.max(-surr1, -surr2)
else:
a_loss = (action_log_probs * advantage)
return a_loss
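# Small numerical sketch, not part of the original module. Note the ratio above is
# exp(old - new); elsewhere in this code base these arguments are negative log-probabilities
# ('neglogpacs'), so exp(old_neglogp - new_neglogp) = p_new / p_old as in standard PPO.
def _ppo_clip_example():
    # the new policy is 1.5x more likely to take this action than the old one
    old_neglogp = torch.tensor([1.0])
    new_neglogp = old_neglogp - torch.log(torch.tensor([1.5]))
    advantage = torch.tensor([1.0])
    loss = actor_loss(old_neglogp, new_neglogp, advantage, is_ppo=True, curr_e_clip=0.2)
    # the ratio 1.5 is clipped to 1.2, so loss = -1.2 * advantage
    return loss  # tensor([-1.2000])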
| 1,020 |
Python
| 36.814813 | 93 | 0.583333 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/env_configurations.py
|
from rl_games.common import wrappers
from rl_games.common import tr_helpers
import rl_games.envs.test
from rl_games.envs.brax import create_brax_env
import gym
from gym.wrappers import FlattenObservation, FilterObservation
import numpy as np
#FLEX_PATH = '/home/viktor/Documents/rl/FlexRobotics'
FLEX_PATH = '/home/trrrrr/Documents/FlexRobotics-master'
class HCRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
return np.max([-10, reward])
class DMControlReward(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
self.num_stops = 0
self.max_stops = 1000
self.reward_threshold = 0.001
def reset(self, **kwargs):
self.num_stops = 0
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
if reward < self.reward_threshold:
self.num_stops += 1
else:
self.num_stops = max(0, self.num_stops-1)
if self.num_stops > self.max_stops:
#print('too many stops!')
reward = -10
observation = self.reset()
done = True
return observation, self.reward(reward), done, info
def reward(self, reward):
return reward
class DMControlObsWrapper(gym.ObservationWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def observation(self, obs):
return obs['observations']
def create_default_gym_env(**kwargs):
frames = kwargs.pop('frames', 1)
name = kwargs.pop('name')
is_procgen = kwargs.pop('procgen', False)
limit_steps = kwargs.pop('limit_steps', False)
env = gym.make(name, **kwargs)
if frames > 1:
if is_procgen:
env = wrappers.ProcgenStack(env, frames, True)
else:
env = wrappers.FrameStack(env, frames, False)
if limit_steps:
env = wrappers.LimitStepsWrapper(env)
return env
def create_goal_gym_env(**kwargs):
frames = kwargs.pop('frames', 1)
name = kwargs.pop('name')
limit_steps = kwargs.pop('limit_steps', False)
env = gym.make(name, **kwargs)
env = FlattenObservation(FilterObservation(env, ['observation', 'desired_goal']))
if frames > 1:
env = wrappers.FrameStack(env, frames, False)
if limit_steps:
env = wrappers.LimitStepsWrapper(env)
return env
def create_slime_gym_env(**kwargs):
import slimevolleygym
from rl_games.envs.slimevolley_selfplay import SlimeVolleySelfplay
name = kwargs.pop('name')
limit_steps = kwargs.pop('limit_steps', False)
self_play = kwargs.pop('self_play', False)
if self_play:
env = SlimeVolleySelfplay(name, **kwargs)
else:
env = gym.make(name, **kwargs)
return env
def create_connect_four_env(**kwargs):
from rl_games.envs.connect4_selfplay import ConnectFourSelfPlay
name = kwargs.pop('name')
limit_steps = kwargs.pop('limit_steps', False)
self_play = kwargs.pop('self_play', False)
if self_play:
env = ConnectFourSelfPlay(name, **kwargs)
else:
env = gym.make(name, **kwargs)
return env
def create_atari_gym_env(**kwargs):
#frames = kwargs.pop('frames', 1)
name = kwargs.pop('name')
skip = kwargs.pop('skip',4)
episode_life = kwargs.pop('episode_life',True)
env = wrappers.make_atari_deepmind(name, skip=skip,episode_life=episode_life)
return env
def create_dm_control_env(**kwargs):
frames = kwargs.pop('frames', 1)
name = 'dm2gym:'+ kwargs.pop('name')
env = gym.make(name, environment_kwargs=kwargs)
env = DMControlReward(env)
env = DMControlObsWrapper(env)
if frames > 1:
env = wrappers.FrameStack(env, frames, False)
return env
def create_super_mario_env(name='SuperMarioBros-v1'):
import gym
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
import gym_super_mario_bros
env = gym_super_mario_bros.make(name)
env = JoypadSpace(env, SIMPLE_MOVEMENT)
env = wrappers.MaxAndSkipEnv(env, skip=4)
env = wrappers.wrap_deepmind(env, episode_life=False, clip_rewards=False, frame_stack=True, scale=True)
return env
def create_super_mario_env_stage1(name='SuperMarioBrosRandomStage1-v1'):
import gym
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
import gym_super_mario_bros
stage_names = [
'SuperMarioBros-1-1-v1',
'SuperMarioBros-1-2-v1',
'SuperMarioBros-1-3-v1',
'SuperMarioBros-1-4-v1',
]
env = gym_super_mario_bros.make(stage_names[1])
env = JoypadSpace(env, SIMPLE_MOVEMENT)
env = wrappers.MaxAndSkipEnv(env, skip=4)
env = wrappers.wrap_deepmind(env, episode_life=False, clip_rewards=False, frame_stack=True, scale=True)
#env = wrappers.AllowBacktracking(env)
return env
def create_quadrupped_env():
import gym
import roboschool
import quadruppedEnv
return wrappers.FrameStack(wrappers.MaxAndSkipEnv(gym.make('QuadruppedWalk-v1'), 4, False), 2, True)
def create_roboschool_env(name):
import gym
import roboschool
return gym.make(name)
def create_smac(name, **kwargs):
from rl_games.envs.smac_env import SMACEnv
frames = kwargs.pop('frames', 1)
transpose = kwargs.pop('transpose', False)
flatten = kwargs.pop('flatten', True)
has_cv = kwargs.get('central_value', False)
env = SMACEnv(name, **kwargs)
if frames > 1:
if has_cv:
env = wrappers.BatchedFrameStackWithStates(env, frames, transpose=False, flatten=flatten)
else:
env = wrappers.BatchedFrameStack(env, frames, transpose=False, flatten=flatten)
return env
def create_smac_cnn(name, **kwargs):
from rl_games.envs.smac_env import SMACEnv
has_cv = kwargs.get('central_value', False)
frames = kwargs.pop('frames', 4)
transpose = kwargs.pop('transpose', False)
env = SMACEnv(name, **kwargs)
if has_cv:
env = wrappers.BatchedFrameStackWithStates(env, frames, transpose=transpose)
else:
env = wrappers.BatchedFrameStack(env, frames, transpose=transpose)
return env
def create_test_env(name, **kwargs):
import rl_games.envs.test
env = gym.make(name, **kwargs)
return env
def create_minigrid_env(name, **kwargs):
import gym_minigrid
import gym_minigrid.wrappers
state_bonus = kwargs.pop('state_bonus', False)
action_bonus = kwargs.pop('action_bonus', False)
fully_obs = kwargs.pop('fully_obs', False)
env = gym.make(name, **kwargs)
if state_bonus:
env = gym_minigrid.wrappers.StateBonus(env)
if action_bonus:
env = gym_minigrid.wrappers.ActionBonus(env)
if fully_obs:
env = gym_minigrid.wrappers.RGBImgObsWrapper(env)
else:
env = gym_minigrid.wrappers.RGBImgPartialObsWrapper(env) # Get pixel observations
env = gym_minigrid.wrappers.ImgObsWrapper(env) # Get rid of the 'mission' field
    print('minigrid_env observation space shape:', env.observation_space)
return env
def create_multiwalker_env(**kwargs):
from rl_games.envs.multiwalker import MultiWalker
env = MultiWalker('', **kwargs)
return env
def create_diambra_env(**kwargs):
from rl_games.envs.diambra.diambra import DiambraEnv
env = DiambraEnv(**kwargs)
return env
def create_env(name, **kwargs):
steps_limit = kwargs.pop('steps_limit', None)
env = gym.make(name, **kwargs)
if steps_limit is not None:
env = wrappers.TimeLimit(env, steps_limit)
return env
configurations = {
'CartPole-v1' : {
'vecenv_type' : 'RAY',
'env_creator' : lambda **kwargs : gym.make('CartPole-v1'),
},
'CartPoleMaskedVelocity-v1' : {
'vecenv_type' : 'RAY',
'env_creator' : lambda **kwargs : wrappers.MaskVelocityWrapper(gym.make('CartPole-v1'), 'CartPole-v1'),
},
'MountainCarContinuous-v0' : {
'vecenv_type' : 'RAY',
'env_creator' : lambda **kwargs : gym.make('MountainCarContinuous-v0'),
},
'MountainCar-v0' : {
'vecenv_type' : 'RAY',
'env_creator' : lambda : gym.make('MountainCar-v0'),
},
'Acrobot-v1' : {
'env_creator' : lambda **kwargs : gym.make('Acrobot-v1'),
'vecenv_type' : 'RAY'
},
'Pendulum-v0' : {
'env_creator' : lambda **kwargs : gym.make('Pendulum-v0'),
'vecenv_type' : 'RAY'
},
'LunarLander-v2' : {
'env_creator' : lambda **kwargs : gym.make('LunarLander-v2'),
'vecenv_type' : 'RAY'
},
'PongNoFrameskip-v4' : {
'env_creator' : lambda **kwargs : wrappers.make_atari_deepmind('PongNoFrameskip-v4', skip=4),
'vecenv_type' : 'RAY'
},
'BreakoutNoFrameskip-v4' : {
'env_creator' : lambda **kwargs : wrappers.make_atari_deepmind('BreakoutNoFrameskip-v4', skip=4,sticky=False),
'vecenv_type' : 'RAY'
},
'MsPacmanNoFrameskip-v4' : {
'env_creator' : lambda **kwargs : wrappers.make_atari_deepmind('MsPacmanNoFrameskip-v4', skip=4),
'vecenv_type' : 'RAY'
},
'CarRacing-v0' : {
'env_creator' : lambda **kwargs : wrappers.make_car_racing('CarRacing-v0', skip=4),
'vecenv_type' : 'RAY'
},
'RoboschoolAnt-v1' : {
'env_creator' : lambda **kwargs : create_roboschool_env('RoboschoolAnt-v1'),
'vecenv_type' : 'RAY'
},
'SuperMarioBros-v1' : {
'env_creator' : lambda : create_super_mario_env(),
'vecenv_type' : 'RAY'
},
'SuperMarioBrosRandomStages-v1' : {
'env_creator' : lambda : create_super_mario_env('SuperMarioBrosRandomStages-v1'),
'vecenv_type' : 'RAY'
},
'SuperMarioBrosRandomStage1-v1' : {
'env_creator' : lambda **kwargs : create_super_mario_env_stage1('SuperMarioBrosRandomStage1-v1'),
'vecenv_type' : 'RAY'
},
'RoboschoolHalfCheetah-v1' : {
'env_creator' : lambda **kwargs : create_roboschool_env('RoboschoolHalfCheetah-v1'),
'vecenv_type' : 'RAY'
},
'RoboschoolHumanoid-v1' : {
'env_creator' : lambda : wrappers.FrameStack(create_roboschool_env('RoboschoolHumanoid-v1'), 1, True),
'vecenv_type' : 'RAY'
},
'LunarLanderContinuous-v2' : {
'env_creator' : lambda **kwargs : gym.make('LunarLanderContinuous-v2'),
'vecenv_type' : 'RAY'
},
'RoboschoolHumanoidFlagrun-v1' : {
'env_creator' : lambda **kwargs : wrappers.FrameStack(create_roboschool_env('RoboschoolHumanoidFlagrun-v1'), 1, True),
'vecenv_type' : 'RAY'
},
'BipedalWalker-v3' : {
'env_creator' : lambda **kwargs : create_env('BipedalWalker-v3', **kwargs),
'vecenv_type' : 'RAY'
},
'BipedalWalkerCnn-v3' : {
'env_creator' : lambda **kwargs : wrappers.FrameStack(HCRewardEnv(gym.make('BipedalWalker-v3')), 4, False),
'vecenv_type' : 'RAY'
},
'BipedalWalkerHardcore-v3' : {
'env_creator' : lambda **kwargs : gym.make('BipedalWalkerHardcore-v3'),
'vecenv_type' : 'RAY'
},
'ReacherPyBulletEnv-v0' : {
'env_creator' : lambda **kwargs : create_roboschool_env('ReacherPyBulletEnv-v0'),
'vecenv_type' : 'RAY'
},
'BipedalWalkerHardcoreCnn-v3' : {
'env_creator' : lambda : wrappers.FrameStack(gym.make('BipedalWalkerHardcore-v3'), 4, False),
'vecenv_type' : 'RAY'
},
'QuadruppedWalk-v1' : {
'env_creator' : lambda **kwargs : create_quadrupped_env(),
'vecenv_type' : 'RAY'
},
'FlexAnt' : {
'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/ant.yaml'),
'vecenv_type' : 'ISAAC'
},
'FlexHumanoid' : {
'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/humanoid.yaml'),
'vecenv_type' : 'ISAAC'
},
'FlexHumanoidHard' : {
'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/humanoid_hard.yaml'),
'vecenv_type' : 'ISAAC'
},
'smac' : {
'env_creator' : lambda **kwargs : create_smac(**kwargs),
'vecenv_type' : 'RAY_SMAC'
},
'smac_cnn' : {
'env_creator' : lambda **kwargs : create_smac_cnn(**kwargs),
'vecenv_type' : 'RAY_SMAC'
},
'dm_control' : {
'env_creator' : lambda **kwargs : create_dm_control_env(**kwargs),
'vecenv_type' : 'RAY'
},
'openai_gym' : {
'env_creator' : lambda **kwargs : create_default_gym_env(**kwargs),
'vecenv_type' : 'RAY'
},
'openai_robot_gym' : {
'env_creator' : lambda **kwargs : create_goal_gym_env(**kwargs),
'vecenv_type' : 'RAY'
},
'atari_gym' : {
'env_creator' : lambda **kwargs : create_atari_gym_env(**kwargs),
'vecenv_type' : 'RAY'
},
'slime_gym' : {
'env_creator' : lambda **kwargs : create_slime_gym_env(**kwargs),
'vecenv_type' : 'RAY'
},
'test_env' : {
'env_creator' : lambda **kwargs : create_test_env(kwargs.pop('name'), **kwargs),
'vecenv_type' : 'RAY'
},
'minigrid_env' : {
'env_creator' : lambda **kwargs : create_minigrid_env(kwargs.pop('name'), **kwargs),
'vecenv_type' : 'RAY'
},
'connect4_env' : {
'env_creator' : lambda **kwargs : create_connect_four_env(**kwargs),
'vecenv_type' : 'RAY'
},
'multiwalker_env' : {
'env_creator' : lambda **kwargs : create_multiwalker_env(**kwargs),
'vecenv_type' : 'RAY'
},
'diambra': {
'env_creator': lambda **kwargs: create_diambra_env(**kwargs),
'vecenv_type': 'RAY'
},
'brax' : {
'env_creator': lambda **kwargs: create_brax_env(**kwargs),
'vecenv_type': 'BRAX'
},
}
def get_env_info(env):
result_shapes = {}
result_shapes['observation_space'] = env.observation_space
result_shapes['action_space'] = env.action_space
result_shapes['agents'] = 1
result_shapes['value_size'] = 1
if hasattr(env, "get_number_of_agents"):
result_shapes['agents'] = env.get_number_of_agents()
'''
if isinstance(result_shapes['observation_space'], gym.spaces.dict.Dict):
result_shapes['observation_space'] = observation_space['observations']
if isinstance(result_shapes['observation_space'], dict):
result_shapes['observation_space'] = observation_space['observations']
result_shapes['state_space'] = observation_space['states']
'''
if hasattr(env, "value_size"):
result_shapes['value_size'] = env.value_size
print(result_shapes)
return result_shapes
def get_obs_and_action_spaces_from_config(config):
env_config = config.get('env_config', {})
env = configurations[config['env_name']]['env_creator'](**env_config)
result_shapes = get_env_info(env)
env.close()
return result_shapes
def register(name, config):
configurations[name] = config
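# Illustrative sketch, not part of the original module: new environments are added through
# register(); the creator can then be looked up from `configurations` by name. The name
# 'my_cartpole' is only an example.
def _register_example():
    register('my_cartpole', {
        'vecenv_type': 'RAY',
        'env_creator': lambda **kwargs: gym.make('CartPole-v1'),
    })
    env = configurations['my_cartpole']['env_creator']()
    return get_env_info(env)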
| 15,163 |
Python
| 33 | 127 | 0.617028 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/experience.py
|
import numpy as np
import random
import gym
import torch
from rl_games.common.segment_tree import SumSegmentTree, MinSegmentTree
import torch
from rl_games.algos_torch.torch_ext import numpy_to_torch_dtype_dict
class ReplayBuffer(object):
def __init__(self, size, ob_space):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._obses = np.zeros((size,) + ob_space.shape, dtype=ob_space.dtype)
self._next_obses = np.zeros((size,) + ob_space.shape, dtype=ob_space.dtype)
self._rewards = np.zeros(size)
self._actions = np.zeros(size, dtype=np.int32)
self._dones = np.zeros(size, dtype=np.bool)
self._maxsize = size
self._next_idx = 0
self._curr_size = 0
def __len__(self):
return self._curr_size
def add(self, obs_t, action, reward, obs_tp1, done):
self._curr_size = min(self._curr_size + 1, self._maxsize )
self._obses[self._next_idx] = obs_t
self._next_obses[self._next_idx] = obs_tp1
self._rewards[self._next_idx] = reward
self._actions[self._next_idx] = action
self._dones[self._next_idx] = done
self._next_idx = (self._next_idx + 1) % self._maxsize
def _get(self, idx):
return self._obses[idx], self._actions[idx], self._rewards[idx], self._next_obses[idx], self._dones[idx]
def _encode_sample(self, idxes):
batch_size = len(idxes)
obses_t, actions, rewards, obses_tp1, dones = [None] * batch_size, [None] * batch_size, [None] * batch_size, [None] * batch_size, [None] * batch_size
it = 0
for i in idxes:
data = self._get(i)
obs_t, action, reward, obs_tp1, done = data
obses_t[it] = np.array(obs_t, copy=False)
actions[it] = np.array(action, copy=False)
rewards[it] = reward
obses_tp1[it] = np.array(obs_tp1, copy=False)
dones[it] = done
it = it + 1
return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, self._curr_size - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
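# Minimal usage sketch, not part of the original module: fill the buffer with random CartPole
# transitions and draw a batch. Shapes follow the observation space passed to the constructor.
def _replay_buffer_example():
    env = gym.make('CartPole-v1')
    buffer = ReplayBuffer(1000, env.observation_space)
    obs = env.reset()
    for _ in range(100):
        action = env.action_space.sample()
        next_obs, reward, done, _ = env.step(action)
        buffer.add(obs, action, reward, next_obs, done)
        obs = env.reset() if done else next_obs
    obs_b, act_b, rew_b, next_obs_b, done_b = buffer.sample(32)
    return obs_b.shape  # (32, 4)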
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha, ob_space):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size, ob_space)
assert alpha >= 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
p_total = self._it_sum.sum(0, self._curr_size - 1)
every_range_len = p_total / batch_size
for i in range(batch_size):
mass = random.random() * every_range_len + i * every_range_len
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
idexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * self._curr_size) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * self._curr_size) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < self._curr_size
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
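# Minimal sketch, not part of the original module: prioritized sampling also returns
# importance weights and the sampled indices, so new TD errors can be written back as
# priorities. `compute_td_errors` is a hypothetical callable, not part of this code base.
def _prioritized_replay_example(buffer, compute_td_errors, batch_size=32, beta=0.4):
    *batch, weights, idxes = buffer.sample(batch_size, beta)
    td_errors = compute_td_errors(*batch)
    buffer.update_priorities(idxes, np.abs(td_errors) + 1e-6)
    return weights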
class VectorizedReplayBuffer:
def __init__(self, obs_shape, action_shape, capacity, device):
"""Create Vectorized Replay buffer.
Parameters
----------
        capacity: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
See Also
--------
ReplayBuffer.__init__
"""
self.device = device
self.obses = torch.empty((capacity, *obs_shape), dtype=torch.float32, device=self.device)
self.next_obses = torch.empty((capacity, *obs_shape), dtype=torch.float32, device=self.device)
self.actions = torch.empty((capacity, *action_shape), dtype=torch.float32, device=self.device)
self.rewards = torch.empty((capacity, 1), dtype=torch.float32, device=self.device)
self.dones = torch.empty((capacity, 1), dtype=torch.bool, device=self.device)
self.capacity = capacity
self.idx = 0
self.full = False
def add(self, obs, action, reward, next_obs, done):
num_observations = obs.shape[0]
remaining_capacity = min(self.capacity - self.idx, num_observations)
overflow = num_observations - remaining_capacity
if remaining_capacity < num_observations:
self.obses[0: overflow] = obs[-overflow:]
self.actions[0: overflow] = action[-overflow:]
self.rewards[0: overflow] = reward[-overflow:]
self.next_obses[0: overflow] = next_obs[-overflow:]
self.dones[0: overflow] = done[-overflow:]
self.full = True
self.obses[self.idx: self.idx + remaining_capacity] = obs[:remaining_capacity]
self.actions[self.idx: self.idx + remaining_capacity] = action[:remaining_capacity]
self.rewards[self.idx: self.idx + remaining_capacity] = reward[:remaining_capacity]
self.next_obses[self.idx: self.idx + remaining_capacity] = next_obs[:remaining_capacity]
self.dones[self.idx: self.idx + remaining_capacity] = done[:remaining_capacity]
self.idx = (self.idx + num_observations) % self.capacity
self.full = self.full or self.idx == 0
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obses: torch tensor
batch of observations
actions: torch tensor
batch of actions executed given obs
rewards: torch tensor
rewards received as results of executing act_batch
next_obses: torch tensor
next set of observations seen after executing act_batch
        dones: torch tensor
            whether the episode ended at this tuple of (observation, action)
"""
idxs = torch.randint(0,
self.capacity if self.full else self.idx,
(batch_size,), device=self.device)
obses = self.obses[idxs]
actions = self.actions[idxs]
rewards = self.rewards[idxs]
next_obses = self.next_obses[idxs]
dones = self.dones[idxs]
return obses, actions, rewards, next_obses, dones
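# Minimal sketch, not part of the original module: the vectorized buffer stores whole batches
# of transitions from many parallel actors in pre-allocated torch tensors. The shapes below
# are only an example.
def _vectorized_buffer_example(device='cpu', num_envs=8):
    buf = VectorizedReplayBuffer(obs_shape=(24,), action_shape=(6,), capacity=10000, device=device)
    obs = torch.zeros(num_envs, 24, device=device)
    action = torch.zeros(num_envs, 6, device=device)
    reward = torch.zeros(num_envs, 1, device=device)
    done = torch.zeros(num_envs, 1, dtype=torch.bool, device=device)
    buf.add(obs, action, reward, obs, done)
    obses, actions, rewards, next_obses, dones = buf.sample(4)
    return obses.shape  # torch.Size([4, 24])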
class ExperienceBuffer:
'''
More generalized than replay buffers.
Implemented for on-policy algos
'''
def __init__(self, env_info, algo_info, device, aux_tensor_dict=None):
self.env_info = env_info
self.algo_info = algo_info
self.device = device
self.num_agents = env_info.get('agents', 1)
self.action_space = env_info['action_space']
self.num_actors = algo_info['num_actors']
self.horizon_length = algo_info['horizon_length']
self.has_central_value = algo_info['has_central_value']
self.use_action_masks = algo_info.get('use_action_masks', False)
batch_size = self.num_actors * self.num_agents
self.is_discrete = False
self.is_multi_discrete = False
self.is_continuous = False
self.obs_base_shape = (self.horizon_length, self.num_agents * self.num_actors)
self.state_base_shape = (self.horizon_length, self.num_actors)
if type(self.action_space) is gym.spaces.Discrete:
self.actions_shape = ()
self.actions_num = self.action_space.n
self.is_discrete = True
if type(self.action_space) is gym.spaces.Tuple:
self.actions_shape = (len(self.action_space),)
self.actions_num = [action.n for action in self.action_space]
self.is_multi_discrete = True
if type(self.action_space) is gym.spaces.Box:
self.actions_shape = (self.action_space.shape[0],)
self.actions_num = self.action_space.shape[0]
self.is_continuous = True
self.tensor_dict = {}
self._init_from_env_info(self.env_info)
self.aux_tensor_dict = aux_tensor_dict
if self.aux_tensor_dict is not None:
self._init_from_aux_dict(self.aux_tensor_dict)
def _init_from_env_info(self, env_info):
obs_base_shape = self.obs_base_shape
state_base_shape = self.state_base_shape
self.tensor_dict['obses'] = self._create_tensor_from_space(env_info['observation_space'], obs_base_shape)
if self.has_central_value:
self.tensor_dict['states'] = self._create_tensor_from_space(env_info['state_space'], state_base_shape)
val_space = gym.spaces.Box(low=0, high=1,shape=(env_info.get('value_size',1),))
self.tensor_dict['rewards'] = self._create_tensor_from_space(val_space, obs_base_shape)
self.tensor_dict['values'] = self._create_tensor_from_space(val_space, obs_base_shape)
self.tensor_dict['neglogpacs'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=(), dtype=np.float32), obs_base_shape)
self.tensor_dict['dones'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=(), dtype=np.uint8), obs_base_shape)
if self.is_discrete or self.is_multi_discrete:
self.tensor_dict['actions'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=self.actions_shape, dtype=np.long), obs_base_shape)
if self.use_action_masks:
self.tensor_dict['action_masks'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=self.actions_shape + (np.sum(self.actions_num),), dtype=np.bool), obs_base_shape)
if self.is_continuous:
self.tensor_dict['actions'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=self.actions_shape, dtype=np.float32), obs_base_shape)
self.tensor_dict['mus'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=self.actions_shape, dtype=np.float32), obs_base_shape)
self.tensor_dict['sigmas'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=self.actions_shape, dtype=np.float32), obs_base_shape)
def _init_from_aux_dict(self, tensor_dict):
obs_base_shape = self.obs_base_shape
for k,v in tensor_dict.items():
self.tensor_dict[k] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=(v), dtype=np.float32), obs_base_shape)
def _create_tensor_from_space(self, space, base_shape):
if type(space) is gym.spaces.Box:
dtype = numpy_to_torch_dtype_dict[space.dtype]
return torch.zeros(base_shape + space.shape, dtype= dtype, device = self.device)
if type(space) is gym.spaces.Discrete:
dtype = numpy_to_torch_dtype_dict[space.dtype]
return torch.zeros(base_shape, dtype= dtype, device = self.device)
if type(space) is gym.spaces.Tuple:
'''
assuming that tuple is only Discrete tuple
'''
dtype = numpy_to_torch_dtype_dict[space.dtype]
tuple_len = len(space)
return torch.zeros(base_shape +(tuple_len,), dtype= dtype, device = self.device)
if type(space) is gym.spaces.Dict:
t_dict = {}
for k,v in space.spaces.items():
t_dict[k] = self._create_tensor_from_space(v, base_shape)
return t_dict
def update_data(self, name, index, val):
if type(val) is dict:
for k,v in val.items():
self.tensor_dict[name][k][index,:] = v
else:
self.tensor_dict[name][index,:] = val
def update_data_rnn(self, name, indices,play_mask, val):
if type(val) is dict:
            for k,v in val.items():
self.tensor_dict[name][k][indices,play_mask] = v
else:
self.tensor_dict[name][indices,play_mask] = val
def get_transformed(self, transform_op):
res_dict = {}
for k, v in self.tensor_dict.items():
if type(v) is dict:
transformed_dict = {}
for kd,vd in v.items():
transformed_dict[kd] = transform_op(vd)
res_dict[k] = transformed_dict
else:
res_dict[k] = transform_op(v)
return res_dict
def get_transformed_list(self, transform_op, tensor_list):
res_dict = {}
for k in tensor_list:
v = self.tensor_dict.get(k)
if v is None:
continue
if type(v) is dict:
transformed_dict = {}
for kd,vd in v.items():
transformed_dict[kd] = transform_op(vd)
res_dict[k] = transformed_dict
else:
res_dict[k] = transform_op(v)
return res_dict
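# Minimal sketch, not part of the original module: the on-policy buffer is laid out as
# (horizon_length, num_actors * num_agents, ...) tensors keyed by name. The spaces and sizes
# below are only an example.
def _experience_buffer_example(device='cpu'):
    env_info = {
        'observation_space': gym.spaces.Box(low=-1, high=1, shape=(24,)),
        'action_space': gym.spaces.Box(low=-1, high=1, shape=(6,)),
        'agents': 1,
        'value_size': 1,
    }
    algo_info = {'num_actors': 16, 'horizon_length': 32, 'has_central_value': False}
    buf = ExperienceBuffer(env_info, algo_info, device)
    return buf.tensor_dict['obses'].shape  # torch.Size([32, 16, 24])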
| 17,184 |
Python
| 40.211031 | 194 | 0.589967 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/segment_tree.py
|
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
        Parameters
        ----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max); it
must form a mathematical group together with the set of
possible values for array elements (i.e. be associative)
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
        arr[0] + arr[1] + ... + arr[i - 1] <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
        prefixsum: float
            upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
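# Minimal sketch, not part of the original module: a SumSegmentTree over unnormalized
# priorities supports O(log n) proportional sampling via find_prefixsum_idx.
def _sum_tree_example():
    tree = SumSegmentTree(8)  # capacity must be a power of two
    for i, p in enumerate([1.0, 2.0, 4.0, 1.0]):
        tree[i] = p
    assert tree.sum() == 8.0
    # prefix sums are [1, 3, 7, 8], so a mass of 3.5 falls into the third slot
    return tree.find_prefixsum_idx(3.5)  # -> 2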
| 4,888 |
Python
| 35.214815 | 109 | 0.541121 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/datasets.py
|
import torch
import copy
from torch.utils.data import Dataset
class PPODataset(Dataset):
def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len):
self.is_rnn = is_rnn
self.seq_len = seq_len
self.batch_size = batch_size
self.minibatch_size = minibatch_size
self.device = device
self.length = self.batch_size // self.minibatch_size
self.is_discrete = is_discrete
self.is_continuous = not is_discrete
total_games = self.batch_size // self.seq_len
self.num_games_batch = self.minibatch_size // self.seq_len
self.game_indexes = torch.arange(total_games, dtype=torch.long, device=self.device)
self.flat_indexes = torch.arange(total_games * self.seq_len, dtype=torch.long, device=self.device).reshape(total_games, self.seq_len)
self.special_names = ['rnn_states']
def update_values_dict(self, values_dict):
self.values_dict = values_dict
def update_mu_sigma(self, mu, sigma):
start = self.last_range[0]
end = self.last_range[1]
self.values_dict['mu'][start:end] = mu
self.values_dict['sigma'][start:end] = sigma
def __len__(self):
return self.length
def _get_item_rnn(self, idx):
gstart = idx * self.num_games_batch
gend = (idx + 1) * self.num_games_batch
start = gstart * self.seq_len
end = gend * self.seq_len
self.last_range = (start, end)
input_dict = {}
for k,v in self.values_dict.items():
if k not in self.special_names:
                if type(v) is dict:
v_dict = { kd:vd[start:end] for kd, vd in v.items() }
input_dict[k] = v_dict
else:
input_dict[k] = v[start:end]
rnn_states = self.values_dict['rnn_states']
input_dict['rnn_states'] = [s[:,gstart:gend,:] for s in rnn_states]
return input_dict
def _get_item(self, idx):
start = idx * self.minibatch_size
end = (idx + 1) * self.minibatch_size
self.last_range = (start, end)
input_dict = {}
for k,v in self.values_dict.items():
if k not in self.special_names and v is not None:
if type(v) is dict:
v_dict = { kd:vd[start:end] for kd, vd in v.items() }
input_dict[k] = v_dict
else:
input_dict[k] = v[start:end]
return input_dict
def __getitem__(self, idx):
if self.is_rnn:
sample = self._get_item_rnn(idx)
else:
sample = self._get_item(idx)
return sample
class DatasetList(Dataset):
def __init__(self):
self.dataset_list = []
def __len__(self):
return self.dataset_list[0].length * len(self.dataset_list)
def add_dataset(self, dataset):
self.dataset_list.append(copy.deepcopy(dataset))
def clear(self):
self.dataset_list = []
def __getitem__(self, idx):
ds_len = len(self.dataset_list)
ds_idx = idx % ds_len
in_idx = idx // ds_len
return self.dataset_list[ds_idx].__getitem__(in_idx)
| 3,260 |
Python
| 33.326315 | 141 | 0.553067 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/player.py
|
import time
import gym
import numpy as np
import torch
from rl_games.common import env_configurations
class BasePlayer(object):
def __init__(self, config):
self.config = config
self.env_name = self.config['env_name']
self.env_config = self.config.get('env_config', {})
self.env_info = self.config.get('env_info')
if self.env_info is None:
self.env = self.create_env()
self.env_info = env_configurations.get_env_info(self.env)
self.value_size = self.env_info.get('value_size', 1)
self.action_space = self.env_info['action_space']
self.num_agents = self.env_info['agents']
self.observation_space = self.env_info['observation_space']
if isinstance(self.observation_space, gym.spaces.Dict):
self.obs_shape = {}
for k, v in self.observation_space.spaces.items():
self.obs_shape[k] = v.shape
else:
self.obs_shape = self.observation_space.shape
self.is_tensor_obses = False
self.states = None
self.player_config = self.config.get('player', {})
self.use_cuda = True
self.batch_size = 1
self.has_batch_dimension = False
self.has_central_value = self.config.get('central_value_config') is not None
self.device_name = self.player_config.get('device_name', 'cuda')
self.render_env = self.player_config.get('render', False)
self.games_num = self.player_config.get('games_num', 2000)
self.is_determenistic = self.player_config.get('determenistic', True)
self.n_game_life = self.player_config.get('n_game_life', 1)
self.print_stats = self.player_config.get('print_stats', True)
self.render_sleep = self.player_config.get('render_sleep', 0.002)
self.max_steps = 108000 // 4
self.device = torch.device(self.device_name)
def _preproc_obs(self, obs_batch):
if type(obs_batch) is dict:
for k, v in obs_batch.items():
obs_batch[k] = self._preproc_obs(v)
else:
if obs_batch.dtype == torch.uint8:
obs_batch = obs_batch.float() / 255.0
if self.normalize_input:
obs_batch = self.running_mean_std(obs_batch)
return obs_batch
def env_step(self, env, actions):
if not self.is_tensor_obses:
actions = actions.cpu().numpy()
obs, rewards, dones, infos = env.step(actions)
if hasattr(obs, 'dtype') and obs.dtype == np.float64:
obs = np.float32(obs)
if self.value_size > 1:
rewards = rewards[0]
if self.is_tensor_obses:
return self.obs_to_torch(obs), rewards.cpu(), dones.cpu(), infos
else:
if np.isscalar(dones):
rewards = np.expand_dims(np.asarray(rewards), 0)
dones = np.expand_dims(np.asarray(dones), 0)
return self.obs_to_torch(obs), torch.from_numpy(rewards), torch.from_numpy(dones), infos
def obs_to_torch(self, obs):
if isinstance(obs, dict):
if 'obs' in obs:
obs = obs['obs']
if isinstance(obs, dict):
upd_obs = {}
for key, value in obs.items():
upd_obs[key] = self._obs_to_tensors_internal(value, False)
else:
upd_obs = self.cast_obs(obs)
else:
upd_obs = self.cast_obs(obs)
return upd_obs
def _obs_to_tensors_internal(self, obs, cast_to_dict=True):
if isinstance(obs, dict):
upd_obs = {}
for key, value in obs.items():
upd_obs[key] = self._obs_to_tensors_internal(value, False)
else:
upd_obs = self.cast_obs(obs)
return upd_obs
def cast_obs(self, obs):
if isinstance(obs, torch.Tensor):
self.is_tensor_obses = True
elif isinstance(obs, np.ndarray):
assert(self.observation_space.dtype != np.int8)
if self.observation_space.dtype == np.uint8:
obs = torch.ByteTensor(obs).to(self.device)
else:
obs = torch.FloatTensor(obs).to(self.device)
return obs
def preprocess_actions(self, actions):
if not self.is_tensor_obses:
actions = actions.cpu().numpy()
return actions
def env_reset(self, env):
obs = env.reset()
return self.obs_to_torch(obs)
def restore(self, fn):
raise NotImplementedError('restore')
def get_weights(self):
weights = {}
weights['model'] = self.model.state_dict()
if self.normalize_input:
weights['running_mean_std'] = self.running_mean_std.state_dict()
return weights
def set_weights(self, weights):
self.model.load_state_dict(weights['model'])
if self.normalize_input:
self.running_mean_std.load_state_dict(weights['running_mean_std'])
def create_env(self):
return env_configurations.configurations[self.env_name]['env_creator'](**self.env_config)
def get_action(self, obs, is_determenistic=False):
raise NotImplementedError('step')
def get_masked_action(self, obs, mask, is_determenistic=False):
raise NotImplementedError('step')
def reset(self):
raise NotImplementedError('raise')
def init_rnn(self):
if self.is_rnn:
rnn_states = self.model.get_default_rnn_state()
            self.states = [torch.zeros((s.size()[0], self.batch_size, s.size()[2]),
                                       dtype=torch.float32).to(self.device) for s in rnn_states]
def run(self):
n_games = self.games_num
render = self.render_env
n_game_life = self.n_game_life
is_determenistic = self.is_determenistic
sum_rewards = 0
sum_steps = 0
sum_game_res = 0
n_games = n_games * n_game_life
games_played = 0
has_masks = False
has_masks_func = getattr(self.env, "has_action_mask", None) is not None
op_agent = getattr(self.env, "create_agent", None)
if op_agent:
agent_inited = True
#print('setting agent weights for selfplay')
# self.env.create_agent(self.env.config)
# self.env.set_weights(range(8),self.get_weights())
if has_masks_func:
has_masks = self.env.has_action_mask()
need_init_rnn = self.is_rnn
for _ in range(n_games):
if games_played >= n_games:
break
obses = self.env_reset(self.env)
batch_size = 1
batch_size = self.get_batch_size(obses, batch_size)
if need_init_rnn:
self.init_rnn()
need_init_rnn = False
cr = torch.zeros(batch_size, dtype=torch.float32)
steps = torch.zeros(batch_size, dtype=torch.float32)
print_game_res = False
for n in range(self.max_steps):
if has_masks:
masks = self.env.get_action_mask()
action = self.get_masked_action(
obses, masks, is_determenistic)
else:
action = self.get_action(obses, is_determenistic)
obses, r, done, info = self.env_step(self.env, action)
cr += r
steps += 1
if render:
self.env.render(mode='human')
time.sleep(self.render_sleep)
all_done_indices = done.nonzero(as_tuple=False)
done_indices = all_done_indices[::self.num_agents]
done_count = len(done_indices)
games_played += done_count
if done_count > 0:
if self.is_rnn:
for s in self.states:
s[:, all_done_indices, :] = s[:,
all_done_indices, :] * 0.0
cur_rewards = cr[done_indices].sum().item()
cur_steps = steps[done_indices].sum().item()
cr = cr * (1.0 - done.float())
steps = steps * (1.0 - done.float())
sum_rewards += cur_rewards
sum_steps += cur_steps
game_res = 0.0
if isinstance(info, dict):
if 'battle_won' in info:
print_game_res = True
game_res = info.get('battle_won', 0.5)
if 'scores' in info:
print_game_res = True
game_res = info.get('scores', 0.5)
if self.print_stats:
if print_game_res:
print('reward:', cur_rewards/done_count,
'steps:', cur_steps/done_count, 'w:', game_res)
else:
print('reward:', cur_rewards/done_count,
'steps:', cur_steps/done_count)
sum_game_res += game_res
if batch_size//self.num_agents == 1 or games_played >= n_games:
break
print(sum_rewards)
if print_game_res:
print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps /
games_played * n_game_life, 'winrate:', sum_game_res / games_played * n_game_life)
else:
print('av reward:', sum_rewards / games_played * n_game_life,
'av steps:', sum_steps / games_played * n_game_life)
def get_batch_size(self, obses, batch_size):
obs_shape = self.obs_shape
if type(self.obs_shape) is dict:
if 'obs' in obses:
obses = obses['obs']
keys_view = self.obs_shape.keys()
keys_iterator = iter(keys_view)
first_key = next(keys_iterator)
obs_shape = self.obs_shape[first_key]
obses = obses[first_key]
if len(obses.size()) > len(obs_shape):
batch_size = obses.size()[0]
self.has_batch_dimension = True
self.batch_size = batch_size
return batch_size
| 10,359 |
Python
| 37.37037 | 100 | 0.529009 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/categorical.py
|
import numpy as np
class CategoricalQ:
def __init__(self, n_atoms, v_min, v_max):
self.n_atoms = n_atoms
self.v_min = v_min
self.v_max = v_max
self.delta_z = (v_max - v_min) / (n_atoms - 1)
def distr_projection(self, next_distr, rewards, dones, gamma):
"""
        Perform distribution projection aka Categorical Algorithm from the
"A Distributional Perspective on RL" paper
"""
proj_distr = np.zeros_like(next_distr, dtype=np.float32)
n_atoms = self.n_atoms
v_min = self.v_min
v_max = self.v_max
delta_z = self.delta_z
for atom in range(n_atoms):
z = rewards + (v_min + atom * delta_z) * gamma
tz_j = np.clip(z, v_min, v_max)
b_j = (tz_j - v_min) / delta_z
l = np.floor(b_j).astype(np.int64)
u = np.ceil(b_j).astype(np.int64)
eq_mask = u == l
proj_distr[eq_mask, l[eq_mask]] += next_distr[eq_mask, atom]
ne_mask = u != l
proj_distr[ne_mask, l[ne_mask]] += next_distr[ne_mask, atom] * (u - b_j)[ne_mask]
proj_distr[ne_mask, u[ne_mask]] += next_distr[ne_mask, atom] * (b_j - l)[ne_mask]
if dones.any():
proj_distr[dones] = 0.0
tz_j = np.clip(rewards[dones], v_min, v_max)
b_j = (tz_j - v_min) / delta_z
l = np.floor(b_j).astype(np.int64)
u = np.ceil(b_j).astype(np.int64)
eq_mask = u == l
eq_dones = dones.copy()
eq_dones[dones] = eq_mask
if eq_dones.any():
proj_distr[eq_dones, l[eq_mask]] = 1.0
ne_mask = u != l
ne_dones = dones.copy()
ne_dones[dones] = ne_mask
if ne_dones.any():
proj_distr[ne_dones, l[ne_mask]] = (u - b_j)[ne_mask]
proj_distr[ne_dones, u[ne_mask]] = (b_j - l)[ne_mask]
return proj_distr
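# Hedged usage sketch (not from the original repo): project a batch of next-state atom
# distributions through the Bellman operator. Shapes and values below are illustrative;
# the structural checks (shape and probability mass preserved) hold for any valid input.
if __name__ == '__main__':
    np.random.seed(0)
    cat = CategoricalQ(n_atoms=51, v_min=-10.0, v_max=10.0)
    batch = 4
    next_distr = np.random.rand(batch, 51).astype(np.float32)
    next_distr /= next_distr.sum(axis=1, keepdims=True)      # rows are probability vectors
    rewards = np.random.randn(batch).astype(np.float32)
    dones = np.array([False, False, True, False])
    proj = cat.distr_projection(next_distr, rewards, dones, gamma=0.99)
    assert proj.shape == (batch, 51)
    assert np.allclose(proj.sum(axis=1), 1.0, atol=1e-5)      # projection preserves total mass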
| 1,977 |
Python
| 37.784313 | 93 | 0.49216 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/divergence.py
|
import torch
import torch.distributions as dist
def d_kl_discrete(p, q):
# p = target, q = online
# categorical distribution parametrized by logits
logits_diff = p - q
p_probs = torch.exp(p)
d_kl = (p_probs * logits_diff).sum(-1)
return d_kl
def d_kl_discrete_list(p, q):
d_kl = 0
for pi, qi in zip(p,q):
d_kl += d_kl_discrete(pi, qi)
return d_kl
def d_kl_normal(p, q):
# p = target, q = online
p_mean, p_sigma = p
q_mean, q_sigma = q
mean_diff = ((q_mean - p_mean) / q_sigma).pow(2)
var_ratio = (p_sigma / q_sigma).pow(2)
d_kl = 0.5 * (var_ratio + mean_diff - 1 - var_ratio.log())
return d_kl.sum(-1)
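# Hedged self-check sketch (not part of rl_games): the closed-form KLs above should agree with
# torch.distributions.kl_divergence. Note that d_kl_discrete matches the Categorical KL only when
# p and q are normalized log-probabilities. Only torch is assumed here.
if __name__ == '__main__':
    torch.manual_seed(0)
    # diagonal Gaussian case
    p_mean, q_mean = torch.randn(4, 3), torch.randn(4, 3)
    p_sigma, q_sigma = torch.rand(4, 3) + 0.1, torch.rand(4, 3) + 0.1
    ours = d_kl_normal((p_mean, p_sigma), (q_mean, q_sigma))
    ref = dist.kl_divergence(dist.Normal(p_mean, p_sigma), dist.Normal(q_mean, q_sigma)).sum(-1)
    assert torch.allclose(ours, ref, atol=1e-5)
    # categorical case with normalized log-probabilities
    logp = torch.log_softmax(torch.randn(4, 6), dim=-1)
    logq = torch.log_softmax(torch.randn(4, 6), dim=-1)
    ref_cat = dist.kl_divergence(dist.Categorical(logits=logp), dist.Categorical(logits=logq))
    assert torch.allclose(d_kl_discrete(logp, logq), ref_cat, atol=1e-5)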
| 680 |
Python
| 22.482758 | 62 | 0.570588 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/algo_observer.py
|
from rl_games.algos_torch import torch_ext
import torch
import numpy as np
class AlgoObserver:
def __init__(self):
pass
def before_init(self, base_name, config, experiment_name):
pass
def after_init(self, algo):
pass
def process_infos(self, infos, done_indices):
pass
def after_steps(self):
pass
def after_print_stats(self, frame, epoch_num, total_time):
pass
class DefaultAlgoObserver(AlgoObserver):
def __init__(self):
pass
def after_init(self, algo):
self.algo = algo
self.game_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
self.writer = self.algo.writer
def process_infos(self, infos, done_indices):
if not infos:
return
if not isinstance(infos, dict) and len(infos) > 0 and isinstance(infos[0], dict):
done_indices = done_indices.cpu()
for ind in done_indices:
ind = ind.item()
if len(infos) <= ind//self.algo.num_agents:
continue
info = infos[ind//self.algo.num_agents]
game_res = None
if 'battle_won' in info:
game_res = info['battle_won']
if 'scores' in info:
game_res = info['scores']
if game_res is not None:
self.game_scores.update(torch.from_numpy(np.asarray([game_res])).to(self.algo.ppo_device))
def after_clear_stats(self):
self.game_scores.clear()
def after_print_stats(self, frame, epoch_num, total_time):
if self.game_scores.current_size > 0 and self.writer is not None:
mean_scores = self.game_scores.get_mean()
self.writer.add_scalar('scores/mean', mean_scores, frame)
self.writer.add_scalar('scores/iter', mean_scores, epoch_num)
self.writer.add_scalar('scores/time', mean_scores, total_time)
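# Hedged sketch (illustrative, not part of the library): a custom observer that tracks an
# arbitrary per-env scalar the environment reports under infos['my_metric'] (a hypothetical key)
# and logs its running mean, mirroring the score tracking in DefaultAlgoObserver above.
class CustomMetricObserver(AlgoObserver):
    def after_init(self, algo):
        self.algo = algo
        self.metric = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
        self.writer = self.algo.writer
    def process_infos(self, infos, done_indices):
        # assumes infos is a dict of per-env tensors, as Isaac Gym style envs provide
        if isinstance(infos, dict) and 'my_metric' in infos:
            values = infos['my_metric'].flatten()[done_indices.flatten()]
            if len(values) > 0:
                self.metric.update(values.to(self.algo.ppo_device))
    def after_clear_stats(self):
        self.metric.clear()
    def after_print_stats(self, frame, epoch_num, total_time):
        if self.metric.current_size > 0 and self.writer is not None:
            self.writer.add_scalar('custom/my_metric', self.metric.get_mean(), frame)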
| 2,001 |
Python
| 31.290322 | 110 | 0.578211 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/ivecenv.py
|
class IVecEnv:
def step(self, actions):
raise NotImplementedError
def reset(self):
raise NotImplementedError
def has_action_masks(self):
return False
def get_number_of_agents(self):
return 1
def get_env_info(self):
pass
def set_train_info(self, env_frames, *args, **kwargs):
"""
        Send information in the direction algo -> environment.
        Most common use case: tell the environment how far along we are in the training process.
        This is useful for implementing curricula and similar training schedules.
"""
pass
def get_env_state(self):
"""
Return serializable environment state to be saved to checkpoint.
Can be used for stateful training sessions, i.e. with adaptive curriculums.
"""
return None
def set_env_state(self, env_state):
pass
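# Hedged sketch (illustrative, not part of rl_games): a toy vectorized env showing how
# set_train_info / get_env_state / set_env_state can back a simple frame-based curriculum.
# The class name and the 'difficulty' field are assumptions made for this example only.
import numpy as np
class ToyCurriculumEnv(IVecEnv):
    def __init__(self, num_envs=2):
        self.num_envs = num_envs
        self.difficulty = 0.0
    def step(self, actions):
        obs = np.zeros((self.num_envs, 1), dtype=np.float32)
        rewards = np.full((self.num_envs,), 1.0 - self.difficulty, dtype=np.float32)
        dones = np.zeros((self.num_envs,), dtype=bool)
        return obs, rewards, dones, {}
    def reset(self):
        return np.zeros((self.num_envs, 1), dtype=np.float32)
    def set_train_info(self, env_frames, *args, **kwargs):
        # scale task difficulty with the training progress reported by the algorithm
        self.difficulty = min(1.0, env_frames / 1e6)
    def get_env_state(self):
        # serialized into the checkpoint so training can resume at the same curriculum stage
        return {'difficulty': self.difficulty}
    def set_env_state(self, env_state):
        if env_state is not None:
            self.difficulty = env_state.get('difficulty', 0.0)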
| 905 |
Python
| 25.647058 | 111 | 0.624309 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/vecenv.py
|
import ray
from rl_games.common.ivecenv import IVecEnv
from rl_games.common.env_configurations import configurations
from rl_games.common.tr_helpers import dicts_to_dict_with_arrays
import numpy as np
import gym
from time import sleep
class RayWorker:
def __init__(self, config_name, config):
self.env = configurations[config_name]['env_creator'](**config)
#self.obs = self.env.reset()
def step(self, action):
next_state, reward, is_done, info = self.env.step(action)
if np.isscalar(is_done):
episode_done = is_done
else:
episode_done = is_done.all()
if episode_done:
next_state = self.reset()
if isinstance(next_state, dict):
for k,v in next_state.items():
if isinstance(v, dict):
for dk,dv in v.items():
if dv.dtype == np.float64:
v[dk] = dv.astype(np.float32)
else:
if v.dtype == np.float64:
next_state[k] = v.astype(np.float32)
else:
if next_state.dtype == np.float64:
next_state = next_state.astype(np.float32)
return next_state, reward, is_done, info
def render(self):
self.env.render()
def reset(self):
self.obs = self.env.reset()
return self.obs
def get_action_mask(self):
return self.env.get_action_mask()
def get_number_of_agents(self):
if hasattr(self.env, 'get_number_of_agents'):
return self.env.get_number_of_agents()
else:
return 1
def set_weights(self, weights):
self.env.update_weights(weights)
def can_concat_infos(self):
if hasattr(self.env, 'concat_infos'):
return self.env.concat_infos
else:
return False
def get_env_info(self):
info = {}
observation_space = self.env.observation_space
#if isinstance(observation_space, gym.spaces.dict.Dict):
# observation_space = observation_space['observations']
info['action_space'] = self.env.action_space
info['observation_space'] = observation_space
info['state_space'] = None
info['use_global_observations'] = False
info['agents'] = self.get_number_of_agents()
info['value_size'] = 1
if hasattr(self.env, 'use_central_value'):
info['use_global_observations'] = self.env.use_central_value
if hasattr(self.env, 'value_size'):
info['value_size'] = self.env.value_size
if hasattr(self.env, 'state_space'):
info['state_space'] = self.env.state_space
return info
class RayVecEnv(IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.config_name = config_name
self.num_actors = num_actors
self.use_torch = False
self.remote_worker = ray.remote(RayWorker)
self.workers = [self.remote_worker.remote(self.config_name, kwargs) for i in range(self.num_actors)]
res = self.workers[0].get_number_of_agents.remote()
self.num_agents = ray.get(res)
res = self.workers[0].get_env_info.remote()
env_info = ray.get(res)
res = self.workers[0].can_concat_infos.remote()
can_concat_infos = ray.get(res)
self.use_global_obs = env_info['use_global_observations']
self.concat_infos = can_concat_infos
self.obs_type_dict = type(env_info.get('observation_space')) is gym.spaces.Dict
self.state_type_dict = type(env_info.get('state_space')) is gym.spaces.Dict
if self.num_agents == 1:
self.concat_func = np.stack
else:
self.concat_func = np.concatenate
def step(self, actions):
newobs, newstates, newrewards, newdones, newinfos = [], [], [], [], []
res_obs = []
if self.num_agents == 1:
for (action, worker) in zip(actions, self.workers):
res_obs.append(worker.step.remote(action))
else:
for num, worker in enumerate(self.workers):
res_obs.append(worker.step.remote(actions[self.num_agents * num: self.num_agents * num + self.num_agents]))
all_res = ray.get(res_obs)
for res in all_res:
cobs, crewards, cdones, cinfos = res
if self.use_global_obs:
newobs.append(cobs["obs"])
newstates.append(cobs["state"])
else:
newobs.append(cobs)
newrewards.append(crewards)
newdones.append(cdones)
newinfos.append(cinfos)
if self.obs_type_dict:
ret_obs = dicts_to_dict_with_arrays(newobs, self.num_agents == 1)
else:
ret_obs = self.concat_func(newobs)
if self.use_global_obs:
newobsdict = {}
newobsdict["obs"] = ret_obs
if self.state_type_dict:
newobsdict["states"] = dicts_to_dict_with_arrays(newstates, True)
else:
newobsdict["states"] = np.stack(newstates)
ret_obs = newobsdict
if self.concat_infos:
newinfos = dicts_to_dict_with_arrays(newinfos, False)
return ret_obs, self.concat_func(newrewards), self.concat_func(newdones), newinfos
def get_env_info(self):
res = self.workers[0].get_env_info.remote()
return ray.get(res)
def set_weights(self, indices, weights):
res = []
for ind in indices:
res.append(self.workers[ind].set_weights.remote(weights))
ray.get(res)
def has_action_masks(self):
return True
def get_action_masks(self):
mask = [worker.get_action_mask.remote() for worker in self.workers]
return np.asarray(ray.get(mask), dtype=np.int32)
def reset(self):
res_obs = [worker.reset.remote() for worker in self.workers]
newobs, newstates = [],[]
for res in res_obs:
cobs = ray.get(res)
if self.use_global_obs:
newobs.append(cobs["obs"])
newstates.append(cobs["state"])
else:
newobs.append(cobs)
if self.obs_type_dict:
ret_obs = dicts_to_dict_with_arrays(newobs, self.num_agents == 1)
else:
ret_obs = self.concat_func(newobs)
if self.use_global_obs:
newobsdict = {}
newobsdict["obs"] = ret_obs
if self.state_type_dict:
newobsdict["states"] = dicts_to_dict_with_arrays(newstates, True)
else:
newobsdict["states"] = np.stack(newstates)
ret_obs = newobsdict
return ret_obs
# todo rename multi-agent
class RayVecSMACEnv(IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.config_name = config_name
self.num_actors = num_actors
self.remote_worker = ray.remote(RayWorker)
self.workers = [self.remote_worker.remote(self.config_name, kwargs) for i in range(self.num_actors)]
res = self.workers[0].get_number_of_agents.remote()
self.num_agents = ray.get(res)
res = self.workers[0].get_env_info.remote()
env_info = ray.get(res)
self.use_global_obs = env_info['use_global_observations']
def get_env_info(self):
res = self.workers[0].get_env_info.remote()
return ray.get(res)
def get_number_of_agents(self):
return self.num_agents
def step(self, actions):
newobs, newstates, newrewards, newdones, newinfos = [], [], [], [], []
newobsdict = {}
res_obs, res_state = [], []
for num, worker in enumerate(self.workers):
res_obs.append(worker.step.remote(actions[self.num_agents * num: self.num_agents * num + self.num_agents]))
for res in res_obs:
cobs, crewards, cdones, cinfos = ray.get(res)
if self.use_global_obs:
newobs.append(cobs["obs"])
newstates.append(cobs["state"])
else:
newobs.append(cobs)
newrewards.append(crewards)
newdones.append(cdones)
newinfos.append(cinfos)
if self.use_global_obs:
newobsdict["obs"] = np.concatenate(newobs, axis=0)
newobsdict["states"] = np.asarray(newstates)
ret_obs = newobsdict
else:
ret_obs = np.concatenate(newobs, axis=0)
return ret_obs, np.concatenate(newrewards, axis=0), np.concatenate(newdones, axis=0), newinfos
def has_action_masks(self):
return True
def get_action_masks(self):
mask = [worker.get_action_mask.remote() for worker in self.workers]
masks = ray.get(mask)
return np.concatenate(masks, axis=0)
def reset(self):
res_obs = [worker.reset.remote() for worker in self.workers]
if self.use_global_obs:
newobs, newstates = [],[]
for res in res_obs:
cobs = ray.get(res)
if self.use_global_obs:
newobs.append(cobs["obs"])
newstates.append(cobs["state"])
else:
newobs.append(cobs)
newobsdict = {}
newobsdict["obs"] = np.concatenate(newobs, axis=0)
newobsdict["states"] = np.asarray(newstates)
ret_obs = newobsdict
else:
ret_obs = ray.get(res_obs)
ret_obs = np.concatenate(ret_obs, axis=0)
return ret_obs
vecenv_config = {}
def register(config_name, func):
vecenv_config[config_name] = func
def create_vec_env(config_name, num_actors, **kwargs):
vec_env_name = configurations[config_name]['vecenv_type']
return vecenv_config[vec_env_name](config_name, num_actors, **kwargs)
register('RAY', lambda config_name, num_actors, **kwargs: RayVecEnv(config_name, num_actors, **kwargs))
register('RAY_SMAC', lambda config_name, num_actors, **kwargs: RayVecSMACEnv(config_name, num_actors, **kwargs))
from rl_games.envs.brax import BraxEnv
register('BRAX', lambda config_name, num_actors, **kwargs: BraxEnv(config_name, num_actors, **kwargs))
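# Hedged usage note: a project can plug in its own vectorized env by registering a builder under
# a new name (the names below are illustrative, not part of rl_games):
#
#   register('MY_VECENV', lambda config_name, num_actors, **kwargs: MyVecEnv(config_name, num_actors, **kwargs))
#
# Any entry in env_configurations.configurations whose 'vecenv_type' is 'MY_VECENV' will then be
# constructed through create_vec_env(config_name, num_actors, **kwargs).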
| 10,351 |
Python
| 34.696552 | 123 | 0.571539 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/tr_helpers.py
|
import numpy as np
from collections import defaultdict
class LinearValueProcessor:
def __init__(self, start_eps, end_eps, end_eps_frames):
self.start_eps = start_eps
self.end_eps = end_eps
self.end_eps_frames = end_eps_frames
def __call__(self, frame):
if frame >= self.end_eps_frames:
return self.end_eps
df = frame / self.end_eps_frames
return df * self.end_eps + (1.0 - df) * self.start_eps
class DefaultRewardsShaper:
def __init__(self, scale_value = 1, shift_value = 0, min_val=-np.inf, max_val=np.inf, is_torch=True):
self.scale_value = scale_value
self.shift_value = shift_value
self.min_val = min_val
self.max_val = max_val
self.is_torch = is_torch
def __call__(self, reward):
reward = reward + self.shift_value
reward = reward * self.scale_value
if self.is_torch:
import torch
reward = torch.clamp(reward, self.min_val, self.max_val)
else:
reward = np.clip(reward, self.min_val, self.max_val)
return reward
def dicts_to_dict_with_arrays(dicts, add_batch_dim = True):
def stack(v):
if len(np.shape(v)) == 1:
return np.array(v)
else:
return np.stack(v)
def concatenate(v):
if len(np.shape(v)) == 1:
return np.array(v)
else:
return np.concatenate(v)
dicts_len = len(dicts)
if(dicts_len <= 1):
return dicts
res = defaultdict(list)
    for sub in dicts:
        for key in sub:
            res[key].append(sub[key])
if add_batch_dim:
concat_func = stack
else:
concat_func = concatenate
res = {k : concat_func(v) for k,v in res.items()}
return res
def unsqueeze_obs(obs):
if type(obs) is dict:
for k,v in obs.items():
obs[k] = unsqueeze_obs(v)
else:
obs = obs.unsqueeze(0)
return obs
def flatten_first_two_dims(arr):
if arr.ndim > 2:
return arr.reshape(-1, *arr.shape[-(arr.ndim-2):])
else:
return arr.reshape(-1)
def free_mem():
import ctypes
ctypes.CDLL('libc.so.6').malloc_trim(0)
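# Hedged usage sketch for dicts_to_dict_with_arrays above: merging per-worker info/obs dicts
# into a single dict of stacked arrays, the way RayVecEnv consumes it. Values are illustrative.
if __name__ == '__main__':
    per_worker = [{'obs': np.zeros((2, 3)), 'rew': 1.0}, {'obs': np.ones((2, 3)), 'rew': 2.0}]
    merged = dicts_to_dict_with_arrays(per_worker, add_batch_dim=True)
    assert merged['obs'].shape == (2, 2, 3)   # stacked along a new leading (worker) axis
    assert merged['rew'].shape == (2,)        # scalars become a 1-D array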
| 2,203 |
Python
| 26.898734 | 105 | 0.568316 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/object_factory.py
|
class ObjectFactory:
def __init__(self):
self._builders = {}
def register_builder(self, name, builder):
self._builders[name] = builder
def set_builders(self, builders):
self._builders = builders
def create(self, name, **kwargs):
builder = self._builders.get(name)
if not builder:
raise ValueError(name)
return builder(**kwargs)
| 414 |
Python
| 26.666665 | 46 | 0.584541 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/transforms/soft_augmentation.py
|
from rl_games.common.transforms import transforms
import torch
class SoftAugmentation():
def __init__(self, **kwargs):
self.transform_config = kwargs.pop('transform')
self.aug_coef = kwargs.pop('aug_coef', 0.001)
print('aug coef:', self.aug_coef)
self.name = self.transform_config['name']
#TODO: remove hardcode
self.transform = transforms.ImageDatasetTransform(**self.transform_config)
def get_coef(self):
return self.aug_coef
def get_loss(self, p_dict, model, input_dict, loss_type = 'both'):
'''
loss_type: 'critic', 'policy', 'both'
'''
if self.transform:
input_dict = self.transform(input_dict)
loss = 0
q_dict = model(input_dict)
if loss_type == 'policy' or loss_type == 'both':
p_dict['logits'] = p_dict['logits'].detach()
loss = model.kl(p_dict, q_dict)
if loss_type == 'critic' or loss_type == 'both':
p_value = p_dict['value'].detach()
q_value = q_dict['value']
loss = loss + (0.5 * (p_value - q_value)**2).sum(dim=-1)
return loss
| 1,165 |
Python
| 34.333332 | 82 | 0.559657 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/transforms/transforms.py
|
import torch
from torch import nn
class DatasetTransform(nn.Module):
def __init__(self):
super().__init__()
def forward(self, dataset):
return dataset
class ImageDatasetTransform(DatasetTransform):
def __init__(self, **kwargs):
super().__init__()
import kornia
self.transform = torch.nn.Sequential(
nn.ReplicationPad2d(4),
kornia.augmentation.RandomCrop((84,84))
#kornia.augmentation.RandomErasing(p=0.2),
#kornia.augmentation.RandomAffine(degrees=0, translate=(2.0/84,2.0/84), p=1),
#kornia.augmentation.RandomCrop((84,84))
)
def forward(self, dataset):
dataset['obs'] = self.transform(dataset['obs'])
return dataset
| 746 |
Python
| 27.730768 | 85 | 0.619303 |
RoboticExplorationLab/CGAC/cgac/main.py
|
import sys, os
import argparse
import datetime
import time
import numpy as np
import itertools
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env-name', default="SNUHumanoidEnv", choices=["AntEnv", "HumanoidEnv", "SNUHumanoidEnv", "CartPoleSwingUpEnv", "CheetahEnv", "HopperEnv", "AllegroHand"])
parser.add_argument('--policy', default="Gaussian", help='Policy Type: Gaussian | Deterministic (default: Gaussian)')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G', help='discount factor for reward (default: 0.99)')
parser.add_argument('--tau_value', type=float, default=0.005, metavar='G', help='target smoothing coefficient(τ) (default: 0.005)')
parser.add_argument('--tau_policy', type=float, default=0.001)
parser.add_argument('--lr', type=float, default=0.002)
parser.add_argument('--final_lr', type=float, default=1e-4)
parser.add_argument('--alpha', type=float, default=0.1, metavar='G', help='Temperature parameter α determines the relative importance of the entropy term against the reward (default: 0.1)')
parser.add_argument('--alpha_final', type=float, default=2e-5)
parser.add_argument('--no_automatic_entropy_tuning', action="store_true", help='Disable automatic tuning of the temperature α (default: False)')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--batch_size_update', type=int, default=8192*2)
parser.add_argument('--num_actors', type=int, default=4096)
parser.add_argument('--num_steps', type=int, default=500000001)
parser.add_argument('--critic_hidden', nargs='+', type=int, default=[512, 512, 256])
parser.add_argument('--actor_hidden', nargs='+', type=int, default=[512, 256])
parser.add_argument('--critic_act', type=str, default='elu', choices=['elu', 'tanh', 'relu'])
parser.add_argument('--actor_act', type=str, default='elu', choices=['elu', 'tanh', 'relu'])
parser.add_argument('--num_critic_updates', type=int, default=2)
parser.add_argument('--val_horizon', type=int, default=32)
parser.add_argument('--num_steps_buffer', type=int, default=32)
parser.add_argument('--betas', nargs='+', type=float, default=[0.9, 0.999])
parser.add_argument('--lam', type=float, default=0.95)
parser.add_argument('--no_const_std', action='store_true')
parser.add_argument('--grad_norm', type=float, default=20)
parser.add_argument('--actor_grad_norm', type=float, default=4)
parser.add_argument('--clip_actor_gn', action='store_true')
parser.add_argument('--max_updates', type=int, default=20000)
parser.add_argument('--lr_schedule', type=str, default='linear', choices=['linear', 'decay', 'constant'])
parser.add_argument('--alpha_schedule', type=str, default='linear', choices=['linear', 'decay', 'constant'])
parser.add_argument('--final_targ_ent_coeff', type=float, default=3.5)
parser.add_argument('--init_targ_ent_coeff', type=float, default=0.2)
parser.add_argument('--peak_expected_reward', type=float, default=7.5)
parser.add_argument('--init_expected_reward', type=float, default=1.5)
parser.add_argument('--critic_method', type=str, default='gae-return', choices=['gae-return', 'td-lambda', 'one-step'])
parser.add_argument('--episode_length', type=int, default=1000)
parser.add_argument('--no_stochastic_init', action='store_true')
parser.add_argument('--policy_clip', action='store_true')
parser.add_argument('--cuda', action="store_true")
parser.add_argument('--target_update_interval', type=int, default=1, metavar='N', help='Value target update per no. of updates per step (default: 1)')
parser.add_argument('--id', type=str, default='0')
parser.add_argument('--desc', type=str, default='')
parser.add_argument('--final', action='store_true')
parser.add_argument('--start_steps', type=int, default=80000, metavar='N', help='Steps sampling random actions (default: 80000)')
parser.add_argument('--replay_size', type=int, default=1000000)
parser.add_argument('--test', action="store_true")
parser.add_argument('--eval', type=bool, default=True, help='Evaluates the policy every 10 episodes (default: True)')
parser.add_argument('--on_policy_update', action='store_true')
parser.add_argument('--reduction_rate_updates', type=int, default=8000)
# parser.add_argument('--num_steps_buffer', type=int, default=32)
parser.add_argument('--max_updates_alpha', type=int, default=8000)
parser.add_argument('--decay_steps', type=int, default=2000)
parser.add_argument('--lr_update_freq', type=int, default=1000)
parser.add_argument('--lr_decay_rate', type=float, default=0.75)
args = parser.parse_args()
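# Hedged example invocation (values are illustrative; the flags are the ones defined above):
#   python cgac/main.py --env-name SNUHumanoidEnv --cuda --seed 0 \
#       --num_actors 4096 --batch_size_update 16384 --critic_method gae-return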
args.automatic_entropy_tuning = not args.no_automatic_entropy_tuning
args.batch_size = args.num_actors
args.critic_lr = args.lr
args.actor_lr = args.lr
args.alpha_lr = args.lr*10
args.alpha_init = args.alpha*4
# args.alpha_final = args.alpha/4
args.max_rand_resets = args.batch_size//args.episode_length
args.horizon_shrink_steps = 12000/args.num_steps_buffer
args.const_std = not args.no_const_std
args.no_grad_train = not args.grad_train
args.stochastic_init = not args.no_stochastic_init
device_str = "cuda:0" if args.cuda else "cpu"
device = args.device = device_str
if args.env_name=='AllegroHand':
rl_games_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../rl_games/'))
sys.path.append(rl_games_dir)
import isaacgym
import isaacgymenvs
import torch
env = isaacgymenvs.make(
args.seed,
args.env_name,
args.batch_size,
device_str,
device_str,
graphics_device_id=0,
headless=True)
env.actions = torch.zeros((env.num_envs, env.num_actions), device=device, dtype=torch.float)
args.episode_length = 600
args.max_rand_resets = 0
else:
import dflex as df
import envs
import torch
from utils.common import *
if args.env_name == "HumanoidEnv":
args.MM_caching_frequency = 48
elif args.env_name == "SNUHumanoidEnv":
args.MM_caching_frequency = 8
elif args.env_name == "AntEnv":
args.MM_caching_frequency = 16
seeding(args.seed)
env_fn = getattr(envs, args.env_name)
env = env_fn(num_envs = args.batch_size, \
device = args.device, \
render = False, \
seed = args.seed, \
episode_length=args.episode_length, \
stochastic_init = args.stochastic_init, \
MM_caching_frequency = args.MM_caching_frequency, \
no_grad=args.no_grad_train)
from cgac.cgac import CGAC
from torch.utils.tensorboard import SummaryWriter
from replay_memory import VectorizedReplayBufferIsaacSAC
from utils_cgac import *
from utils.common import *
seeding(args.seed)
memory_cgac = VectorizedReplayBufferIsaacSAC(env.obs_space.shape, env.act_space.shape, args.batch_size, args.num_steps_buffer, args.device, gamma=args.gamma, lam=args.lam, horizon=args.val_horizon, critic_method=args.critic_method)
agent = CGAC(env.obs_space.shape[0], env.act_space, args, memory_cgac, env)
if not args.test:
save_dir = f'runs/hp/{args.env_name}/RL_final/tauval{args.tau_value}pi{args.tau_policy}_{args.actor_act}{args.actor_hidden}_\
{args.critic_act}{args.critic_hidden}_actors{args.num_actors}bszup{args.batch_size_update}_alphaautolin{int(args.final_targ_ent_coeff)}_\
{args.alpha}fin{args.alpha_final}_criticups{args.num_critic_updates}_bufs{args.num_steps_buffer}h{args.val_horizon}_seed{args.seed}_\
piclip{args.policy_clip}_{args.desc}'
print('save_dir : ', save_dir)
writer = SummaryWriter(save_dir)
if args.env_name=='AllegroHand':
RL_update_func = agent.update_parameters_and_collect_buffer_RL_isaac
else:
RL_update_func = agent.update_parameters_and_collect_buffer_RL
# Training Loop
total_numsteps = 0
updates = 0
total_updates = 0
num_episodes = 0
episode_steps = np.zeros(args.num_actors)
episode_rewards = torch.zeros(args.num_actors).to(device)
total_episode_reward_hist = []
episode_len_hist = []
agent.times = []
last = time.time()
start_time = time.time()
for i_episode in itertools.count(1):
critic_1_grad_state_loss = None
critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha, log_pi, min_qf_pi, next_values, \
qf_loss_batch, actor_grad_norm, critic_grad_norm, = RL_update_func(memory_cgac, args.batch_size, updates)
if not args.test and not args.final:
writer.add_scalar('loss/critic_1', critic_1_loss, updates)
writer.add_scalar('loss/critic_2', critic_2_loss, updates)
writer.add_scalar('loss/policy', policy_loss, updates)
writer.add_scalar('loss/entropy_loss', ent_loss, updates)
writer.add_scalar('loss/log_pi', log_pi, updates)
writer.add_scalar('loss/min_qf_pi', min_qf_pi, updates)
        writer.add_scalar('entropy_temperature/alpha', alpha, updates)
        writer.add_scalar('entropy_temperature/actor_grad_norm', actor_grad_norm, updates)
        writer.add_scalar('entropy_temperature/critic_grad_norm', critic_grad_norm, updates)
writer.add_scalar('losses/val_targ_max', next_values.max().item(), updates)
writer.add_scalar('losses/val_targ_mean', next_values.mean().item(), updates)
writer.add_scalar('losses/val_targ_min', next_values.min().item(), updates)
writer.add_scalar('losses/loss_qf_max', qf_loss_batch.max().item(), updates)
writer.add_scalar('losses/loss_qf_mean', qf_loss_batch.mean().item(), updates)
writer.add_scalar('losses/loss_qf_min', qf_loss_batch.min().item(), updates)
writer.add_scalar('losses/loss_qf_median', qf_loss_batch.median().item(), updates)
        writer.add_scalar('entropy_temperature/num_nans', agent.num_nans, updates)
writer.add_scalar('agent_rewards/rew1', agent.episode_rewards[0], i_episode)
writer.add_scalar('agent_rewards/rew10', agent.episode_rewards[10], i_episode)
writer.add_scalar('agent_rewards/rew100', agent.episode_rewards[20], i_episode)
writer.add_scalar('agent_rewards/rew1k', agent.episode_rewards[40], i_episode)
writer.add_scalar('agent_rewards/eplenavg', agent.env_train.progress_buf.clone().float().mean().item(), i_episode)
writer.add_scalar('agent_done_stats/done_len', agent.len_dones, i_episode)
writer.add_scalar('agent_done_stats/queue_len', agent.len_queue, i_episode)
writer.add_scalar('agent_done_stats/env_thres', agent.env_thres, i_episode)
writer.add_scalar('agent_done_stats/memory_cgac_horizon', memory_cgac.h, i_episode)
writer.add_scalar('agent_done_stats/sum_episode_wts', (1-agent.episode_wts).sum(), i_episode)
writer.add_scalar('agent_done_stats/agent_curr_rew', agent.reward_batch_curr, i_episode)
updates += 1
total_updates += args.num_critic_updates
total_numsteps = updates*args.batch_size
if total_numsteps > args.num_steps:
break
if i_episode%100 == 0:
time_taken = time.time() - last
last = time.time()
total_time = time.time() - start_time
if len(agent.total_episode_reward_hist)>0:
num_test_avg = 100
if not args.test:
writer.add_scalar('reward/train', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean(), i_episode)
writer.add_scalar('reward/train_recent', agent.total_episode_reward_hist[-1], i_episode)
writer.add_scalar('reward/ep_len', np.array(agent.episode_len_hist[-num_test_avg:]).mean(), i_episode)
writer.add_scalar('reward/rew_eplen', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean()/np.array(agent.episode_len_hist[-num_test_avg:]).mean(), i_episode)
writer.add_scalar('reward/train_agent', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean(), i_episode)
writer.add_scalar('reward/ep_len_agent', np.array(agent.episode_len_hist[-num_test_avg:]).mean(), i_episode)
writer.add_scalar('reward/rew_eplen_agent', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean()/np.array(agent.episode_len_hist[-num_test_avg:]).mean(), i_episode)
writer.add_scalar('reward/train_time', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean(), total_time)
writer.add_scalar('reward/ep_len_time', np.array(agent.episode_len_hist[-num_test_avg:]).mean(), total_time)
writer.add_scalar('reward/train_steps', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean(), total_numsteps)
writer.add_scalar('reward/ep_len_steps', np.array(agent.episode_len_hist[-num_test_avg:]).mean(), total_numsteps)
# writer.add_scalar('variance/snr_q', agent.snr_q, total_numsteps)
# writer.add_scalar('variance/snr_q_iter', agent.snr_q, i_episode)
# writer.add_scalar('variance/ppo_snr', agent.ppo_snr, total_numsteps)
# writer.add_scalar('variance/ppo_snr_iter', agent.ppo_snr, i_episode)
# writer.add_scalar('variance/ppo_snr_adv', agent.ppo_snr_adv, total_numsteps)
# writer.add_scalar('variance/ppo_snr_adv_iter', agent.ppo_snr_adv, i_episode)
print(f"iters: {i_episode}, total numsteps: {total_numsteps}, num ep: {agent.num_episodes}, episode steps: {np.array(agent.episode_len_hist[-num_test_avg:]).mean()}, reward: {np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean()}, progress_buf: {env.progress_buf.min()}, time: {time_taken}, lr: {agent.lr}, num_nans: {agent.num_nans}")
else:
print(f"iters: {i_episode}, total numsteps: {total_numsteps}, num ep: {agent.num_episodes}, progress_buf: {env.progress_buf.min()}, time: {time_taken}, lr: {agent.lr}, num_nans: {agent.num_nans}")
print(agent.alpha)
env.close()
| 13,888 |
Python
| 57.851695 | 358 | 0.682316 |
RoboticExplorationLab/CGAC/cgac/replay_memory.py
|
import random
import numpy as np
import torch
import ipdb
class VectorizedReplayBufferIsaacSAC:
def __init__(self, obs_shape, action_shape, batch_size, num_steps, device, gamma=0.99, lam=0.95, horizon=200000, critic_method='td-lambda'):#
"""Create Vectorized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
See Also
--------
ReplayBuffer.__init__
"""
self.device = device
self.out_device = device
self.batch_size = batch_size
self.num_steps = num_steps
self.critic_method = critic_method
capacity = batch_size*num_steps
self.lam = lam
self.gamma = gamma
self.h = min(horizon, num_steps)
self.minhorizon = 1
self.actions = torch.empty((num_steps, batch_size, *action_shape), dtype=torch.float32, device=self.device)
self.rewards = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.masks = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.term_masks = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.next_values = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.next_state_log_pi = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.states = torch.empty((num_steps, batch_size, *obs_shape), dtype=torch.float32, device=self.device)
self.episode_wts = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.val_cur = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.gamma_k = torch.ones((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.sigmar = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.Vt_new = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.lam_t = torch.ones((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.Vt_out = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.mask_t = torch.ones((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.sigmaG = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.G_prev = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.gamma_lam_k = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
self.sample_start_idxs = torch.zeros((batch_size,), dtype=torch.float32, device=self.device)
self.num_episodes_passed = torch.zeros((batch_size,), dtype=torch.float32, device=self.device)
self.capacity = capacity
self.idx = 0
self.full = False
@torch.no_grad()
def add(self, states, actions, rewards, next_q_value, masks, term_masks, next_state_log_pi, alpha, episode_wts, updates):
num_observations = self.batch_size
if self.idx >= self.num_steps:
self.full = True
self.idx = 0
self.actions[self.idx, :] = actions
self.states[self.idx, :] = states
self.rewards[self.idx, :] = rewards
self.masks[self.idx, :] = masks
self.term_masks[self.idx, :] = term_masks
self.next_values[self.idx, :] = next_q_value
self.next_state_log_pi[self.idx, :] = next_state_log_pi
self.episode_wts[self.idx, :] = episode_wts
self.alpha = alpha
self.compute_target_values()
self.idx += 1
@torch.no_grad()
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obses: torch tensor
batch of observations
actions: torch tensor
batch of actions executed given obs
rewards: torch tensor
rewards received as results of executing act_batch
next_obses: torch tensor
next set of observations seen after executing act_batch
not_dones: torch tensor
inverse of whether the episode ended at this tuple of (observation, action) or not
not_dones_no_max: torch tensor
inverse of whether the episode ended at this tuple of (observation, action) or not, specifically exlcuding maximum episode steps
"""
idxs_steps = torch.randint(0,
self.num_steps if self.full else self.idx,
(batch_size,), device=self.device)
idxs_bsz = torch.randint(0,
self.batch_size,
(batch_size,), device=self.device)
states = self.states[idxs_steps, idxs_bsz]
actions = self.actions[idxs_steps, idxs_bsz]
rewards = self.rewards[idxs_steps, idxs_bsz]
masks = self.masks[idxs_steps, idxs_bsz]
target_val = self.Vt_out[idxs_steps, idxs_bsz]
episode_wts = self.episode_wts[idxs_steps, idxs_bsz]
return states, actions, rewards, target_val, masks, episode_wts
def __len__(self):
return self.idx
@torch.no_grad()
def compute_target_values(self):
if self.critic_method == 'one-step':
self.Vt_out[self.idx] = self.rewards[self.idx] + self.gamma * self.next_values[self.idx] * self.masks[self.idx]
elif self.critic_method == 'td-lambda':
if self.full:
if self.idx>=self.h-1:
start_idx = self.idx - self.h + 1
else:
start_idx = 0
end_idx = self.idx - self.h + 1
mask = self.masks[self.idx]
self.sigmar[end_idx:] = self.sigmar[end_idx:] + self.gamma_k[end_idx:]*self.rewards[self.idx].unsqueeze(0)
G_new = self.sigmar[end_idx:] + self.gamma*self.gamma_k[end_idx:]*(self.next_values[self.idx]*mask).unsqueeze(0)
self.G_prev[end_idx:] = G_new.clone().detach()
self.gamma_k[end_idx:] = self.gamma*self.gamma_k[end_idx:]
Vt_new = (1-self.lam)*self.sigmaG[end_idx:] + self.lam_t[end_idx:]*G_new
self.sigmaG[end_idx:] = self.sigmaG[end_idx:] + self.lam_t[end_idx:]*G_new
self.lam_t[end_idx:] = self.lam_t[end_idx:]*self.lam
self.Vt_out[end_idx:] = self.Vt_out[end_idx:]*(1-self.mask_t[end_idx:]) + Vt_new*self.mask_t[end_idx:]
self.mask_t[end_idx:] = self.mask_t[end_idx:]*(self.grad_masks[self.idx].unsqueeze(0))
if self.idx>0:
mask = self.masks[self.idx]
self.sigmar[start_idx:self.idx] = self.sigmar[start_idx:self.idx] + self.gamma_k[start_idx:self.idx]*self.rewards[self.idx].unsqueeze(0)
G_new = self.sigmar[start_idx:self.idx] + self.gamma*self.gamma_k[start_idx:self.idx]*(self.next_values[self.idx]*mask).unsqueeze(0)
self.G_prev[start_idx:self.idx] = G_new.clone().detach()
self.gamma_k[start_idx:self.idx] = self.gamma*self.gamma_k[start_idx:self.idx]
Vt_new = (1-self.lam)*self.sigmaG[start_idx:self.idx] + self.lam_t[start_idx:self.idx]*G_new
self.sigmaG[start_idx:self.idx] = self.sigmaG[start_idx:self.idx] + self.lam_t[start_idx:self.idx]*G_new
self.lam_t[start_idx:self.idx] = self.lam_t[start_idx:self.idx]*self.lam
self.Vt_out[start_idx:self.idx] = self.Vt_out[start_idx:self.idx]*(1-self.mask_t[start_idx:self.idx]) + Vt_new*self.mask_t[start_idx:self.idx]
self.mask_t[start_idx:self.idx] = self.mask_t[start_idx:self.idx]*(self.grad_masks[self.idx].unsqueeze(0))
else:
if self.idx > 0:
mask = self.masks[self.idx]
self.sigmar[:self.idx] = self.sigmar[:self.idx] + self.gamma_k[:self.idx]*self.rewards[self.idx].unsqueeze(0)
G_new = self.sigmar[:self.idx] + self.gamma*self.gamma_k[:self.idx]*(self.next_values[self.idx]*mask).unsqueeze(0)
self.G_prev[:self.idx] = G_new.clone().detach()
self.gamma_k[:self.idx] = self.gamma*self.gamma_k[:self.idx]
Vt_new = (1-self.lam)*self.sigmaG[:self.idx] + self.lam_t[:self.idx]*G_new
self.sigmaG[:self.idx] = self.sigmaG[:self.idx] + self.lam_t[:self.idx]*G_new
self.lam_t[:self.idx] = self.lam_t[:self.idx]*self.lam
self.Vt_out[:self.idx] = self.Vt_out[:self.idx]*(1-self.mask_t[:self.idx]) + Vt_new*self.mask_t[:self.idx]
self.mask_t[:self.idx]= self.mask_t[:self.idx]*(self.grad_masks[self.idx].unsqueeze(0))
## Initializing for self.idx
mask = self.masks[self.idx]
self.sigmar[self.idx] = self.rewards[self.idx].clone()
G_new = self.sigmar[self.idx] + self.gamma*self.next_values[self.idx]*mask
self.G_prev[self.idx] = G_new.clone().detach()
self.gamma_k[self.idx] = self.gamma
self.sigmaG[self.idx] = G_new.clone().detach()
Vt_new = G_new
self.lam_t[self.idx] = self.lam
self.Vt_out[self.idx] = Vt_new
self.mask_t[self.idx] = self.grad_masks[self.idx].clone().detach()
elif self.critic_method == 'gae-return':
if self.full:
# If the buffer is full, compute the start and end indices for the horizon to compute the gae-return
# After exceeding the buffer length, the experiences wrap around to the beginning of the buffer
if self.idx >= self.h-1:
start_idx = self.idx - self.h + 1
else:
# Accounting for the wrap around of the buffer. We want to update end_idx:end and start_idx:self.idx
start_idx = 0
end_idx = self.idx - self.h + 1
mask = self.masks[self.idx]
delta_gamma_k = self.gamma_lam_k[end_idx:] * (self.rewards[self.idx] - self.next_values[self.idx-1] + self.gamma * self.next_values[self.idx] * mask).unsqueeze(0)
self.Vt_out[end_idx:] = self.Vt_out[end_idx:] + delta_gamma_k * self.mask_t[end_idx:]
self.gamma_lam_k[end_idx:] = self.gamma_lam_k[end_idx:] * self.gamma * self.lam
self.mask_t[end_idx:] = self.mask_t[end_idx:] * self.term_masks[self.idx].unsqueeze(0)
if self.idx > 0:
mask = self.masks[self.idx]
delta_gamma_k = self.gamma_lam_k[start_idx:self.idx] * (self.rewards[self.idx] - self.next_values[self.idx-1] + self.gamma * self.next_values[self.idx] * mask).unsqueeze(0)
self.Vt_out[start_idx:self.idx] = self.Vt_out[start_idx:self.idx] + delta_gamma_k * self.mask_t[start_idx:self.idx]
self.gamma_lam_k[start_idx:self.idx] = self.gamma_lam_k[start_idx:self.idx] * self.gamma * self.lam
self.mask_t[start_idx:self.idx] = self.mask_t[start_idx:self.idx] * self.term_masks[self.idx].unsqueeze(0)
else:
# If the buffer is not full, only need to update start_idx:self.idx
start_idx = max(0, self.idx - self.h + 1)
mask = self.masks[self.idx]
delta_gamma_k = self.gamma_lam_k[start_idx:self.idx] * (self.rewards[self.idx] - self.next_values[self.idx-1] + self.gamma * self.next_values[self.idx] * mask).unsqueeze(0)
self.Vt_out[start_idx:self.idx] = self.Vt_out[start_idx:self.idx] + delta_gamma_k * self.mask_t[start_idx:self.idx]
self.gamma_lam_k[start_idx:self.idx] = self.gamma_lam_k[start_idx:self.idx] * self.gamma * self.lam
self.mask_t[start_idx:self.idx] = self.mask_t[start_idx:self.idx] * self.term_masks[self.idx].unsqueeze(0)
# Update Vt_out, gamma_lam_k, and mask_t for the current timestep
mask = self.masks[self.idx]
delta_gamma_k = self.rewards[self.idx] + self.gamma * self.next_values[self.idx] * mask
self.Vt_out[self.idx] = delta_gamma_k
self.gamma_lam_k[self.idx] = self.gamma * self.lam
self.mask_t[self.idx] = self.term_masks[self.idx].clone().detach()
else:
raise NotImplementedError
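# Hedged reference sketch (not used by the buffer above): a direct, double-loop computation of
# the GAE(lambda) style Q-targets that the incremental 'gae-return' branch maintains online,
# plus a small self-check for a short, non-wrapping rollout. Helper names prefixed with an
# underscore are assumptions made for this example only.
def _reference_gae_q_targets(rewards, next_values, masks, term_masks, gamma, lam):
    # rewards[t], next_values[t] = V(s_{t+1}), masks[t] = 1 - done_t; all shaped (T, N, 1)
    T = rewards.shape[0]
    targets = torch.zeros_like(rewards)
    for t in range(T):
        # one-step bootstrapped target for the transition stored at step t
        targets[t] = rewards[t] + gamma * next_values[t] * masks[t]
        coef = gamma * lam
        alive = term_masks[t].clone()
        for k in range(t + 1, T):
            # later TD residuals, discounted by (gamma*lam)^(k-t) and cut off at terminations
            delta = rewards[k] - next_values[k - 1] + gamma * next_values[k] * masks[k]
            targets[t] = targets[t] + coef * delta * alive
            coef = coef * gamma * lam
            alive = alive * term_masks[k]
    return targets
if __name__ == '__main__':
    torch.manual_seed(0)
    T, N, obs_dim, act_dim, gamma, lam = 6, 3, 4, 2, 0.99, 0.95
    buf = VectorizedReplayBufferIsaacSAC((obs_dim,), (act_dim,), N, 8, 'cpu',
                                         gamma=gamma, lam=lam, horizon=8,
                                         critic_method='gae-return')
    rewards = torch.randn(T, N, 1)
    next_values = torch.randn(T, N, 1)
    masks = torch.ones(T, N, 1)
    for t in range(T):
        buf.add(torch.randn(N, obs_dim), torch.randn(N, act_dim), rewards[t], next_values[t],
                masks[t], masks[t], torch.zeros(N, 1), 0.1, torch.ones(N, 1), t)
    ref = _reference_gae_q_targets(rewards, next_values, masks, masks, gamma, lam)
    assert torch.allclose(buf.Vt_out[:T], ref, atol=1e-5)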
| 12,832 |
Python
| 58.688372 | 193 | 0.584165 |
RoboticExplorationLab/CGAC/cgac/cgac.py
|
import os
import torch
import torch.nn.functional as F
from torch.optim import Adam
from utils_cgac import *
from model import GaussianPolicy, QNetwork, DeterministicPolicy
import gc
import ipdb
from torch.nn.utils.clip_grad import clip_grad_norm_
import numpy as np
import time
from torch.distributions import Normal
class CGAC(object):
def __init__(self, num_inputs, action_space, args, memory_cgac, env=None):
self.gamma = args.gamma
self.tau = args.tau_value
self.alpha = args.alpha
self.args = args
if self.args.on_policy_update:
self.env_train = env
if args.env_name!='AllegroHand':
self.env_train.clear_grad()
self.env_train.reset()
self.state_batch = self.env_train.obs_buf
self.policy_type = args.policy
self.target_update_interval = args.target_update_interval
self.automatic_entropy_tuning = args.automatic_entropy_tuning
self.lr = args.actor_lr
self.device = torch.device("cuda" if args.cuda else "cpu")
self.rms_obs = RunningMeanStd((num_inputs,)).to(self.device)
self.val_running_median = 1.
self.act_running_median = 1.
self.state_running_median = 1.
self.memory_cgac = memory_cgac
self.critic = QNetwork(num_inputs, action_space.shape[0], args.critic_hidden, args).to(device=self.device)
self.critic_optim = Adam(self.critic.parameters(), lr=args.critic_lr, betas = args.betas)
self.critic_target = QNetwork(num_inputs, action_space.shape[0], args.critic_hidden, args).to(self.device)
hard_update(self.critic_target, self.critic)
self.episode_steps = np.zeros(args.batch_size)
self.episode_rewards = torch.zeros(args.batch_size).to(self.device)
self.total_episode_reward_hist = []
self.episode_len_hist = []
self.num_episodes = 0
self.total_numsteps = 0
self.episode_wts = torch.ones(args.batch_size, 1).to(self.device)
self.queue_ids = set([])
self.num_nans = 0
self.rewards_moving_avg = 0.0
self.targ_ent_coeff = 1.0
self.mean_ratio = 100
self.ep_lens = torch.zeros_like(self.env_train.progress_buf) + 10
self.obs_dict = {}
if self.policy_type == "Gaussian":
            # Target Entropy = −dim(A) (e.g., -6 for HalfCheetah-v2) as given in the paper
if self.automatic_entropy_tuning is True:
self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
self.log_alpha.data[0] = np.log(self.alpha)
self.alpha_optim = Adam([self.log_alpha], lr=args.alpha_lr)
self.policy = GaussianPolicy(num_inputs, action_space.shape[0], args.actor_hidden, args, action_space, const_std=args.const_std).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.actor_lr, betas = args.betas)
self.policy_avg = GaussianPolicy(num_inputs, action_space.shape[0], args.actor_hidden, args, action_space, const_std=args.const_std).to(self.device)
hard_update(self.policy_avg, self.policy)
else:
self.alpha = 0
self.automatic_entropy_tuning = False
self.policy = DeterministicPolicy(num_inputs, action_space.shape[0], args.actor_hidden, action_space).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.actor_lr)
def select_action(self, state, evaluate=False):
if evaluate is False:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
return action.detach()
def compute_loss(self, out, targ):
"""
Computes the loss for the critic network.
Hinge loss for TD error with hinge at 4*median.
"""
diff = torch.abs(out - targ)
median = diff.median(dim=0)[0].detach().clone().unsqueeze(0)
mask = (diff < 4*median).float()
cost = torch.square(out - targ)*(mask) + diff*8*median*(1-mask)
cost = torch.mean(cost)
return cost
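    # Hedged worked example of the hinge above (comments only, numbers made up): with per-sample
    # TD errors diff = [0.1, 0.2, 0.3, 0.4, 10.0] the batch median is 0.3, so the threshold is
    # 4 * 0.3 = 1.2. The first four samples stay on the squared branch (0.01, 0.04, 0.09, 0.16),
    # while the outlier 10.0 moves to the linear branch 8 * median * diff = 8 * 0.3 * 10.0 = 24.0
    # instead of 100.0, which keeps rare exploding targets from dominating the critic update.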
def update_parameters_and_collect_buffer_RL(self, memory, batch_size, updates):
### Learning rate schedule for the actor and critic networks ###
if self.args.lr_schedule == 'linear':
final_lr = self.args.final_lr
actor_lr = max((final_lr - self.args.actor_lr) * float((updates) / self.args.max_updates) + self.args.actor_lr, final_lr)
for param_group in self.policy_optim.param_groups:
param_group['lr'] = actor_lr
self.lr = actor_lr
critic_lr = max((final_lr - self.args.critic_lr) * float((updates) / self.args.max_updates) + self.args.critic_lr, final_lr)
for param_group in self.critic_optim.param_groups:
param_group['lr'] = critic_lr
elif self.args.lr_schedule == 'step':
final_lr = self.args.final_lr
exp = updates//self.args.lr_update_freq
actor_lr = max(self.args.actor_lr * (self.args.lr_decay_rate**exp), final_lr)
for param_group in self.policy_optim.param_groups:
param_group['lr'] = actor_lr
self.lr = actor_lr
critic_lr = max(self.args.critic_lr * (self.args.lr_decay_rate**exp), final_lr)
for param_group in self.critic_optim.param_groups:
param_group['lr'] = critic_lr
else:
self.lr = self.args.actor_lr
### Schedule for alpha or target entropy for policy ###
if self.args.alpha_schedule == 'linear':
if self.automatic_entropy_tuning:
self.targ_ent_coeff = (self.args.final_targ_ent_coeff - self.args.init_targ_ent_coeff)*(self.rewards_moving_avg - self.args.init_expected_reward)/\
(self.args.peak_expected_reward - self.args.init_expected_reward) + self.args.init_targ_ent_coeff
else:
self.alpha = max((self.args.alpha_final - self.args.alpha_init) * float((updates) / self.args.max_updates_alpha) + self.args.alpha_init, self.args.alpha_final)
elif self.args.alpha_schedule == 'decay':
if updates % self.args.decay_steps == 0:
self.alpha = max(self.args.alpha_init/(2**(updates/self.args.decay_steps)), self.args.alpha_final)
### Update Obs statistics and get obs ###
self.rms_obs.update(self.env_train.obs_buf.detach().clone())
obs_batch = self.rms_obs(self.env_train.obs_buf)
### Sample actions and execute it in the environment ###
with torch.no_grad():
action, _, _ = self.policy_avg.sample(obs_batch)
action_batch = action
with torch.no_grad():
next_obs_batch, reward_batch, done_batch, info = self.env_train.step(action_batch, force_done_ids)
next_obs_batch = self.rms_obs(next_obs_batch)
mask_batch = (1-done_batch.unsqueeze(-1)).float()
done_env_ids = done_batch.nonzero(as_tuple = False).squeeze(-1).cpu().numpy()
### Update reward statistics ###
self.episode_steps += 1
self.total_numsteps += self.args.batch_size
self.episode_rewards += reward_batch
reward_batch = reward_batch.unsqueeze(-1)
self.reward_batch_curr = (reward_batch.clone().detach()*self.episode_wts).sum()/self.episode_wts.sum()
self.rewards_moving_avg = 0.95*self.rewards_moving_avg + 0.05*self.reward_batch_curr
### Handling Env Terminations ###
if len(done_env_ids) > 0:
self.total_episode_reward_hist += self.episode_rewards[done_env_ids].cpu().numpy().tolist()
self.episode_len_hist += self.episode_steps[done_env_ids].tolist()
self.episode_rewards[done_env_ids] = 0
self.episode_steps[done_env_ids] = 0
self.num_episodes += len(done_env_ids)
inv_mask = torch.logical_or(
torch.logical_or(
torch.isnan(info['obs_before_reset'][done_env_ids]).sum(dim=-1) > 0,
torch.isinf(info['obs_before_reset'][done_env_ids]).sum(dim=-1) > 0),
(torch.abs(info['obs_before_reset'][done_env_ids]) > 1e6).sum(dim=-1) > 0
)
self.num_nans += inv_mask.float().sum()
eplen_mask = self.env_train.progress_buf_mask[done_env_ids] == self.env_train.episode_length
if eplen_mask.float().sum() >0:
eplen_done_ids = done_env_ids[eplen_mask.cpu().numpy()]
inv_mask = torch.logical_or(
torch.logical_or(
torch.isnan(info['obs_before_reset'][eplen_done_ids]).sum(dim=-1) > 0,
torch.isinf(info['obs_before_reset'][eplen_done_ids]).sum(dim=-1) > 0),
(torch.abs(info['obs_before_reset'][eplen_done_ids]) > 1e6).sum(dim=-1) > 0
)
valid_mask = torch.logical_not(inv_mask)
val_done_ids = eplen_done_ids[valid_mask.cpu().numpy()]
if len(val_done_ids)>0:
mask_batch[val_done_ids] = 1.
next_obs_batch[val_done_ids] = self.rms_obs(info['obs_before_reset'][val_done_ids])
### Compute next state Q values ###
with torch.no_grad():
next_state_action1, next_state_log_pi, _ = self.policy_avg.sample(next_obs_batch.detach(), 1)
qf1_next_target1, qf2_next_target1 = self.critic_target(next_obs_batch, next_state_action1)
min_qf_next = torch.min(qf1_next_target1, qf2_next_target1)
term_mask = mask_batch.clone().detach()
### Update replay buffer ###
self.memory_cgac.add(obs_batch, action_batch, reward_batch, min_qf_next.detach().clone(), mask_batch, term_mask, next_state_log_pi.detach().clone(), self.alpha, self.episode_wts.clone().detach(), updates)
        ### Prevent sudden collapse caused by too many envs resetting at once - this can skew the data distribution ###
        ### This is done by keeping a count of finished episodes and performing resets at a specific rate ###
        ### Episodes that have finished but are not yet reset are ignored for training ###
env_thres_stop = self.env_thres = (self.args.batch_size)/(self.env_train.ep_lens[:200].float().mean()+1e-8)
env_thres_start = max((self.args.batch_size)/(self.env_train.ep_lens.float().mean()+1e-8), 1)
self.len_dones = len(done_env_ids)
force_done_ids = None
done_env_ids_resets = list(set(done_env_ids).difference(self.queue_ids))
if len(done_env_ids_resets) > int(env_thres_stop) + 1:
env_thres_int = int(env_thres_stop) + 1
self.episode_wts[done_env_ids_resets[env_thres_int:]] = 0
self.queue_ids.update(list(done_env_ids_resets[env_thres_int:]))
if len(done_env_ids_resets) < int(env_thres_start):
env_thres_int = int(env_thres_start)
num_resets = min(env_thres_int-len(done_env_ids_resets), len(self.queue_ids)+self.args.max_rand_resets)
num_rand_resets = max(min(num_resets - len(self.queue_ids), self.args.max_rand_resets),0)
nids = min(env_thres_int-len(done_env_ids_resets), len(self.queue_ids))
queue_ids = list(self.queue_ids)
if num_rand_resets>0:
rand_reset_ids = list(np.random.randint(self.args.batch_size, size=(num_rand_resets,)))
reset_ids = rand_reset_ids + queue_ids
self.memory_cgac.mask_t[:, rand_reset_ids] *= 0
else:
reset_ids = queue_ids[:nids]
self.episode_wts[reset_ids] = 1
force_done_ids = torch.tensor(reset_ids).long()
self.queue_ids = set(queue_ids[nids:]).difference(reset_ids)
self.env_train.reset(force_done_ids, eplenupdate=False)
self.episode_rewards[force_done_ids] = 0
self.episode_steps[force_done_ids] = 0
self.len_queue = len(self.queue_ids)
### Update critic parameters if num_critic_updates > 1 ###
for i in range(self.args.num_critic_updates-1):
self.update_parameters_with_RL(updates, critic_update_only=True)
updates += 1
### Update critic params, actor params and alpha ###
return self.update_parameters_with_RL(updates)
def update_parameters_with_RL(self, updates, critic_update_only=False):
obs_batch_new, action_batch, reward_batch, next_q_value, mask_batch, episode_wts = self.memory_cgac.sample(self.args.batch_size_update)
episode_wts = episode_wts/episode_wts.mean()
qf1, qf2 = self.critic(obs_batch_new, action_batch) # Two Q-functions to mitigate positive bias in the policy improvement step
### CRITIC UPDATE ###
### Computing hinge TD errors. episode_wts are used to ignore transitions ###
qf1_loss = self.compute_loss(qf1*episode_wts, next_q_value*episode_wts) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf2_loss = self.compute_loss(qf2*episode_wts, next_q_value*episode_wts) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf_loss = qf1_loss + qf2_loss
qf1_loss_batch = (qf1*episode_wts - next_q_value*episode_wts)**2
qf2_loss_batch = (qf2*episode_wts - next_q_value*episode_wts)**2
qf_loss_batch = qf1_loss_batch + qf2_loss_batch
qf_loss_full = qf_loss
self.critic_optim.zero_grad()
qf_loss_full.backward()
critic_grad_norm = clip_grad_norm_(self.critic.parameters(), self.args.grad_norm)
self.critic_optim.step()
### Update actor and alpha if critic_update_only is not True ###
if not critic_update_only:
### ACTOR UPDATE ###
pi, log_pi, _, x_t = self.policy.sample(obs_batch_new.detach().clone(), with_xt=True)
qf1_pi, qf2_pi = self.critic(obs_batch_new.detach().clone(), pi)
min_qf_pi = torch.min(qf1_pi,qf2_pi)*episode_wts
if self.args.clip_actor_gn:
qpi_grad = torch.autograd.grad(min_qf_pi.mean(), pi, retain_graph=True)[0]
ratio = (qpi_grad.abs().max(dim=0)[0]/qpi_grad.abs().median(dim=0)[0]).mean()
self.mean_ratio = 0.95*self.mean_ratio + 0.05*ratio
if ratio > self.mean_ratio*2:
qpi_grad = torch.clamp(qpi_grad, min=torch.quantile(qpi_grad, 0.5, dim=0), max=torch.quantile(qpi_grad, 0.95, dim=0)).detach().clone()
policy_loss = (self.alpha * log_pi*episode_wts).mean() - (qpi_grad*pi).sum()# # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
else:
policy_loss = ((self.alpha * log_pi*episode_wts) - min_qf_pi).mean()
# pol_grad = torch.autograd.grad(policy_loss, pi, retain_graph=True)[0]
# self.snr_q = (pol_grad.mean(dim=0).abs()/pol_grad.std(dim=0)).mean()
self.policy_optim.zero_grad()
policy_loss.backward()
actor_grad_norm = self.policy.layers[3].weight.grad.norm().item()
self.policy_optim.step()
### ALPHA UPDATE ###
if self.automatic_entropy_tuning:
alpha_loss = -(self.log_alpha * (log_pi*episode_wts + self.targ_ent_coeff*self.target_entropy).detach()).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
self.alpha = min(self.log_alpha.exp().detach().item(), self.alpha)
self.log_alpha.data[0] = np.log(self.alpha)
alpha_tlogs = self.alpha # For TensorboardX logs
else:
alpha_loss = torch.tensor(0.).to(self.device)
alpha_tlogs = torch.tensor(self.alpha) # For TensorboardX logs
### Update target network and policy avg network ###
if updates % self.target_update_interval == 0:
soft_update(self.critic_target, self.critic, self.args.tau_value)
soft_update(self.policy_avg, self.policy, self.args.tau_policy)
return qf1_loss.item(), qf2_loss.item(), policy_loss.item(), alpha_loss.item(), alpha_tlogs, log_pi.mean().item(), min_qf_pi.mean().item(), next_q_value.detach().cpu(), \
qf_loss_batch.detach().cpu(), actor_grad_norm, critic_grad_norm
return None
def update_parameters_and_collect_buffer_RL_isaac(self, memory, batch_size, updates):
### Learning rate schedule for the actor and critic networks ###
if self.args.lr_schedule == 'linear':
final_lr = self.args.final_lr
actor_lr = max((final_lr - self.args.actor_lr) * float((updates) / self.args.max_updates) + self.args.actor_lr, final_lr)
for param_group in self.policy_optim.param_groups:
param_group['lr'] = actor_lr
self.lr = actor_lr
critic_lr = max((final_lr - self.args.critic_lr) * float((updates) / self.args.max_updates) + self.args.critic_lr, final_lr)
for param_group in self.critic_optim.param_groups:
param_group['lr'] = critic_lr
elif self.args.lr_schedule == 'step':
final_lr = self.args.final_lr
exp = updates//self.args.lr_update_freq
actor_lr = max(self.args.actor_lr * (self.args.lr_decay_rate**exp), final_lr)
for param_group in self.policy_optim.param_groups:
param_group['lr'] = actor_lr
self.lr = actor_lr
critic_lr = max(self.args.critic_lr * (self.args.lr_decay_rate**exp), final_lr)
for param_group in self.critic_optim.param_groups:
param_group['lr'] = critic_lr
else:
self.lr = self.args.actor_lr
### Schedule for alpha or target entropy for policy ###
if self.args.alpha_schedule == 'linear':
if self.automatic_entropy_tuning:
self.targ_ent_coeff = max((self.args.final_targ_ent_coeff - self.args.init_targ_ent_coeff)*(self.rewards_moving_avg - self.args.init_expected_reward)/\
(self.args.peak_expected_reward - self.args.init_expected_reward) + self.args.init_targ_ent_coeff, -0.4)#, self.targ_ent_coeff)#self.args.final_targ_ent_coeff)
# if self.rewards_moving_avg > self.args.peak_expected_reward:
# self.alpha = min(self.alpha, 1e-5)
else:
self.alpha = max((self.args.alpha_final - self.args.alpha_init) * float((updates) / self.args.max_updates_alpha) + self.args.alpha_init, self.args.alpha_final)
elif self.args.alpha_schedule == 'decay':
if updates % self.args.decay_steps == 0:
self.alpha = max(self.args.alpha_init/(2**(updates/self.args.decay_steps)), self.args.alpha_final)
### Update Obs statistics and get obs ###
obs_dict, dones = self._env_reset_done()
obs_buf = obs_dict['obs']
self.rms_obs.update(obs_buf)
obs_batch = self.rms_obs(obs_buf)
### Sample actions and execute it in the environment ###
with torch.no_grad():
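            # Rollout actions come from the Polyak-averaged policy (policy_avg), not the online policy being optimised.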
action, _, _ = self.policy_avg.sample(obs_batch)
action_batch = action
next_obs_batch, reward_batch, done_batch, info = self.env_train.step(action_batch)
next_obs_batch = next_obs_batch['obs']
next_obs_batch = self.rms_obs(next_obs_batch)
mask_batch = (1-done_batch.unsqueeze(-1)).float()
done_env_ids = done_batch.nonzero(as_tuple = False).squeeze(-1).cpu().numpy()
term_mask = mask_batch.clone()
### Update reward statistics ###
self.episode_steps += 1
self.total_numsteps += self.args.batch_size
self.episode_rewards += reward_batch.detach().clone()
reward_batch = reward_batch.unsqueeze(-1)
self.reward_batch_curr = (reward_batch.clone().detach()*self.episode_wts).sum()/self.episode_wts.sum()
self.rewards_moving_avg = 0.95*self.rewards_moving_avg + 0.05*self.reward_batch_curr
        ### Handling Env Terminations ###
if len(done_env_ids) > 0:
self.total_episode_reward_hist += self.episode_rewards[done_env_ids].cpu().numpy().tolist()
self.episode_len_hist += self.episode_steps[done_env_ids].tolist()
self.episode_rewards[done_env_ids] = 0
self.episode_steps[done_env_ids] = 0
self.num_episodes += len(done_env_ids)
inv_mask = torch.logical_or(
torch.logical_or(
torch.isnan(next_obs_batch[done_env_ids]).sum(dim=-1) > 0,
torch.isinf(next_obs_batch[done_env_ids]).sum(dim=-1) > 0),
(torch.abs(next_obs_batch[done_env_ids]) > 1e6).sum(dim=-1) > 0
)
self.num_nans += inv_mask.float().sum()
self.ep_lens = torch.cat([self.env_train.progress_buf[done_env_ids].clone(), self.ep_lens], dim=0)[:200]
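            # Episodes cut short purely by the time limit are not true terminations: if their next
            # observation is finite, mask is set back to 1 so the critic target still bootstraps through the timeout.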
eplen_mask = self.env_train.progress_buf[done_env_ids] == self.env_train.max_episode_length # Need to check
if eplen_mask.float().sum() >0:
eplen_done_ids = done_env_ids[eplen_mask.cpu().numpy()]
inv_mask = torch.logical_or(
torch.logical_or(
torch.isnan(next_obs_batch[eplen_done_ids]).sum(dim=-1) > 0,
torch.isinf(next_obs_batch[eplen_done_ids]).sum(dim=-1) > 0),
(torch.abs(next_obs_batch[eplen_done_ids]) > 1e6).sum(dim=-1) > 0
)
valid_mask = torch.logical_not(inv_mask)
val_done_ids = eplen_done_ids[valid_mask.cpu().numpy()]
if len(val_done_ids)>0:
mask_batch[val_done_ids] = 1.
### Compute next state Q values ###
with torch.no_grad():
next_state_action1, next_state_log_pi, _ = self.policy_avg.sample(next_obs_batch.detach(), 1)
qf1_next_target1, qf2_next_target1 = self.critic_target(next_obs_batch, next_state_action1)
min_qf_next = torch.min(qf1_next_target1, qf2_next_target1)
### Update replay buffer ###
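        # Instead of raw next observations, the buffer stores the target-side quantities computed at
        # collection time (min target Q, next-action log-prob, current alpha) that form the critic target.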
self.memory_cgac.add(obs_batch, action_batch, reward_batch, min_qf_next.detach().clone(), mask_batch, term_mask, next_state_log_pi.detach().clone(), self.alpha, self.episode_wts.clone().detach(), updates)
        ### Prevent a sudden collapse where too many envs finish and reset at once - this can skew the data distribution ###
        ### A count of finished episodes is kept and resets are released at a controlled rate ###
        ### Finished episodes are ignored for training until they are actually reset ###
env_thres_stop = self.env_thres = (self.args.batch_size)/(self.ep_lens[:200].float().mean()+1e-8)
env_thres_start = (self.args.batch_size)/(self.ep_lens.float().mean()+1e-8)
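        # env_thres_* approximate the expected number of terminations per step (num envs / mean episode length).
        # Finished envs beyond this rate are parked in queue_ids with episode_wts = 0 and reset later,
        # keeping the reset rate roughly constant.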
self.len_dones = len(done_env_ids)
done_env_ids_resets = list(set(done_env_ids).difference(self.queue_ids))
force_done_ids = None
if len(done_env_ids_resets) > int(env_thres_stop) + 1:
env_thres_int = int(env_thres_stop) + 1
self.episode_wts[done_env_ids_resets[env_thres_int:]] = 0
self.queue_ids.update(list(done_env_ids_resets[env_thres_int:]))
if len(done_env_ids_resets) < int(env_thres_start):
env_thres_int = int(env_thres_start)
num_resets = min(env_thres_int-len(done_env_ids_resets), len(self.queue_ids)+self.args.max_rand_resets)
num_rand_resets = max(min(num_resets - len(self.queue_ids), self.args.max_rand_resets),0)
nids = min(env_thres_int-len(done_env_ids_resets), len(self.queue_ids))
queue_ids = list(self.queue_ids)
if num_rand_resets>0:
rand_reset_ids = list(np.random.randint(self.args.batch_size, size=(num_rand_resets,)))
reset_ids = rand_reset_ids + queue_ids
self.memory_cgac.mask_t[:, rand_reset_ids] *= 0
else:
reset_ids = queue_ids[:nids]
if len(reset_ids) > 0:
self.episode_wts[reset_ids] = 1
force_done_ids = torch.tensor(reset_ids).long()
self.queue_ids = set(queue_ids[nids:]).difference(reset_ids)
self.env_train.reset_idx(force_done_ids, force_done_ids)
self.episode_rewards[force_done_ids] = 0
self.episode_steps[force_done_ids] = 0
self.len_queue = len(self.queue_ids)
### Update critic parameters if num_critic_updates > 1 ###
for i in range(self.args.num_critic_updates-1):
self.update_parameters_with_RL_isaac(updates, critic_update_only=True)
updates += 1
### Update critic params, actor params and alpha ###
return self.update_parameters_with_RL_isaac(updates)
def update_parameters_with_RL_isaac(self, updates, critic_update_only=False):
obs_batch_new, action_batch, reward_batch, next_q_value, mask_batch, episode_wts = self.memory_cgac.sample(self.args.batch_size_update)
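        # episode_wts are 0 for envs parked in the reset queue and 1 otherwise; renormalising by the
        # mean keeps the loss scale independent of how many transitions are being ignored.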
episode_wts = episode_wts/episode_wts.mean()
qf1, qf2 = self.critic(obs_batch_new, action_batch) # Two Q-functions to mitigate positive bias in the policy improvement step
### CRITIC UPDATE ###
### Computing hinge TD errors. episode_wts are used to ignore transitions ###
qf1_loss = self.compute_loss(qf1*episode_wts, next_q_value*episode_wts)
qf2_loss = self.compute_loss(qf2*episode_wts, next_q_value*episode_wts)
qf_loss = qf1_loss + qf2_loss
qf1_loss_batch = (qf1*episode_wts - next_q_value*episode_wts)**2
qf2_loss_batch = (qf2*episode_wts - next_q_value*episode_wts)**2
qf_loss_batch = qf1_loss_batch + qf2_loss_batch
qf_loss_full = qf_loss
self.critic_optim.zero_grad()
qf_loss_full.backward()
critic_grad_norm = clip_grad_norm_(self.critic.parameters(), self.args.grad_norm)
self.critic_optim.step()
### Update actor and alpha if critic_update_only is not True ###
if not critic_update_only:
### ACTOR UPDATE ###
pi, log_pi, _ = self.policy.sample(obs_batch_new.detach().clone())
qf1_pi, qf2_pi = self.critic(obs_batch_new.detach().clone(), pi)
min_qf_pi = torch.min(qf1_pi,qf2_pi)*episode_wts
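            # With clip_actor_gn, the actor loss uses the detached Q-gradient w.r.t. the sampled actions
            # (clamped to its 50th-95th batch quantiles whenever its max/median ratio exceeds twice the
            # running average) instead of backpropagating through min_qf_pi.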
if self.args.clip_actor_gn:
qpi_grad = torch.autograd.grad(min_qf_pi.mean(), pi, retain_graph=True)[0]
ratio = (qpi_grad.abs().max(dim=0)[0]/qpi_grad.abs().median(dim=0)[0]).mean()
self.mean_ratio = 0.95*self.mean_ratio + 0.05*ratio
if ratio > self.mean_ratio*2:
qpi_grad = torch.clamp(qpi_grad, min=torch.quantile(qpi_grad, 0.5, dim=0), max=torch.quantile(qpi_grad, 0.95, dim=0)).detach().clone()
                policy_loss = (self.alpha * log_pi*episode_wts).mean() - (qpi_grad*pi).sum()  # Jπ = 𝔼_{s_t∼D, ε_t∼N}[α·logπ(f(ε_t;s_t)|s_t) − Q(s_t, f(ε_t;s_t))]
else:
policy_loss = ((self.alpha * log_pi*episode_wts) - min_qf_pi).mean()
self.policy_optim.zero_grad()
policy_loss.backward()
actor_grad_norm = self.policy.layers[3].weight.grad.norm().item()
self.policy_optim.step()
### ALPHA UPDATE ###
if self.automatic_entropy_tuning:
alpha_loss = -(self.log_alpha * (log_pi*episode_wts + self.targ_ent_coeff*self.target_entropy).detach()).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
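                # Alpha is only allowed to decrease: keep the smaller of the newly optimised value and
                # the current one, then write it back into log_alpha.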
self.alpha = min(self.log_alpha.exp().detach().item(), self.alpha)
self.log_alpha.data[0] = np.log(self.alpha)
alpha_tlogs = self.alpha # For TensorboardX logs
else:
alpha_loss = torch.tensor(0.).to(self.device)
alpha_tlogs = torch.tensor(self.alpha) # For TensorboardX logs
### Update target network and policy avg network ###
if updates % self.target_update_interval == 0:
soft_update(self.critic_target, self.critic, self.args.tau_value)
soft_update(self.policy_avg, self.policy, self.args.tau_policy)
return qf1_loss.item(), qf2_loss.item(), policy_loss.item(), alpha_loss.item(), alpha_tlogs, log_pi.mean().item(), min_qf_pi.mean().item(), next_q_value.detach().cpu(), \
qf_loss_batch.detach().cpu(), actor_grad_norm, critic_grad_norm
return None
def _env_reset_done(self):
"""Reset the environment.
Returns:
Observation dictionary, indices of environments being reset
"""
done_env_ids = self.env_train.reset_buf.nonzero(as_tuple=False).flatten()
goal_env_ids = self.env_train.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
# if only goals need reset, then call set API
if len(goal_env_ids) > 0 and len(done_env_ids) == 0:
self.env_train.reset_target_pose(goal_env_ids, apply_reset=True)
# if goals need reset in addition to other envs, call set API in reset()
elif len(goal_env_ids) > 0:
self.env_train.reset_target_pose(goal_env_ids)
if len(done_env_ids) > 0:
self.env_train.reset_idx(done_env_ids, goal_env_ids)
if len(goal_env_ids) > 0 or len(done_env_ids) > 0:
self.env_train.compute_observations()
self.obs_dict["obs"] = torch.clamp(self.env_train.obs_buf, -self.env_train.clip_obs, self.env_train.clip_obs).to(self.env_train.rl_device)
return self.obs_dict, done_env_ids
# Save model parameters
def save_checkpoint(self, env_name, suffix="", ckpt_path=None):
if not os.path.exists('checkpoints/'):
os.makedirs('checkpoints/')
if ckpt_path is None:
ckpt_path = "checkpoints/cgac_checkpoint_{}_{}".format(env_name, suffix)
print('Saving models to {}'.format(ckpt_path))
torch.save({'policy_state_dict': self.policy.state_dict(),
'critic_state_dict': self.critic.state_dict(),
'critic_target_state_dict': self.critic_target.state_dict(),
'critic_optimizer_state_dict': self.critic_optim.state_dict(),
'policy_optimizer_state_dict': self.policy_optim.state_dict()}, ckpt_path)
# Load model parameters
def load_checkpoint(self, ckpt_path, evaluate=False):
print('Loading models from {}'.format(ckpt_path))
if ckpt_path is not None:
checkpoint = torch.load(ckpt_path)
self.policy.load_state_dict(checkpoint['policy_state_dict'])
self.critic.load_state_dict(checkpoint['critic_state_dict'])
self.critic_target.load_state_dict(checkpoint['critic_target_state_dict'])
self.critic_optim.load_state_dict(checkpoint['critic_optimizer_state_dict'])
self.policy_optim.load_state_dict(checkpoint['policy_optimizer_state_dict'])
if evaluate:
self.policy.eval()
self.critic.eval()
self.critic_target.eval()
else:
self.policy.train()
self.critic.train()
self.critic_target.train()
| 31,804 |
Python
| 52.543771 | 212 | 0.590272 |
RoboticExplorationLab/CGAC/cgac/utils_cgac.py
|
import math
import torch
import torch.nn as nn
import numpy as np
def create_log_gaussian(mean, log_std, t):
"""Creates a log probability for a Gaussian distribution."""
quadratic = -((0.5 * (t - mean) / (log_std.exp())).pow(2))
l = mean.shape
log_z = log_std
z = l[-1] * math.log(2 * math.pi)
log_p = quadratic.sum(dim=-1) - log_z.sum(dim=-1) - 0.5 * z
return log_p
def logsumexp(inputs, dim=None, keepdim=False):
"""Numerically stable logsumexp."""
if dim is None:
inputs = inputs.view(-1)
dim = 0
s, _ = torch.max(inputs, dim=dim, keepdim=True)
outputs = s + (inputs - s).exp().sum(dim=dim, keepdim=True).log()
if not keepdim:
outputs = outputs.squeeze(dim)
return outputs
def soft_update(target, source, tau):
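    """Polyak update of the target parameters: θ_target ← (1 - τ)·θ_target + τ·θ_source."""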
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
def hard_update(target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def rand_sample(act_space, bsz):
return torch.rand((bsz,act_space.shape[0]))*(act_space.high-act_space.low) + act_space.low
def grad_norm(params):
"""Computes the norm of gradients for a group of parameters."""
grad_norm = 0.
for p in params:
if p.grad is not None:
grad_norm += torch.sum(p.grad ** 2)
return torch.sqrt(grad_norm)
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
'''
Updates running statistics from a full batch of data.
'''
class RunningMeanStd(nn.Module):
def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
super(RunningMeanStd, self).__init__()
print('RunningMeanStd: ', insize)
self.insize = insize
self.epsilon = epsilon
self.norm_only = norm_only
self.per_channel = per_channel
if per_channel:
if len(self.insize) == 3:
self.axis = [0,2,3]
if len(self.insize) == 2:
self.axis = [0,2]
if len(self.insize) == 1:
self.axis = [0]
in_size = self.insize[0]
else:
self.axis = [0]
in_size = insize
self.register_buffer("running_mean", torch.zeros(in_size, dtype = torch.float64))
self.register_buffer("running_var", torch.ones(in_size, dtype = torch.float64))
self.register_buffer("count", torch.ones((), dtype = torch.float64))
def _update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
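        # Parallel-moments merge (Chan et al.): combine the batch mean/var/count with the running statistics.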
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + delta**2 * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
def update(self, input):
mean = input.mean(self.axis) # along channel axis
var = input.var(self.axis)
self.running_mean, self.running_var, self.count = self._update_mean_var_count_from_moments(self.running_mean, self.running_var, self.count,
mean, var, input.size()[0] )
def forward(self, input, unnorm=False):
# change shape
if self.per_channel:
if len(self.insize) == 3:
current_mean = self.running_mean.detach().view([1, self.insize[0], 1, 1]).expand_as(input)
current_var = self.running_var.detach().view([1, self.insize[0], 1, 1]).expand_as(input)
if len(self.insize) == 2:
current_mean = self.running_mean.detach().view([1, self.insize[0], 1]).expand_as(input)
current_var = self.running_var.detach().view([1, self.insize[0], 1]).expand_as(input)
if len(self.insize) == 1:
current_mean = self.running_mean.detach().view([1, self.insize[0]]).expand_as(input)
current_var = self.running_var.detach().view([1, self.insize[0]]).expand_as(input)
else:
current_mean = self.running_mean.detach()
current_var = self.running_var.detach()
# get output
if unnorm:
y = torch.clamp(input, min=-5.0, max=5.0)
y = torch.sqrt(current_var.float() + self.epsilon)*y + current_mean.float()
else:
if self.norm_only:
y = input/ torch.sqrt(current_var.float() + self.epsilon)
else:
y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
y = torch.clamp(y, min=-5.0, max=5.0)
return y
'''
Updates running statistics from a full batch of data.
'''
class RunningGradMag(nn.Module):
def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False, const=1):
super(RunningGradMag, self).__init__()
print('RunningGradMag: ', insize)
self.insize = insize
self.epsilon = epsilon
self.const = const
self.norm_only = norm_only
self.per_channel = per_channel
if per_channel:
if len(self.insize) == 3:
self.axis = [0,2,3]
if len(self.insize) == 2:
self.axis = [0,2]
if len(self.insize) == 1:
self.axis = [0]
in_size = self.insize[0]
else:
self.axis = [0]
in_size = insize
self.register_buffer("running_absmean", torch.ones(in_size, dtype = torch.float64))
self.register_buffer("count", torch.ones((), dtype = torch.float64))
def _update_mean_var_count_from_moments(self, mean, count, batch_mean, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
new_count = tot_count
return new_mean, new_count
def update(self, input):
mean = input.abs().mean(self.axis) # along channel axis
self.running_absmean, self.count = self._update_mean_var_count_from_moments(self.running_absmean, self.count,
mean, input.size()[0] )
def forward(self, input, unnorm=False):
# change shape
if self.per_channel:
if len(self.insize) == 3:
current_mean = self.running_absmean.detach().view([1, self.insize[0], 1, 1]).expand_as(input)
if len(self.insize) == 2:
current_mean = self.running_absmean.detach().view([1, self.insize[0], 1]).expand_as(input)
if len(self.insize) == 1:
current_mean = self.running_absmean.detach().view([1, self.insize[0]]).expand_as(input)
else:
current_mean = self.running_absmean.detach()
# get output
if unnorm:
y = input/self.const#(current_mean.float())
else:
y = input*self.const#(current_mean.float())
return y
class RunningMeanStdObs(nn.Module):
def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        assert isinstance(insize, dict)
super(RunningMeanStdObs, self).__init__()
self.running_mean_std = nn.ModuleDict({
k : RunningMeanStd(v, epsilon, per_channel, norm_only) for k,v in insize.items()
})
def forward(self, input, unnorm=False):
        res = {k : self.running_mean_std[k](v, unnorm) for k,v in input.items()}
return res
class DummyRMS(nn.Module):
def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
super(DummyRMS, self).__init__()
print('DummyRMS: ', insize)
self.insize = insize
self.epsilon = epsilon
def forward(self, input, unnorm=False):
return input
def update(self, input):
return None
| 8,104 |
Python
| 37.051643 | 152 | 0.572804 |
RoboticExplorationLab/CGAC/cgac/model.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
LOG_SIG_MAX = 2
LOG_SIG_MIN = -5
epsilon = 1e-6
activations_dict = {'elu': nn.ELU(),
'relu': nn.ReLU(),
'tanh': nn.Tanh()}
# Initialize Policy weights
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class QNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, args):
super(QNetwork, self).__init__()
if isinstance(hidden_dim, int):
hidden_dim = [hidden_dim, hidden_dim]
layer_sizes = [num_inputs + num_actions,]+hidden_dim
activation = activations_dict[args.critic_act]
# Q1 architecture
layers1 = []
for i in range(len(layer_sizes)-1):
layers1.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
layers1.append(activation)
layers1.append(nn.Identity())
self.layers1 = nn.Sequential(*layers1)
self.layer_out1 = nn.Linear(layer_sizes[-1], 1)
# Q2 architecture
layers2 = []
for i in range(len(layer_sizes)-1):
layers2.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
layers2.append(activation)
layers2.append(nn.Identity())
self.layers2 = nn.Sequential(*layers2)
self.layer_out2 = nn.Linear(layer_sizes[-1], 1)
self.apply(weights_init_)
def forward(self, state, action):
xu = torch.cat([state, action], 1)
x1 = self.layers1(xu)
x1 = self.layer_out1(x1)
x2 = self.layers2(xu)
x2 = self.layer_out2(x2)
return x1, x2
class GaussianPolicy(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, args, action_space=None, const_std=False):
super(GaussianPolicy, self).__init__()
if isinstance(hidden_dim, int):
hidden_dim = [hidden_dim, hidden_dim]
layer_sizes = [num_inputs,]+hidden_dim
activation = activations_dict[args.actor_act]
layers = []
for i in range(len(layer_sizes)-1):
layers.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
layers.append(activation)
layers.append(nn.Identity())
self.layers = nn.Sequential(*layers)
self.mean_linear = nn.Linear(layer_sizes[-1], num_actions)
self.const_std = const_std
if const_std:
logstd = -1.0#'actor_logstd_init'
self.logstd = torch.nn.Parameter(torch.ones(num_actions, dtype=torch.float32) * logstd)
else:
self.log_std_linear = nn.Linear(layer_sizes[-1], num_actions)
self.apply(weights_init_)
# action rescaling
if action_space is None:
self.action_scale = torch.tensor(1.)
self.action_bias = torch.tensor(0.)
else:
self.action_scale = torch.FloatTensor(
(action_space.high - action_space.low) / 2.)
self.action_bias = torch.FloatTensor(
(action_space.high + action_space.low) / 2.)
def forward(self, state):
x = self.layers(state)
mean = self.mean_linear(x)
if self.const_std:
log_std = self.logstd
else:
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
return mean, log_std
def log_prob(self, state, action, x_t):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
y_t = (action - self.action_bias)/self.action_scale
log_prob = normal.log_prob(x_t)
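        # Change-of-variables correction for the tanh squashing and action rescaling.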
log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) + epsilon)
log_prob = log_prob.sum(1, keepdim=True)
return log_prob
def sample(self, state, netid=1, with_xt=False):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
x_t = normal.rsample() # for reparameterization trick (mean + std * N(0,1))
y_t = torch.tanh(x_t)
action = y_t * self.action_scale + self.action_bias
log_prob = normal.log_prob(x_t)
# Enforcing Action Bound
log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) + epsilon)
log_prob = log_prob.sum(1, keepdim=True)
mean = torch.tanh(mean) * self.action_scale + self.action_bias
if with_xt:
return action, log_prob, mean, x_t
else:
return action, log_prob, mean
def to(self, device):
self.action_scale = self.action_scale.to(device)
self.action_bias = self.action_bias.to(device)
return super(GaussianPolicy, self).to(device)
class DeterministicPolicy(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
super(DeterministicPolicy, self).__init__()
if isinstance(hidden_dim, int):
hidden_dim = [hidden_dim, hidden_dim]
layer_sizes = [num_inputs,]+hidden_dim
layers = []
for i in range(len(layer_sizes)-1):
layers.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
layers.append(nn.ReLU())
self.layers = nn.Sequential(*layers)
self.mean = nn.Linear(layer_sizes[-1], num_actions)
self.noise = torch.Tensor(num_actions)
self.apply(weights_init_)
# action rescaling
if action_space is None:
self.action_scale = 1.
self.action_bias = 0.
else:
self.action_scale = torch.FloatTensor(
(action_space.high - action_space.low) / 2.)
self.action_bias = torch.FloatTensor(
(action_space.high + action_space.low) / 2.)
def forward(self, state):
x = self.layers(state)
mean = torch.tanh(self.mean(x)) * self.action_scale + self.action_bias
return mean
def sample(self, state):
mean = self.forward(state)
noise = self.noise.normal_(0., std=0.1)
noise = noise.clamp(-0.25, 0.25)
action = mean + noise
return action, torch.tensor(0.), mean
def to(self, device):
self.action_scale = self.action_scale.to(device)
self.action_bias = self.action_bias.to(device)
self.noise = self.noise.to(device)
return super(DeterministicPolicy, self).to(device)
| 6,511 |
Python
| 34.9779 | 102 | 0.58071 |
shikimori/shikimori/CONTRIBUTING.md
|
* [Fork](https://help.github.com/articles/fork-a-repo) the project on GitHub.
* Make your feature addition or bug fix in a feature branch. (Include a description of your changes)
* Push your feature branch to GitHub.
* Send a [Pull Request](https://help.github.com/articles/using-pull-requests).
After opening your pull request, ensure all tests pass on Circle CI. If a test fails and you believe it is unrelated to your change, leave a comment on the pull request explaining why.
| 482 |
Markdown
| 67.99999 | 184 | 0.76971 |
shikimori/shikimori/README.md
|
[](https://github.com/shikimori/shikimori/actions/workflows/rspec.yml)
## Contributing
Feel free to open tickets or send pull requests with improvements. Thanks in advance for your help!
Please follow the [contribution guidelines](https://github.com/shikimori/shikimori/blob/master/CONTRIBUTING.md).
## Requirements
OSX or Linux
PostgreSQL >= 10.0, Ruby >= 2.6, NodeJS >= 10.0, Elasticsearch 6.x (7.0 not supported), Memcached, Redis
## Issues Board (Agile Season)
https://agileseason.com/#/shared/board/098d2e36dff32f296d7815cf943ac8eb
## Setup
### Checkout all projects
```sh
git clone git@github.com:shikimori/shikimori.git
git clone git@github.com:shikimori/neko-achievements.git
cd neko-achievements
mix local.hex --force
mix deps.get
cd ..
git clone git@github.com:shikimori/camo-server.git
cd camo-server
yarn
cd ..
git clone git@github.com:shikimori/faye-server.git
cd faye-server
yarn
cd ..
cd shikimori
```
### Install `yarn`, `tmux` and `overmind` via Homebrew (OSX)
```sh
brew install yarn tmux overmind
```
On Linux, install them with your distribution's package manager instead.
### Install dependent gems and npm packages
```sh
yarn install
bundle install
```
## PostgreSQL
### DB
```sh
psql -d postgres
```
```sql
create user shikimori_development;
create user shikimori_test;
alter user shikimori_development createdb;
alter user shikimori_test createdb;
alter user shikimori_development with superuser;
alter user shikimori_test with superuser;
```
### Create databases
Make sure the `en_US.UTF-8` database collation is set [https://gist.github.com/ffmike/877447#gistcomment-2851598](https://gist.github.com/morr/9507173acfd504837a7feb4485a5f669)
Alternatively, you can manually initialize a new database with:
```sh
initdb --pgdata=/usr/local/var/postgres-16 -E 'UTF-8' --lc-collate='en_US.UTF-8' --lc-ctype='en_US.UTF-8'
```
Or run initdb on Apple M1:
```sh
initdb --pgdata=/usr/local/var/postgresql@16 -E 'UTF-8' --lc-collate='en_US.UTF-8' --lc-ctype='en_US.UTF-8'
```
Create rails databases
```sh
rails db:create
```
## Local Run
Everything you need to run is listed in [Procfile](https://github.com/shikimori/shikimori/blob/master/Procfile).
Shikimori uses [Overmind](https://github.com/DarthSim/overmind) to execute `Procfile`.
### Restore from a backup
```sh
rails db:drop && rails db:create
unzip -d db/ db/dump.sql.zip
psql -U shikimori_development -d shikimori_development -f db/dump.sql
rm db/dump.sql
RAILS_ENV=test rails db:schema:load
# migrate dump to latest schema
rails db:migrate
```
### Start rails server
```sh
rails server
```
### Start related services
```sh
overmind start
```
### Start some of related services
```sh
OVERMIND_PROCESSES=camo,faye overmind start
```
## Elasticsearch
In rails console:
```
Elasticsearch::RebuildIndexes.new.perform
```
## Elasticsearch fix on OSX
https://github.com/Homebrew/homebrew-core/issues/100260#issuecomment-1137067501
```
I've finally made it work, but I'm not sure this is the right call:
I've edited the service plist at /usr/local/Cellar/elasticsearch@6/6.8.23/homebrew.mxcl.elasticsearch@6.plist:
<key>ProgramArguments</key>
<array>
<string>/usr/local/opt/elasticsearch@6/bin/elasticsearch</string>
</array>
<key>EnvironmentVariables</key>
<dict>
+ <key>JAVA_HOME</key>
+ <string>'/usr/libexec/java_home -v 17'</string>
</dict>
I had to edit the plist in the Cellar folder instead of the one in ~/Library/LaunchAgents because brew services is overwriting it at every start.
```
## Update neko rules
```sh
rails neko:update
```
## Other
### Make a backup
```sh
pg_dump -c shikimori_development > db/dump.sql
```
### Autorun rspec & rubocop
```sh
guard
```
### Record apipie docs
```sh
APIPIE_RECORD=all rspec spec/controllers/api/**
```
### Add new video hosting
```ruby
# app/services/video_extractor/player_url_extractor.rb
```
### Run locally in production mode
```sh
RAILS_ENV=production rails assets:precompile && IS_LOCAL_RUN=true RAILS_ENV=production rails server
```
### Webpack debugger
https://nodejs.org/en/docs/inspector/
Install the Chrome Extension NIM (Node Inspector Manager): https://chrome.google.com/webstore/detail/nim-node-inspector-manage/gnhhdgbaldcilmgcpfddgdbkhjohddkj
```sh
RAILS_ENV=development NODE_ENV=development NODE_PATH=node_modules node --inspect-brk node_modules/.bin/webpack-dev-server --progress --color --config config/webpack/development.js
```
### Shakapacker debugger
https://nodejs.org/en/docs/inspector/
Install the Chrome Extension NIM (Node Inspector Manager): https://chrome.google.com/webstore/detail/nim-node-inspector-manage/gnhhdgbaldcilmgcpfddgdbkhjohddkj
```sh
./bin/shakapacker-dev-server --debug-shakapacker
```
### Webpack visualizer
https://chrisbateman.github.io/webpack-visualizer/
### Dependabot
```
@dependabot ignore this dependency
```
## [Sandboxes](/doc/sandboxes.md)
| 4,968 |
Markdown
| 24.482051 | 179 | 0.741143 |
shikimori/shikimori/config/i18n.yml
|
---
translations:
- file: "app/packs/javascripts/i18n/translations.json"
patterns:
- '*.activerecord.attributes.user_rate.*'
- '*.activerecord.attributes.collection_link.*'
- '*.activerecord.attributes.external_link.url'
- '*.activerecord.attributes.user_rate.statuses.anime.*'
- '*.activerecord.attributes.user_rate.statuses.manga.*'
- '*.frontend.*'
| 394 |
YAML
| 34.909088 | 62 | 0.659898 |
shikimori/shikimori/config/cable.yml
|
development:
adapter: async
test:
adapter: test
production:
adapter: redis
url: <%= ENV.fetch("REDIS_URL") { "redis://localhost:6379/1" } %>
channel_prefix: shikimori_production
| 190 |
YAML
| 16.363635 | 67 | 0.684211 |
shikimori/shikimori/config/appsignal.yml
|
default: &defaults
# Your push api key, it is possible to set this dynamically using ERB:
# push_api_key: "<%= ENV['APPSIGNAL_PUSH_API_KEY'] %>"
push_api_key: '3b66a159-85bd-43dc-acc4-b43750364cdc'
# Your app's name
name: 'App'
# Actions that should not be monitored by AppSignal
# ignore_actions:
# - ApplicationController#isup
# Errors that should not be recorded by AppSignal
# For more information see our docs:
# https://docs.appsignal.com/ruby/configuration/ignore-errors.html
ignore_errors:
- AbstractController::ActionNotFound
- ActionController::InvalidAuthenticityToken
- ActionController::ParameterMissing
- ActionController::RoutingError
- ActionController::UnknownFormat
- ActionController::UnknownHttpMethod
- ActionController::BadRequest
- ActionDispatch::RemoteIp::IpSpoofAttackError
- ActiveRecord::PreparedStatementCacheExpired
- ActiveRecord::RecordNotFound
- CanCan::AccessDenied
- I18n::InvalidLocale
- Unicorn::ClientShutdown
- AgeRestricted
- MismatchedEntries
- InvalidEpisodesError
- CopyrightedResource
- Net::SMTPServerBusy
- Net::SMTPFatalError
- Interrupt
- Apipie::ParamMissing
- InvalidIdError
- InvalidParameterError
- EmptyContentError
- MalParser::RecordNotFound
- Errors::NotIdentifiedByImageMagickError
- Sidekiq::Shutdown
- Terrapin::ExitStatusError
# See http://docs.appsignal.com/ruby/configuration/options.html for
# all configuration options.
# Configuration per environment, leave out an environment or set active
# to false to not push metrics for that environment.
beta:
<<: *defaults
active: false
development:
<<: *defaults
active: false
production:
<<: *defaults
active: true
| 1,784 |
YAML
| 27.790322 | 72 | 0.727018 |
shikimori/shikimori/config/database.yml
|
development: &defaults
adapter: postgresql
encoding: utf8
database: <%= ENV['POSTGRES_DEV_DB'] %>
username: <%= ENV['POSTGRES_DEV_USER'] %>
password: <%= ENV['POSTGRES_DEV_PASSWORD'].presence %>
host: <%= ENV['POSTGRES_DEV_HOST'] %>
pool: 100
timeout: 5000
encoding: utf8
collation: ru_RU.UTF-8
ctype: ru_RU.UTF-8
template: template0
production:
<<: *defaults
test:
<<: *defaults
database: <%= ENV['POSTGRES_TEST_DB'] %><%=ENV['TEST_ENV_NUMBER'] %>
username: <%= ENV['POSTGRES_TEST_USER'] %>
password: <%= ENV['POSTGRES_TEST_PASSWORD'].presence %>
host: <%= ENV['POSTGRES_TEST_HOST'] %>
| 625 |
YAML
| 25.083332 | 70 | 0.632 |
shikimori/shikimori/config/chewy.yml
|
# config/chewy.yml
# separate environment configs
development: &development
host: 'localhost:9200'
prefix: 'shikimori_development'
test:
host: 'localhost:9200'
prefix: 'shikimori_test'
production:
host: 'localhost:9200'
prefix: <%=ENV['USER'] != 'morr' ? 'shikimori_production' : 'shikimori_development' %>
| 319 |
YAML
| 25.666665 | 88 | 0.714734 |
shikimori/shikimori/config/sidekiq.yml
|
:concurrency: 5
:pidfile: tmp/pids/sidekiq.pid
staging:
:concurrency: 5
production:
:concurrency: 80
:queues:
- [high_priority, 8]
- [critical, 10]
- [push_notifications, 2]
- [default, 5]
- [episode_notifications, 5]
- [cpu_intensive, 5]
- [slow_parsers, 5]
- [torrents_parsers, 5]
- [mal_parsers, 3]
- [anime365_parsers, 3]
- [webm_thumbnails, 5]
- [history_jobs, 5]
- [scores_jobs, 4]
- [low_priority, 1]
- [cleanup_jobs, 1]
- [mailers, 5]
- [imports, 4]
- [achievements, 6]
- [chewy, 10]
- [dangerous_actions, 8]
:limits:
cpu_intensive: 2
slow_parsers: 2
torrents_parsers: 1
webm_thumbnails: 1
history_jobs: 1
scores_jobs: 40
cleanup_jobs: 1
mal_parsers: 40
anime365_parsers: 3
push_notifications: 5
imports: 2
achievements: 50
episode_notifications: 1
dangerous_actions: 1
| 854 |
YAML
| 17.191489 | 30 | 0.633489 |
shikimori/shikimori/config/secrets.yml
|
# Be sure to restart your server when you modify this file.
# Your secret key is used for verifying the integrity of signed cookies.
# If you change this key, all old signed cookies will become invalid!
# Make sure the secret is at least 30 characters and all random,
# no regular words or you'll be exposed to dictionary attacks.
# You can use `rails secret` to generate a secure secret key.
# Make sure the secrets in this file are kept private
# if you're sharing your code publicly.
# Do not keep production secrets in the repository,
# instead read values from the environment.
development: &defaults
secret_key_base: fd4e8a95884930c25ebfc3020c53b1f4c128912e33c677f7a5139axx0c01b2ef5de41496e446abd4733ad2a3f51404c712acca297d967d651bddfcfd1c1f55aa
devise:
:secret_key: 2345678fg67fydg9843uitfgr9udfg8ui3ed89fiyucdv8uifgre80tfhgjv9oaf1324346dtyusfjkdsf8sd976732yhjkkednsc78sgcjhb7wyubhjdf867234ingp
:pepper: 8d33d1cb74746054xx09e1bccfc63a82fc9aa251cbe03e3d813985040a88cd37c63c35a6af657f9bb30719f243cee977ff0a431d628657e5e44046e178c3096a
recaptcha:
:v2:
:site_key: 6Le4Q58UAAAAAPykYvE5itXM04NSOsYeQUXzowWM
:secret_key: 6Le4Q58UAAAAAJ0ylh5Zx3GRIJMtfQoZSqNeVpwt
:v3:
:site_key: 6LePQ58UAAAAAJ7HyOCd3Y9VtF5Co8I_2kyQJW9y
:secret_key: 6LePQ58UAAAAALIpZbycjL-IZZtsp6ZtNg_PFi39
oauth:
:facebook:
:app_id: 337441442986680
:app_secret: 6750e33a1997602a019e30cdcd79ea13
:app_permissions: ""
:vkontakte:
:app_id: 2722473
:app_secret: G48K2YtxMajMo67ExE7a
:app_permissions: ""
:twitter:
:secret_key: U8CPcoMCerH9Dqct3sG1XDqBd47XJAroMSuf8Eucjl9YLM49ci
:consumer_key: JEukEItluUpRTJB7Tvd9uU9Sb
mailgun:
:login: xxxxxxxxxxxxxxxxxx
:password: xxxxxxxxxxxxxxxxxxxxxxxx
s3:
connection:
:server: s3-eu-west-1.amazonaws.com
:access_key_id: xxxxxxxxxxxxxxxxxxxx
:secret_access_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
:use_ssl: true
# :persistent: true
bucket: d.shikimori.org
max_file_size: 10485760
acl: public-read
access_key_id: xxxxxxxxxxxxxxxxxxxx
secret_access_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
gcm:
:token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1
api:
:anime_videos:
:token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
camo:
:host: localhost:5566
:port: 5566
:key: abc
:endpoint_path: '/'
faye:
:host: localhost:9292
:port: 9292
:endpoint_path: '/'
:token: xxxxxxxxxxxxxxxxxxxx
# https://proxy6.net/user/proxy
proxy:
:url: <%= ENV['PROXY_URL'] %>
:login: <%= ENV['PROXY_LOGIN'] %>
:password: <%= ENV['PROXY_PASSWORD'] %>
:vkontakte:
# https://oauth.vk.com/authorize?client_id=2427019&scope=video,offline&redirect_uri=http://api.vk.com/blank.html&display=page&response_type=token
:user_access_token: <%= ENV['VK_USER_ACCESS_TOKEN'] %>
vimeo:
:app_access_token: <%= ENV['VIMEO_APP_ACCESS_TOKEN'] %>
turnstile:
:site_key: <%= ENV['TURNSTILE_SITE_KEY'] %>
:secret_key: <%= ENV['TURNSTILE_SECRET_KEY'] %>
yandex_metrika:
:oauth_token: <%= ENV['YANDEX_METRIKA_OAUTH_TOKEN'] %>
test:
<<: *defaults
vkontakte:
:user_access_token: USER_ACCESS_TOKEN_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
vimeo:
:app_access_token: USER_ACCESS_TOKEN_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
production:
<<: *defaults
| 3,454 |
YAML
| 35.755319 | 149 | 0.739722 |
shikimori/shikimori/config/honeybadger.yml
|
---
# For more options, see https://docs.honeybadger.io/lib/ruby/gem-reference/configuration
api_key: 'hbp_tObQMWZaPeNe3kVaGAUZVUc8BAscDg3WQff7'
# The environment your app is running in.
env: "<%= Rails.env %>"
# The absolute path to your project folder.
root: "<%= Rails.root.to_s %>"
# Honeybadger won't report errors in these environments.
development_environments:
- test
- development
- cucumber
exceptions:
ignore:
- AbstractController::ActionNotFound
- ActionController::InvalidAuthenticityToken
- ActionController::ParameterMissing
- ActionController::RoutingError
- ActionController::UnknownFormat
- ActionController::UnknownHttpMethod
- ActionController::BadRequest
- ActionDispatch::RemoteIp::IpSpoofAttackError
- ActiveRecord::PreparedStatementCacheExpired
- ActiveRecord::RecordNotFound
- CanCan::AccessDenied
- I18n::InvalidLocale
- Unicorn::ClientShutdown
- AgeRestricted
- RknBanned
- MismatchedEntries
- InvalidEpisodesError
- CopyrightedResource
- Net::SMTPServerBusy
- Net::SMTPFatalError
- Interrupt
- Apipie::ParamMissing
- InvalidIdError
- InvalidParameterError
- EmptyContentError
- MalParser::RecordNotFound
- Errors::NotIdentifiedByImageMagickError
- Sidekiq::Shutdown
- Terrapin::ExitStatusError
# By default, Honeybadger won't report errors in the development_environments.
# You can override this by explicitly setting report_data to true or false.
# report_data: true
# The current Git revision of your project. Defaults to the last commit hash.
# revision: null
# Enable verbose debug logging (useful for troubleshooting).
debug: false
| 1,692 |
YAML
| 27.694915 | 88 | 0.74409 |
shikimori/shikimori/config/storage.yml
|
test:
service: Disk
root: <%= Rails.root.join("tmp/storage") %>
local:
service: Disk
root: <%= Rails.root.join("storage") %>
# Use rails credentials:edit to set the AWS secrets (as aws:access_key_id|secret_access_key)
# amazon:
# service: S3
# access_key_id: <%= Rails.application.credentials.dig(:aws, :access_key_id) %>
# secret_access_key: <%= Rails.application.credentials.dig(:aws, :secret_access_key) %>
# region: us-east-1
# bucket: your_own_bucket
# Remember not to checkin your GCS keyfile to a repository
# google:
# service: GCS
# project: your_project
# credentials: <%= Rails.root.join("path/to/gcs.keyfile") %>
# bucket: your_own_bucket
# Use rails credentials:edit to set the Azure Storage secret (as azure_storage:storage_access_key)
# microsoft:
# service: AzureStorage
# storage_account_name: your_account_name
# storage_access_key: <%= Rails.application.credentials.dig(:azure_storage, :storage_access_key) %>
# container: your_container_name
# mirror:
# service: Mirror
# primary: local
# mirrors: [ amazon, google, microsoft ]
| 1,093 |
YAML
| 30.257142 | 101 | 0.693504 |
shikimori/shikimori/config/locales/services.en.yml
|
en:
list_compare_service:
group_by_key:
both: In both lists
user_only: "In %{nickname}'s list only"
user_rate_status:
planned: planned
dropped: dropped
messages/create_notification:
nickname_changed: >-
Your friend %{old_nickname} changed nickname to %{new_nickname}.
user_registered_message: >-
Welcome!
[url=%{faq_url}]Here[/url]
you'll find the answers to most frequently asked questions.
You can import anime and manga lists from
[url=http://myanimelist.net]myanimelist.net[/url] in your
[url=/%{settings_path}/edit/account]profile settings[/url].
You can change your nickname there as well.
Before you start posting on the forum we recommend you get familiar with our
[url=%{site_rules_url}]site rules[/url].
If you have any questions or suggestions
feel free to post them on the forum - we'll try to help you.
moderatable_banned:
without_reason: >-
Your [entry=%{topic_id}]%{entry_name}[/entry] was moved to offtopic
reason: >-
because of [quote=%{approver_nickname}]%{reason}[/quote]
bad_email_message: >-
Our mail delivery service couldn't deliver mail to your
email %{email}.
You have either specified not existing email in profile settings
or marked one of our delivered mails as spam.
We recommend that you change your email in profile settings,
or else you won't be able to restore your account
if you forget you password.
omniauth_service:
new_user: New user
bb_codes/tags/replies_tag:
reply: 'Reply: '
replies: 'Replies: '
bb_codes/tags/contest_status_tag:
started: started
finished: finished
bb_codes/tags/contest_round_status_tag:
started: started
finished: finished
messages/generate_body:
profile_comment: >-
Left a comment in your
<a class='b-link' href='%{profile_url}'>profile</a>.
friend_request:
add: Add @{f:her|m:him} to your friend list as well?
added: Has added you to @{f:her|m:him} friend list.
quoted_by_user: >-
You have been <a class="b-link" href="%{comment_url}">mentioned</a>
%{linked_name}
anons: '%{linked_name} anime announced'
ongoing: '%{linked_name} anime airing'
episode: '%{linked_name} episode %{episode} released'
released: '%{linked_name} anime released'
subscription_commented: New messages %{linked_name}
warned:
target: >-
You have been warned for
posting a %{target_type_name} %{linked_name}.
missing: >-
You have been warned for
posting a %{target_type_name} (<em>removed</em>).
Reason: "%{reason}".
other: >-
You have been warned. Reason: "%{reason}".
banned:
target: >-
You have been banned for %{duration} for
posting a %{target_type_name} %{linked_name}.
missing: >-
You have been banned for %{duration} for
posting a %{target_type_name} (<em>removed</em>).
Reason: "%{reason}".
other: >-
You have been banned for %{duration}. Reason: "%{reason}".
club_request: Invitation to join the club [club]%{club_id}[/club].
version_accepted: >-
Your [version=%{version_id}]content change[/version] for
[%{item_type}]%{item_id}[/%{item_type}] was accepted.
version_rejected: >-
Your [version=%{version_id}]content change[/version] for
[%{item_type}]%{item_id}[/%{item_type}] was rejected.
version_rejected_with_reason: >-
Your [version=%{version_id}]content change[/version] for
[%{item_type}]%{item_id}[/%{item_type}] was rejected because of
[quote=%{moderator}]%{reason}[/quote]
messages/mention_source:
simple_mention:
nil: <em>deleted</em>
topic: &default_simple_mention <a href="%{url}"%{bubble}>%{name}</a>
profile: *default_simple_mention
review: *default_simple_mention
critique: *default_simple_mention
article: *default_simple_mention
collection: *default_simple_mention
text_mention:
nil: in <em>deleted</em>.
topic: in topic <a href="%{url}"%{bubble}>%{name}</a>.
profile: in <a href="%{url}"%{bubble}>%{name}</a>'s profile.
review: in review for <a href="%{url}"%{bubble}>%{name}</a>.
messages/check_spam_abuse:
ban_text: >-
You are banned for spam.
Send a word to %{email} in case if you are innocent.
users/check_hacked:
lock_text: >-
This account was used to spam on the site. Account access has been blocked.
To recover access, use the password recovery page. %{recovery_url}
In order to avoid such situations in the future,
we recommend that you do not use simple passwords like "123", "qwerty", "anime", "naruto", etc.
moderations/banhammer:
ban_reason: '[url=%{url}]site rule #3[/url]'
| 4,892 |
YAML
| 36.068182 | 101 | 0.632461 |
shikimori/shikimori/config/locales/value_objects.en.yml
|
en:
titles/season_title:
anime: &anime_season_titles
catalog:
season:
winter: Winter %{year}
spring: Spring %{year}
summer: Summer %{year}
fall: Fall %{year}
year: '%{year} year'
decade: '%{decade}0s'
ancient: Older
short:
        season:
winter: Winter Season
spring: Spring Season
summer: Summer Season
fall: Fall Season
year: '%{year} year'
full:
season:
winter: Winter %{year} Anime
spring: Spring %{year} Anime
summer: Summer %{year} Anime
fall: Fall %{year} Anime
year: Anime %{year}
manga: &manga_season_titles
<<: *anime_season_titles
full:
season:
winter: Winter %{year} Manga
spring: Spring %{year} Manga
summer: Summer %{year} Manga
fall: Fall %{year} Manga
year: Manga %{year}
ranobe:
<<: *manga_season_titles
titles/status_title:
anime: &anime_status_titles
anons: Planned
ongoing: Airing
released: Released
latest: Aired recently
manga: &manga_status_titles
      <<: *anime_status_titles
ongoing: Publishing
latest: Published recently
ranobe:
<<: *manga_status_titles
| 1,323 |
YAML
| 24.960784 | 38 | 0.538171 |
shikimori/shikimori/config/locales/decorators.ru.yml
|
ru:
db_entry_decorator: &db_entry_decorator
no_description: Нет описания
ani_manga_decorator: &ani_manga_decorator
<<: *db_entry_decorator
time_ago_format: '%s назад'
anime_decorator:
<<: *ani_manga_decorator
anime_video_preview_decorator:
score:
excellent: отлично
good: хорошо
okay: нормально
character_decorator:
<<: *db_entry_decorator
job_title:
character: Персонаж
anime: Персонаж аниме
anime_manga: Персонаж аниме и манги
anime_manga_ranobe: Персонаж аниме, манги и ранобэ
anime_ranobe: Персонаж аниме и ранобэ
manga: Персонаж манги
manga_ranobe: Персонаж манги и ранобэ
ranobe: Персонаж ранобэ
contest_decorator:
<<: *db_entry_decorator
club_decorator:
<<: *db_entry_decorator
manga_decorator:
<<: *ani_manga_decorator
person_decorator: &person_decorator
<<: *db_entry_decorator
job_title:
producer: Режиссёр аниме
mangaka: Автор манги
composer: Композитор
vocalist: Вокалист
seyu: Сэйю
anime_manga_projects_participant: Участник аниме и манга проектов
# anime_manga_ranobe_projects_participant: Участник аниме, манга и ранобэ проектов
anime_projects_participant: Участник аниме проектов
# anime_ranobe_projects_participant: Участник аниме и ранобэ проектов
manga_projects_participant: Участник манга проектов
# manga_ranobe_projects_participant: Участник манга и ранобэ проектов
# ranobe_projects_participant: Участник ранобэ проектов
_projects_participant: ''
seyu_decorator:
<<: *person_decorator
collection_decorator:
<<: *db_entry_decorator
user_decorator: &user_decorator
always_online: всегда на сайте
always_online_bot: всегда на сайте (бот)
online: сейчас на сайте
offline: 'в сети: %{time_ago}%{ago}'
ago: назад
registration_formats:
full: '%e %B %Y г.'
month_year: '%d %B %Y г.'
year: '%Y г.'
user_history_decorator:
actions:
add: Добавлено в список
delete: Удалено из списка
complete_with_score: '%{status_name} и оценено на <b>%{score}</b>'
episodes:
completed_movie: Просмотрен фильм
completed_anime: Просмотрены все эпизоды
completed_novel: Прочитана новелла
completed_manga: Прочитана манга
reset_anime: Сброшено число эпизодов
reset_manga: Сброшено число томов и глав
rate:
cancelled: Отменена оценка
changed: Изменена оценка c <b>%{prior_score}</b> на <b>%{score}</b>
rated: Оценено на <b>%{score}</b>
import:
anime: Импортировано аниме - %{records}
manga: Импортирована манга - %{records}
registration: Регистрация на сайте
anime_history_clear: Очистка истории аниме
manga_history_clear: Очистка истории манги
time_ago: '%{time_ago} назад'
watched_one_episode: '%{watched} %{number}%{suffix} %{division}'
watched_two_episodes: '%{watched} %{number_first}%{suffix} и %{number_second}%{suffix} %{division}'
watched_three_episodes: '%{watched} %{number_first}%{suffix}, %{number_second}%{suffix} и %{number_third}%{suffix} %{division}'
watched_episodes_range: '%{watched} с %{number_first}%{suffix_first} по %{number_last}%{suffix_last} %{division}'
watched_first_episodes: '%{watched} %{number}%{suffix} %{division}'
user_profile_decorator:
<<: *user_decorator
version_decorator:
field_name:
screenshots_upload: Загрузка кадров
screenshots_delete: Удаление кадров
screenshots_reposition: Порядок кадров
poster_upload: Загрузка постера
poster_delete: Удаление постера
video_upload: Загрузка видео
video_delete: Удаление видео
role_add: Добавление роли
role_remove: Удаление роли
| 3,807 |
YAML
| 36.333333 | 131 | 0.669031 |
shikimori/shikimori/config/locales/phrases.en.yml
|
en:
actions:
accept: Accept
actions: Actions
add: Add
apply: Apply
cancel: Cancel
confirm: This action is irreversible. Are you sure?
confirm_delete: Confirm deletion
confirm_sync: Confirm sync
confirm_simple: Are you sure?
cancel_delete: Cancel deletion
create: Create
delete: Delete
delete_all: Delete All
edit: Edit
ignore: Ignore
markers:
summary:
add: Add summary mark
remove: Remove summary mark
confirm_add: Add summary mark?
confirm_remove: Remove summary mark?
offtopic:
add: Add offtopic mark
remove: Remove offtopic mark
confirm_add: Add offtopic mark?
confirm_remove: Remove offtopic mark?
review:
convert_to_comment: Convert review into comment?
comment:
convert_to_review: Convert comment into review?
moderate: Moderate
moderation:
      non_strict_moderation: This comment should be moderated only if it violates the law
abuse: Abuse
laws_abuse: Russian law abuse
ban: Ban
hide_to_spoiler: Hide to spoiler
not_offtopic: It's not offtopic
not_summary: It's not summary
not_review: It's not review
offtopic: It's offtopic
spoiler: It's spoiler
summary: It's summary
review: It's review
explain:
abuse: Please describe (optional)
spoiler: Please describe (optional)
edition: Edition
increment: Increment (+)
preview: Preview
quote: Quote
reject: Reject
reply: Reply
rollback: Rollback (-)
save_apply: Save & Apply
start: Start
stop: Stop
take: Take
upload: Upload
upload_image: Upload image
write: Write
by:
id: By ID
aired_on: By release date
date_added: By date added
date_updated: By date updated
kind: By type
name: In alphabetical order
chapters: By number of chapters
episodes: By number of episodes
volumes: By number of volumes
popularity: By popularity
ranked: By rank
random: By random
ranked_random: By random
ranked_shiki: By Shikimori ranking
score: By score
status: By status
answers:
'no': 'no'
'yes': 'yes'
about_site: About site
anime_industry: Anime industry
anime_list: Anime List
anime_title: Anime title
back: back
back_to_page: Back to page
calendar: Calendar
changes_not_saved: Changes not saved!
changes_saved: Changes saved
character_name: Character name
club_name: Club name
collection_name: Collection name
collapse: collapse
cosplay: Cosplay
deleted:
anime: Deleted anime
character: Deleted character
manga: Deleted manga
critique: Deleted critique
video: Deleted video
anime_video: Deleted video
user: Deleted user
error: error
female: female
forum: Forum
gallery: Gallery
goto: Goto
imageboard_art: Imageboard art
in_clubs: In Clubs
in_collections: In Collections
in_english: In English
in_favorites: In favorites
in_japanese: In Japanese
in_russian: In Russian
information: Information
loading: Loading...
previous_value: Previous value
markers:
abuse: abuse
new: new
offtopic: offtopic
spoiler: spoiler
convert_review: review
summary: summary
mail: Mail
male: male
manga_list: Manga List
manga_title: Manga title
mangaka: Mangaka
moderators_only: For moderators only
news: News
no_synopsis: No synopsis
no_comments: No comments
no_collections: No collections
no_critiques: No critiques
no_summaries: No summaries
no_topics: No topics
no_reviews: No reviews
nothing_found: Nothing found
nothing_here: Nothing here
notifications: Notifications
of: of
for: for
page: Page %{page}
page_not_found: Page not found
pagination:
back: Back
next: Next
person: Person
person_name: Person name
producer: Director
ranobe_title: Light Novel title
settings: Settings
seyu: Seiyu
share: Share
search:
search: Search...
title: Search by title...
name: Search by name...
text: Search by text...
shikimori: Shikimori
source: Source
sponsors: Sponsors
this:
anime: this anime
manga: this manga
this_action_cant_be_undone: This action can't be undone!
tournament_bracket: Tournament bracket
total: Total
username: User name
under_development: Under development
page_under_development: This page is not completed and is under development
yes_i_confirm: Yes, I confirm
date_field: Date
navigation: Navigation
form_errors: Errors
'yes': 'Yes'
'no': 'No'
is_deleted: Deleted
facebook: 'Meta Platforms**, as well as its social network Facebook* : ** recognized as an extremist organization, its activities are banned in Russia. * banned in Russia'
facebook_html: 'Meta Platforms**, as well as its social network Facebook*:<br>** recognized as an extremist organization, its activities are banned in Russia.<br>* banned in Russia'
| 4,991 |
YAML
| 25.983784 | 183 | 0.685634 |
shikimori/shikimori/config/locales/helpers.en.yml
|
en:
anime_helper:
minute: min.
hour:
zero: hours
one: hour
other: hours
| 100 |
YAML
| 11.624999 | 18 | 0.52 |
shikimori/shikimori/config/locales/services.ru.yml
|
ru:
list_compare_service:
group_by_key:
both: В обоих списках
user_only: Только в списке %{nickname}
user_rate_status:
planned: в планах
dropped: брошено
messages/create_notification:
nickname_changed: >-
@{f:Твоя|m:Твой} @{f:подруга|m:друг} %{old_nickname}
@{f:изменила|m:изменил} никнейм на %{new_nickname}.
user_registered_message: >-
Добро пожаловать.
[url=%{faq_url}]Здесь[/url]
находятся ответы на наиболее часто задаваемые вопросы.
Импортировать список аниме и манги из
[url=http://myanimelist.net]myanimelist.net[/url] можно в
[url=/%{settings_path}/edit/account]настройках профиля[/url].
Там же можно изменить свой никнейм.
Перед публикацией на форуме рекомендуем ознакомиться с
[url=%{site_rules_url}]правилами сайта[/url].
Если возникнут вопросы или пожелания - пиши на форуме,
мы постараемся тебе ответить.
moderatable_banned:
without_reason: >-
Твоя [entry=%{topic_id}]%{entry_name}[/entry] перенесена в оффтоп
reason: >-
по причине [quote=%{approver_nickname}]%{reason}[/quote]
bad_email_message: >-
Наш почтовый сервис не смог доставить письмо на твою почту %{email}.
Ты либо @{f:указала|m:указал} несуществующий почтовый ящик,
либо когда-то @{f:пометила|m:пометил} одно из наших писем как спам.
Рекомендуем сменить электронный адрес в настройках профиля,
иначе при утрате пароля ты не сможешь восстановить пароль от аккаунта.
omniauth_service:
new_user: Новый пользователь
bb_codes/tags/replies_tag:
reply: 'Ответ: '
replies: 'Ответы: '
bb_codes/tags/contest_status_tag:
started: начат
finished: завершён
bb_codes/tags/contest_round_status_tag:
started: начат
finished: завершён
messages/generate_body:
profile_comment: >-
@{f:Написала|m:Написал} что-то в твоём
<a class='b-link' href='%{profile_url}'>профиле</a>.
friend_request:
add: Добавить @{f:её|m:его} в твой список друзей в ответ?
added: "@{f:Добавила|m:Добавил} тебя в список друзей."
quoted_by_user: >-
@{f:Написала|m:Написал} <a class="b-link" href="%{comment_url}">что-то</a>
тебе %{linked_name}
anons: Анонсировано аниме %{linked_name}
ongoing: Начат показ аниме %{linked_name}
episode: Вышел %{episode} эпизод аниме %{linked_name}
released: Завершён показ аниме %{linked_name}
subscription_commented: Новые сообщения %{linked_name}
warned:
target: >-
Тебе вынесено предупреждение за
%{target_type_name} %{linked_name}.
missing: >-
Тебе вынесено предупреждение за
%{target_type_name} (<em>удалён</em>).
Причина: "%{reason}".
other: >-
Тебе вынесено предупреждение. Причина: "%{reason}".
banned:
target: >-
Ты @{f:забанена|m:забанен} на %{duration} за
%{target_type_name} %{linked_name}.
missing: >-
Ты @{f:забанена|m:забанен} на %{duration} за
%{target_type_name} (<em>удалён</em>).
Причина: "%{reason}".
other: >-
Ты @{f:забанена|m:забанен} на %{duration}. Причина: "%{reason}".
club_request: Приглашение на вступление в клуб [club]%{club_id}[/club].
version_accepted: >-
Твоя [version=%{version_id}]правка[/version] для
[%{item_type}]%{item_id}[/%{item_type}] принята.
version_rejected: >-
Твоя [version=%{version_id}]правка[/version] для
[%{item_type}]%{item_id}[/%{item_type}] отклонена.
version_rejected_with_reason: >-
Твоя [version=%{version_id}]правка[/version] для
[%{item_type}]%{item_id}[/%{item_type}] отклонена по причине:
[quote=%{moderator}]%{reason}[/quote]
messages/mention_source:
simple_mention:
nil: <em>удалено</em>
topic: &default_simple_mention <a href="%{url}"%{bubble}>%{name}</a>
profile: *default_simple_mention
review: *default_simple_mention
critique: *default_simple_mention
article: *default_simple_mention
collection: *default_simple_mention
text_mention:
nil: в <em>удалено</em>.
topic: в топике <a href="%{url}"%{bubble}>%{name}</a>.
profile: в профиле пользователя <a href="%{url}"%{bubble}>%{name}</a>.
review: в отзыве к <a href="%{url}"%{bubble}>%{name}</a>.
messages/check_spam_abuse:
ban_text: >-
Ты @{f:забанена|m:забанен} за спам.
Напиши на %{email}, если ты @{f:невиновна|m:невиновен}.
users/check_hacked:
lock_text: >-
С этого аккаунта на сайте рассылается спам. Доступ к аккаунту забанен.
Для восстановления доступа воспользуйтесь страницей восстановления пароля. %{recovery_url}
Чтобы в дальнейшем избежать подобных ситуаций,
рекомендуем не использовать простые пароли вроде "123", "qwerty", "anime", "naruto" и т.п.
moderations/banhammer:
ban_reason: п.3 [url=%{url}]правил сайта[/url]
| 4,947 |
YAML
| 36.484848 | 96 | 0.639377 |
shikimori/shikimori/config/locales/verbs.en.yml
|
en:
verbs:
watched_episodes:
zero: watched
one: watched
other: watched
read_volumes:
zero: read
one: read
other: read
read_chapters:
zero: read
one: read
other: read
added_by:
zero: added by
one: added by
other: added by
wrote:
zero: wrote
one: wrote
other: wrote
| 376 |
YAML
| 15.391304 | 21 | 0.526596 |
shikimori/shikimori/config/locales/inflections.en.yml
|
en:
inflections:
years_old:
zero: yo
one: yo
other: yo
datetime:
second:
zero: seconds
one: second
other: seconds
minute:
zero: minutes
one: minute
other: minutes
hour:
zero: hours
one: hour
other: hours
day:
zero: days
one: day
other: days
week:
zero: weeks
one: week
other: weeks
month:
zero: months
one: month
other: months
year:
zero: years
one: year
other: years
ordinal:
studio:
one: publisher
few: publishers
ranobe:
zero: light novels
one: light novel
other: light novels
inflections:
user_signed_in:
signed_in: signed in
not_signed_in: not signed in
| 875 |
YAML
| 15.846154 | 36 | 0.482286 |
shikimori/shikimori/config/locales/devise.en.yml
|
# Additional translations at https://github.com/plataformatec/devise/wiki/I18n
en:
devise:
confirmations:
confirmed: "Your email address has been successfully confirmed."
send_instructions: "You will receive an email with instructions for how to confirm your email address in a few minutes."
send_paranoid_instructions: "If your email address exists in our database, you will receive an email with instructions for how to confirm your email address in a few minutes."
failure:
already_authenticated: "You are already signed in."
inactive: "Your account is not activated yet."
invalid: "Invalid %{authentication_keys} or password."
locked: "Your account is locked."
last_attempt: "You have one more attempt before your account is locked."
not_found_in_database: "Invalid %{authentication_keys} or password."
timeout: "Your session expired. Please sign in again to continue."
unauthenticated: "You need to sign in or sign up before continuing."
unconfirmed: "You have to confirm your email address before continuing."
mailer:
confirmation_instructions:
subject: "Confirmation instructions"
reset_password_instructions:
subject: "Reset password instructions"
unlock_instructions:
subject: "Unlock instructions"
email_changed:
subject: "Email Changed"
password_change:
subject: "Password Changed"
omniauth_callbacks:
register: "Successfully registered from %{kind} account."
failure: "Could not authenticate you from %{kind} because \"%{reason}\"."
success: "Successfully authenticated from %{kind} account."
passwords:
no_token: "You can't access this page without coming from a password reset email. If you do come from a password reset email, please make sure you used the full URL provided."
send_instructions: "You will receive an email with instructions on how to reset your password in a few minutes."
send_paranoid_instructions: "If your email address exists in our database, you will receive a password recovery link at your email address in a few minutes."
updated: "Your password has been changed successfully. You are now signed in."
updated_not_active: "Your password has been changed successfully."
registrations:
destroyed: "Bye! Your account has been successfully cancelled. We hope to see you again soon."
signed_up: "Welcome! You have signed up successfully."
signed_up_but_inactive: "You have signed up successfully. However, we could not sign you in because your account is not yet activated."
signed_up_but_locked: "You have signed up successfully. However, we could not sign you in because your account is locked."
signed_up_but_unconfirmed: "A message with a confirmation link has been sent to your email address. Please follow the link to activate your account."
update_needs_confirmation: "You updated your account successfully, but we need to verify your new email address. Please check your email and follow the confirm link to confirm your new email address."
updated: "Your account has been updated successfully."
sessions:
signed_in: "Signed in successfully."
signed_out: "Signed out successfully."
already_signed_out: "Signed out successfully."
user:
already_signed_out: :devise.sessions.signed_out
signed_in: :devise.sessions.signed_in
signed_out: :devise.sessions.signed_out
unlocks:
send_instructions: "You will receive an email with instructions for how to unlock your account in a few minutes."
send_paranoid_instructions: "If your account exists, you will receive an email with instructions for how to unlock it in a few minutes."
unlocked: "Your account has been unlocked successfully. Please sign in to continue."
errors:
messages:
already_confirmed: "was already confirmed, please try signing in"
confirmation_period_expired: "needs to be confirmed within %{period}, please request a new one"
expired: "has expired, please request a new one"
not_found: "not found"
not_locked: "was not locked"
not_saved:
one: "1 error prohibited this %{resource} from being saved:"
other: "%{count} errors prohibited this %{resource} from being saved:"
| 4,356 |
YAML
| 61.242856 | 206 | 0.719467 |
shikimori/shikimori/config/locales/achievements.en.yml
|
en:
achievements:
group:
common: Common
genre: By Genres
franchise: By Franchises
author: By Authors
neko_name:
action: Action
animelist: Anime List
comedy: Comedy
dementia_psychological: Dementia / Psychological
drama: Drama
fantasy: Fantasy
fujoshi: Fujoshi
gar: GAR
historical: Historical
horror_thriller: Horror / Thriller
josei: Josei
kids: For Kids
kuudere: Kuudere
longshounen: Long Title
mahou_shoujo: Mahou Shoujo
mecha: Mecha
military: Military
moe: Moe
music: Music
mystery: Mystery
oldfag: Classic
oniichan: Forbidden love
otaku: Art historian
police: Police
romance: Romance
scifi: Sci-Fi
seinen: Seinen
shortie: Short film
slice_of_life: Slice of Life
        sovietanime: '"Our" anime'
stop_motion: Stop Motion
space: Space
sports: Sport
supernatural: Supernatural
test: Unknown achievement
tsundere: Tsundere
yandere: Yandere
yuuri: Yuri
genki: Genki
world_masterpiece_theater: World Masterpiece Theater
hint:
default: '%{neko_name} level %{level}'
animelist: '%{threshold} anime watched'
| 1,287 |
YAML
| 23.76923 | 58 | 0.616939 |
shikimori/shikimori/config/locales/roles.en.yml
|
en:
role:
2nd Key Animation: 2nd Key Animation
ADR Director: ADR Director
Animation Check: Animation Check
Animation Director: Animation Director
Art Director: Art Director
Art: Art
Assistant Animation Director: Assistant Animation Director
Assistant Director: Assistant Director
Assistant Engineer: Assistant Engineer
Assistant Producer: Assistant Producer
      Assistant Production Coordinat: Assistant Production Coordinator
Associate Casting Director: Associate Casting Director
Associate Producer: Associate Producer
Background Art: Background Art
Brazilian: Seiyu (BR)
Casting Director: Casting Director
Character Design: Character Design
Chief Animation Director: Chief Animation Director
Chief Producer: Chief Producer
Co-Director: Co-Director
Co-Producer: Co-Producer
Color Design: Color Design
Color Setting: Color Setting
Creator: Creator
Dialogue Editing: Dialogue Editing
Digital Paint: Digital Paint
Director of Photography: Director of Photography
Director: Director
Editing: Editing
English: Seiyu (EN)
Episode Director: Episode Director
Executive Producer: Executive Producer
French: Seiyu (FR)
German: Seiyu (DE)
Hebrew: Seiyu (IL)
Hungarian: Seiyu (HU)
In-Between Animation: In-Between Animation
Inserted Song Performance: Inserted Song Performance
Italian: Seiyu (IT)
Japanese: Seiyu
Key Animation: Key Animation
Korean: Seiyu (KR)
Layout: Layout
Mandarin: Seiyu (CH)
Mechanical Design: Mechanical Design
Music: Music
Online Editing Supervision: Online Editing Supervision
Online Editor: Online Editor
Original Character Design: Original Character Design
Original Creator: Original Creator
Planning Producer: Planning Producer
Planning: Planning
Portuguese (BR): Seiyu (BR)
Post-Production Assistant: Post-Production Assistant
Principle Drawing: Principle Drawing
Producer: Producer
Production Assistant: Production Assistant
Production Coordination: Production Coordination
Production Manager: Production Manager
Publicity: Publicity
Re-Recording Mixing: Re-Recording Mixing
Recording Assistant: Recording Assistant
Recording Engineer: Recording Engineer
Recording: Recording
Screenplay: Screenplay
Script: Script
Series Composition: Series Composition
Series Production Director: Series Production Director
Setting Manager: Setting Manager
Setting: Setting
Sound Director: Sound Director
Sound Effects: Sound Effects
Sound Manager: Sound Manager
Sound Supervisor: Sound Supervisor
Spanish: Seiyu (ES)
Special Effects: Special Effects
Spotting: Spotting
Story & Art: Story & Art
Story: Story
Storyboard: Storyboard
Theme Song Arrangement: Theme Song Arrangement
Theme Song Composition: Theme Song Composition
Theme Song Lyrics: Theme Song Lyrics
Theme Song Performance: Theme Song Performance
| 3,045 |
YAML
| 34.011494 | 66 | 0.743842 |
shikimori/shikimori/config/locales/verbs.ru.yml
|
ru:
verbs:
watched_episodes:
one: просмотрен
few: просмотрены
many: просмотрено
read_volumes:
one: прочитан
few: прочитаны
many: прочитано
read_chapters:
one: прочитана
few: прочитаны
many: прочитано
added_by:
one: '@{f:добавила|m:добавил}'
few: добавили
many: добавили
wrote:
one: '@{f:написала|m:написал}'
few: написали
many: написали
| 449 |
YAML
| 18.565217 | 36 | 0.57461 |
shikimori/shikimori/config/locales/value_objects.ru.yml
|
ru:
titles/season_title:
anime: &anime_season_titles
catalog:
season:
winter: Зима %{year}
spring: Весна %{year}
summer: Лето %{year}
fall: Осень %{year}
year: '%{year} год'
decade: '%{decade}0-е годы'
ancient: Более старые
short:
season:
winter: Зимний сезон
spring: Весенний сезон
summer: Летний сезон
fall: Осенний сезон
year: '%{year} год'
full:
season:
winter: Зимний сезон %{year} года
spring: Весенний сезон %{year} года
summer: Летний сезон %{year} года
fall: Осенний сезон %{year} года
year: Аниме %{year} года
manga: &manga_season_titles
<<: *anime_season_titles
full:
season:
winter: Зимний сезон %{year} года
spring: Весенний сезон %{year} года
summer: Летний сезон %{year} года
fall: Осенний сезон %{year} года
year: Манга %{year} года
ranobe:
<<: *manga_season_titles
titles/status_title:
anime: &anime_status_titles
anons: Анонсы
ongoing: Онгоинги
released: Вышедшее
latest: Недавно вышедшее
manga: &manga_status_titles
        <<: *anime_status_titles
ongoing: Выходящее
ranobe:
<<: *manga_status_titles
ongoing: Онгоинги
| 1,383 |
YAML
| 26.137254 | 45 | 0.550253 |
shikimori/shikimori/config/locales/roles.ru.yml
|
ru:
role:
2nd Key Animation: Второстепен. анимация
ADR Director: Режиссёр перевода
Animation Check: Контроль анимации
Animation Director: Режиссёр анимации
Art Director: Арт-директор
Art: Рисовка
Assistant Animation Director: Помощник режиссёра анимации
Assistant Director: Помощник режиссёра
Assistant Engineer: Инженер-ассистент
Assistant Producer: Ассистент продюсера
Assistant Production Coordinat: Координация работ
Associate Casting Director: Помощник директора по кастингу
Associate Producer: Помощник продюсера
Background Art: Фоновая рисовка
Brazilian: Сэйю (BR)
Casting Director: Директор по кастингу
Character Design: Дизайн персонажей
Chief Animation Director: Главный аниматор
Chief Producer: Главный продюсер
Co-Director: Второй режиссёр
Co-Producer: Второй продюсер
Color Design: Дизайн цвета
Color Setting: Настройка цвета
Creator: Автор
Dialogue Editing: Редактор диалогов
Digital Paint: Компьютерная рисовка
Director of Photography: Оператор-постановщик
Director: Режиссёр
Editing: Монтаж
English: Сэйю (EN)
Episode Director: Режиссёр эпизодов
Executive Producer: Исполнительн. продюсер
French: Сэйю (FR)
German: Сэйю (DE)
Hebrew: Сэйю (IL)
Hungarian: Сэйю (HU)
In-Between Animation: Промежуточ. анимация
Inserted Song Performance: Музыкальное сопровождение
Italian: Сэйю (IT)
Japanese: Сэйю
Key Animation: Ключевая анимация
Korean: Сэйю (KR)
Layout: Вёрстка
Mandarin: Сэйю (CH)
Mechanical Design: Дизайн макетов
Music: Музыка
Online Editing Supervision: Надзор за редакторами
Online Editor: Редактор
Original Character Design: Оригинал. дизайн персонажей
Original Creator: Автор оригинала
Planning Producer: Продюсер планирования
Planning: Планирование
Portuguese (BR): Португальский (BR)
Post-Production Assistant: Пост-продакшн
Principle Drawing: Принцип рисовки
Producer: Продюсер
Production Assistant: Ассистент по производству
Production Coordination: Координация работы
Production Manager: Менеджер по производству
Publicity: Реклама
Re-Recording Mixing: Микширование звука
Recording Assistant: Помощник звукооператора
Recording Engineer: Звукооператор
Recording: Звукооператор
Screenplay: Сценарий
Script: Сценарий
Series Composition: Компоновка серий
Series Production Director: Директор по производству
Setting Manager: Менеджер по настройке
Setting: Настройка
Sound Director: Звукорежиссёр
Sound Effects: Звуковые эффекты
Sound Manager: Звукорежиссёр
Sound Supervisor: Звукорежиссёр
Spanish: Сэйю (ES)
Special Effects: Спецэффекты
Spotting: Корректировка
Story & Art: Сюжет и иллюстрации
Story: Сюжет
Storyboard: Раскадровка
Theme Song Arrangement: Аранжировка гл. муз. темы
Theme Song Composition: Композитор гл. муз. темы
Theme Song Lyrics: Лирика гл. муз. темы
Theme Song Performance: Исполнение гл. муз. темы
| 3,080 |
YAML
| 34.413793 | 62 | 0.744805 |
shikimori/shikimori/config/locales/decorators.en.yml
|
en:
db_entry_decorator: &db_entry_decorator
no_description: No description
ani_manga_decorator: &ani_manga_decorator
<<: *db_entry_decorator
time_ago_format: '%s ago'
anime_decorator:
<<: *ani_manga_decorator
anime_video_preview_decorator:
score:
excellent: excellent
good: good
okay: okay
character_decorator:
<<: *db_entry_decorator
job_title:
character: Character
anime: Anime character
anime_manga: Anime & Manga Character
anime_manga_ranobe: Anime & Manga & Light Novel character
anime_ranobe: Anime & Light Novel character
manga: Manga character
manga_ranobe: Manga & Light Novel character
ranobe: Light Novel character
contest_decorator:
<<: *db_entry_decorator
club_decorator:
<<: *db_entry_decorator
manga_decorator:
<<: *ani_manga_decorator
person_decorator: &person_decorator
<<: *db_entry_decorator
job_title:
producer: Producer
mangaka: Mangaka
composer: Composer
seyu: Seiyu
vocalist: Vocalist
anime_manga_projects_participant: Anime & Manga projects participant
# anime_manga_ranobe_projects_participant: Anime, Manga & Light novel projects participant
anime_projects_participant: Anime projects participant
# anime_ranobe_projects_participant: Anime & Light novel projects participant
manga_projects_participant: Manga projects participant
# manga_ranobe_projects_participant: Manga & Light novel projects participant
# ranobe_projects_participant: Light novel projects participant
_projects_participant: ''
seyu_decorator:
<<: *person_decorator
collection_decorator:
<<: *db_entry_decorator
user_decorator: &user_decorator
always_online: always online
always_online_bot: always online (bot)
online: online
offline: last online %{time_ago}%{ago}
ago: ago
registration_formats:
full: '%B %e, %Y'
month_year: '%B %Y'
year: '%Y'
user_history_decorator:
actions:
add: Added to list
delete: Removed from list
complete_with_score: '%{status_name} and rated <b>%{score}</b>'
episodes:
completed_movie: Watched movie
completed_anime: Watched all episodes
completed_novel: Read novel
completed_manga: Read manga
reset_anime: Reset episodes count
reset_manga: Reset volumes and chapters count
rate:
cancelled: Score removed
changed: Score changed from <b>%{prior_score}</b> to <b>%{score}</b>
rated: Rated <b>%{score}</b>
import:
anime: Anime imported - %{records}
manga: Manga imported - %{records}
registration: Registration
anime_history_clear: Anime history cleared
manga_history_clear: Manga history cleared
time_ago: '%{time_ago} ago'
watched_one_episode: '%{watched} %{division} %{number}'
watched_two_episodes: '%{watched} %{number_first} and %{number_second} %{division}'
watched_three_episodes: '%{watched} %{number_first}, %{number_second} and %{number_third} %{division}'
watched_episodes_range: '%{watched} %{division} %{number_first}-%{number_last}'
watched_first_episodes: '%{watched} %{number} %{division}'
user_profile_decorator:
<<: *user_decorator
version_decorator:
field_name:
screenshots_upload: Screenshots upload
screenshots_delete: Screenshots delete
screenshots_reposition: Screenshots order
poster_upload: Poster upload
poster_delete: Poster delete
video_upload: Video upload
video_delete: Video delete
role_add: Role add
role_remove: Role remove
| 3,684 |
YAML
| 35.127451 | 106 | 0.66721 |
shikimori/shikimori/config/locales/datetime.en.yml
|
en:
date:
formats: &date_formats
full: '%d.%m.%Y %H:%M'
human: '%B %e, %Y'
human_short: '%b %e, %Y'
human_day_month: '%B %e'
human_month_year: '%B %Y'
short: '%d.%m.%Y'
day_month_human: '%B %e'
month_year_human: '%B %Y'
ongoing: '%A, %B %e, %Y'
ongoing_short: '%A, %B %e, %Y'
time:
formats:
<<: *date_formats
#momentjs: '%Y-%m-%d %H:%M:%S'
datetime:
intervals:
today: today
yesterday: yesterday
during_week: during this week
week: one week ago
two_weeks: two weeks ago
three_weeks: three weeks ago
month: one month ago
two_months: two months ago
three_months: three months ago
four_months: four months ago
five_months: five months ago
half_year: half a year ago
year: one year ago
two_years: two years ago
many_years: a long time ago
release_dates:
date: '%{date}'
for_date: for %{date}
in_years: in %{from_date}-%{to_date}
since_date: since %{date}
since_till_date: '%{from_date} to %{to_date}'
till_date: till %{date}
parts:
second:
zero: seconds
one: second
other: seconds
minute:
zero: minutes
one: minute
other: minutes
hour:
zero: hours
one: hour
other: hours
day:
zero: days
one: day
other: days
week:
zero: weeks
one: week
other: weeks
month:
zero: months
one: month
other: months
year:
zero: years
one: year
other: years
| 1,668 |
YAML
| 21.253333 | 51 | 0.506595 |
shikimori/shikimori/config/locales/achievements.ru.yml
|
ru:
achievements:
group:
common: Общие
genre: Жанровые
franchise: Франшизы
author: Авторы
neko_name:
action: Боевик
animelist: Список аниме
comedy: Комедия
dementia_psychological: Безумие / Психологическое
drama: Драма
fantasy: Фэнтези
fujoshi: Фудзёси
gar: ГАР
historical: Историческое
horror_thriller: Хоррор / Триллер
josei: Дзёсей
kids: Детское
kuudere: Кудере
longshounen: Длиннотайтлы
mahou_shoujo: Махо-сёдзё
mecha: Меха
military: Военное
moe: Моэ
music: Музыка
mystery: Детектив
oldfag: Классика
oniichan: Запретная любовь
otaku: Искусствовед
police: Полиция
romance: Романтика
scifi: Фантастика
seinen: Сейнен
shortie: Короткометражки
slice_of_life: Повседневность
sovietanime: Наши в аниме
stop_motion: Покадровая анимация
space: Космос
sports: Спорт
supernatural: Сверхъестественное
test: Неизвестное достижение
tsundere: Цундере
yandere: Яндере
yuuri: Юри
genki: Генки
world_masterpiece_theater: Театр Мировых Шедевров
hint:
default: '%{neko_name} %{level}-го уровня'
animelist: '%{threshold} просмотренных аниме'
| 1,316 |
YAML
| 24.326923 | 55 | 0.634498 |
shikimori/shikimori/config/locales/views.yml
|
ru:
animes:
page:
kind: &video_kinds
raw: оригинал
subtitles: субтитры
fandub: озвучка
unknown: озвучка
new:
kind:
<<: *video_kinds
edit:
kind:
<<: *video_kinds
| 241 |
YAML
| 15.133332 | 27 | 0.481328 |
shikimori/shikimori/config/locales/devise.ru.yml
|
# Русский перевод для https://github.com/plataformatec/devise/tree/v4.3.0
# Другие переводы на http://github.com/plataformatec/devise/wiki/I18n
ru:
devise:
confirmations:
confirmed: "Твой аккаунт подтверждён."
send_instructions: "В течение нескольких минут ты получишь письмо с инструкцией по подтверждению аккаунта."
send_paranoid_instructions: "Если твоя почта есть в базе сайта, то в течение нескольких минут ты получишь письмо с инструкцией по подтверждению аккаунта."
failure:
already_authenticated: "Ты уже в системе."
inactive: "Твой аккаунт ещё не подтверждён."
invalid: "Неверный пароль или: %{authentication_keys}."
locked: "Твой аккаунт заблокирован."
last_attempt: "У тебя осталась ещё одна попытка ввести пароль до блокировки."
not_found_in_database: "Неверный пароль или: %{authentication_keys}."
timeout: "Твой сеанс закончился. Войди в систему снова."
unauthenticated: "Тебе необходимо войти в систему или зарегистрироваться."
unconfirmed: "Тебе нужно подтвердить твой аккаунт."
mailer:
confirmation_instructions:
subject: "Инструкция по подтверждению аккаунта"
reset_password_instructions:
subject: "Инструкция по восстановлению пароля"
unlock_instructions:
subject: "Инструкция по разблокировке аккаунта"
email_changed:
subject: "Почта была изменена"
password_change:
subject: "Пароль был изменён"
omniauth_callbacks:
register: 'Успешная регистрация через аккаунт %{kind}.'
failure: "Ты не можешь войти в систему с аккаунтом из %{kind}, так как \"%{reason}\"."
success: "Вход в систему выполнен с аккаунтом из %{kind}."
passwords:
no_token: "Эта страница доступна только при переходе по ссылке для сброса пароля. Если вы перешли по ссылке из письма, убедитесь, что вы использовали полный URL."
send_instructions: "В течение нескольких минут ты получишь письмо с инструкцией по восстановлению пароля."
send_paranoid_instructions: "Если твоя почта есть в базе сайта, то в течение нескольких минут ты получишь письмо с инструкцией по восстановлению пароля."
updated: "Твой пароль изменён"
updated_not_active: "Пароль изменён"
registrations:
destroyed: "Скатертью дорога! Твой аккаунт удалён."
signed_up: "Добро пожаловать! Регистрация завершена."
signed_up_but_inactive: "Вы зарегистрировались. Тем не менее, вы не можете войти, потому что ваш аккаунт ещё не подтверждён."
signed_up_but_locked: "Вы зарегистрировались. Тем не менее, вы не можете войти, потому что ваш аккаунт забанен."
signed_up_but_unconfirmed: "В течение нескольких минут ты получишь письмо с инструкцией по подтверждению аккаунта."
update_needs_confirmation: "Твой аккаунт обновлён, но необходимо подтвердить твою новую почту. Проверь свою почту и нажми на ссылку \"Подтвердить\", чтобы завершить обновление."
updated: "Твой аккаунт изменён"
sessions:
signed_in: "Вход на сайт выполнен"
signed_out: "Выход из сайта выполнен"
already_signed_out: "Выход из сайта выполнен"
unlocks:
send_instructions: "В течение нескольких минут ты получишь письмо с инструкцией по разблокировке аккаунта."
send_paranoid_instructions: "Если твой аккаунт существует, то в течение нескольких минут ты получишь письмо с инструкцией по его разблокировке."
unlocked: "Ваш аккаунт разблокирован. Теперь вы авторизованы."
user:
already_signed_out: :devise.sessions.signed_out
signed_in: :devise.sessions.signed_in
signed_out: :devise.sessions.signed_out
errors:
messages:
already_confirmed: "уже подтверждена. Пожалуйста, попробуй войти на сайт"
        confirmation_period_expired: "должен быть подтверждён в течение %{period}, запроси подтверждение ещё раз"
expired: "устарела. Запроси новую"
not_found: "не найден"
not_locked: "не заблокирован"
not_saved:
one: "%{resource}: сохранение не удалось из-за %{count} ошибки"
few: "%{resource}: сохранение не удалось из-за %{count} ошибок"
many: "%{resource}: сохранение не удалось из-за %{count} ошибок"
other: "%{resource}: сохранение не удалось из-за %{count} ошибки"
| 4,267 |
YAML
| 58.277777 | 183 | 0.718069 |
shikimori/shikimori/config/locales/datetime.ru.yml
|
ru:
date:
formats: &date_formats
full: '%H:%M %d.%m.%Y'
human: '%e %B %Y'
human_short: '%e %b %Y'
human_day_month: '%e %B'
human_month_year: '%B %Y'
short: '%d.%m.%Y'
day_month_human: '%e %B'
month_year_human: '%B %Y'
ongoing: '%A, %e %B %Y'
ongoing_short: '%A, %e %B'
time:
formats:
<<: *date_formats
#momentjs: '%Y-%m-%d %H:%M:%S'
datetime:
intervals:
today: сегодня
yesterday: вчера
during_week: в течение недели
week: неделя назад
two_weeks: две недели назад
three_weeks: три недели назад
month: месяц назад
two_months: два месяца назад
three_months: три месяца назад
four_months: четыре месяца назад
five_months: пять месяцев назад
half_year: более полугода назад
year: год назад
two_years: два года назад
many_years: совсем давно
release_dates:
date: '%{date} г.'
for_date: на %{date} г.
in_years: в %{from_date}-%{to_date} гг.
since_date: с %{date} г.
since_till_date: с %{from_date} г. по %{to_date} г.
till_date: до %{date} г.
parts:
second:
one: секунда
few: секунды
many: секунд
other: секунды
minute:
one: минута
few: минуты
many: минут
other: минуты
hour:
one: час
few: часа
many: часов
other: часа
day:
one: день
few: дня
many: дней
other: дня
week:
one: неделя
few: недели
many: недель
other: недели
month:
one: месяц
few: месяца
many: месяцев
other: месяца
year:
one: год
few: года
many: лет
other: года
| 1,814 |
YAML
| 21.134146 | 57 | 0.503308 |
shikimori/shikimori/config/locales/mailers.ru.yml
|
ru:
shiki_mailer:
private_message_email:
subject: Личное сообщение
body: |-
%{nickname}, у тебя 1 новое сообщение на %{site_link} от пользователя %{from_nickname}.
Прочитать можно тут: %{private_message_link}
Отписаться от уведомлений можно по ссылке:
%{unsubscribe_link}
reset_password_instructions:
subject: Инструкция по сбросу пароля
body: |-
Привет!
Кто-то задействовал процедуру сброса пароля для твоего аккаунта на %{site_link}.
Твой логин - %{nickname}.
Изменить пароль можно, перейдя по ссылке: %{reset_password_link}
Если тебе пришло несколько писем о восстановлении пароля, то переходить на страницу сброса пароля нужно обязательно по ссылке из самого последнего письма.
Если ты не запрашивал(-а) сброс пароля, то просто проигнорируй это письмо.
Твой пароль не будет изменён до тех пор, пока ты не перейдёшь по указанной выше ссылке.
| 976 |
YAML
| 32.689654 | 162 | 0.683402 |
shikimori/shikimori/config/locales/simple_form.en.yml
|
en:
simple_form:
      'yes': 'Yes'
      'no': 'No'
required:
text: 'required'
mark: '*'
# You can uncomment the line below if you need to overwrite the whole required html.
# When using html, text and mark won't be used.
# html: '<abbr title="required">*</abbr>'
error_notification:
      default_message: "Please review the problems below:"
# Labels and hints examples
# labels:
# defaults:
# password: 'Password'
# user:
# new:
# email: 'E-mail to sign in.'
# edit:
# email: 'E-mail.'
# hints:
# defaults:
# username: 'User name to sign in.'
# password: 'No special characters, please.'
labels:
user:
nickname: Login (nickname)
anime_video:
author_name: Author (dubbing, subtitles)
anime_video_author_id: Author (dubbing, subtitles)
placeholders:
topic:
title: Topic title
options:
topic:
type:
Topic: Topic
Topics::NewsTopic: News topic
user:
sex:
male: male
female: female
hints:
user:
nickname: Case sensitive
password: Case sensitive
email: Case sensitive
user_preferences:
apply_user_styles: >-
          Other site users can define their own styles
          (changing the appearance of the site) for their profile and club pages.
          If you disable this setting, you will always see the standard site style.
favorites_in_profile: >-
Changing it will break your default profile layout.
          <br>You may want to change it if you have custom styles in your profile.
version:
reason: &optional Optional
anime: &anime_hints
description_ru_source: *optional
description_en_source: *optional
episodes: It must be "0" for ongoings with an unknown total number of episodes
more_info: >-
Text imported from MAL is not displayed until the marker
<code class="b-code_inline">[MAL]</code> is removed from the text
manga:
<<: *anime_hints
volumes: It must be "0" for ongoings with an unknown total number of volumes
chapters: It must be "0" for ongoings with an unknown total number of chapters
anime_video:
author_name: >-
          Entry format: Project/studio_name (Dubber_nick_1 & Dubber_nick_2)
list_import:
list: Import supports Shikimori JSON and MyAnimeList XML lists (15mb max)
club:
is_censored: Required option for clubs with "adult" images and texts
is_private: Club content is only visible to club members and moderators
is_non_thematic: Non thematic clubs are not displayed on anime and manga pages
is_shadowbanned: Hides the club from everyone except its members
club_page:
parent_page_id: Inside which page the page is displayed
magic_submit:
devise:
sessions:
new: &sign_in
submit: Sign in
disable_with: Signing in…
users:
registrations:
new:
submit: Register
disable_with: Registering…
passwords:
new:
submit: Send instructions
disable_with: Sending instructions…
sessions:
new:
<<: *sign_in
dashboards:
show:
<<: *sign_in
club_invite: &send
submit: Send
disable_with: Sending…
comment: &comment
submit: Send
disable_with: Posting…
message:
<<: *comment
anime_video_report:
index:
<<: *send
list_import:
submit: Import
disable_with: Importing…
default:
submit: Save
retry: Try saving once again
disable_with: Saving…
new:
submit: Create
retry: Try again
edit:
submit: Save
feedback:
<<: *comment
| 4,093 |
YAML
| 24.116564 | 90 | 0.571463 |
shikimori/shikimori/config/locales/mailers.en.yml
|
en:
shiki_mailer:
private_message_email:
subject: Private message
body: |-
%{nickname}, you have 1 new message on %{site_link} from %{from_nickname}.
Read the message: %{private_message_link}
To unsubscribe from notification emails click here:
%{unsubscribe_link}
reset_password_instructions:
subject: Reset password instructions
body: |-
Hi!
We have received a request to reset your account password on %{site_link}.
Your account login is %{nickname}.
To reset you password click this link: %{reset_password_link}
If you didn't make a request to reset your password just ignore this message.
Your password will not change until you click the link above.
| 777 |
YAML
| 27.814814 | 85 | 0.65251 |
shikimori/shikimori/config/locales/simple_form.ru.yml
|
ru:
simple_form:
      'yes': Да
      'no': Нет
required:
text: 'Обязательное поле'
mark: '*'
# You can uncomment the line below if you need to overwrite the whole required html.
# When using html, text and mark won't be used.
# html: '<abbr title="required">*</abbr>'
error_notification:
default_message: "Пожалуйста, исправьте следующие ошибки:"
# Labels and hints examples
# labels:
# defaults:
# password: 'Password'
# user:
# new:
# email: 'E-mail to sign in.'
# edit:
# email: 'E-mail.'
# hints:
# defaults:
# username: 'User name to sign in.'
# password: 'No special characters, please.'
labels:
user:
nickname: Логин (никнейм)
anime_video:
author_name: Автор (озвучки, субтитров)
anime_video_author_id: Автор (озвучки, субтитров)
placeholders:
topic:
title: Название топика
options:
topic:
type:
Topic: Топик
Topics::NewsTopic: Новостной топик
user:
sex:
male: муж.
female: жен.
hints:
user:
nickname: Чувствителен к регистру
password: Чувствителен к регистру
email: >-
Чувствителен к регистру<br>
Письма на<span class="b-tag narrow">@mail.ru</span><span class="b-tag narrow">@inbox.ru</span><span class="b-tag narrow">@list.ru</span><span class="b-tag narrow">@bk.ru</span>
могут попадать в спам, проверяй в этой папке тоже.
user_preferences:
apply_user_styles: >-
Другие пользователи сайта могут задавать собственные стили
(изменять внешний вид сайта) для страниц своего профиля и
клубов.<br>Отключив эту настройку, вы всегда будете видеть
стандартный стиль сайта.
favorites_in_profile: >-
Изменение этой настройки поломает стандартную вёрстку профиля.
<br>Можно менять, если у тебя собственные стили в профиле.
version:
reason: &optional Не обязательно
anime: &anime_hints
description_ru_source: *optional
description_en_source: *optional
episodes: Для онгоингов с неизвестным числом эпизодов ставь "0"
more_info: >-
Импортированные с MAL тексты не отображаются, пока из текста не удалён маркер
<code class="b-code_inline">[MAL]</code>
manga:
<<: *anime_hints
volumes: Для онгоингов с неизвестным числом томов ставь "0"
chapters: Для онгоингов с неизвестным числом глав ставь "0"
anime_video:
author_name: >-
Формат записи: Название_проекта/студии (Ник_даббера1 & Ник_даббера2)
list_import:
list: Поддерживает Shikimori JSON и MyAnimeList XML списки (до 15mb)
club:
is_censored: Обязательная настройка для клубов со "взрослыми" картинками и текстами
is_private: Содержимое клуба видно только участникам клуба и модераторам
is_non_thematic: Не тематические клубы не отображаются на страницах аниме и манги
is_shadowbanned: Скрывает клуб для всех, кроме его участников
club_page:
parent_page_id: Внутри какой страницы отображается страница
magic_submit:
devise:
sessions:
new: &sign_in
submit: Войти
disable_with: Вход…
users:
registrations:
new:
submit: Зарегистрироваться
disable_with: Регистрация…
passwords:
new:
submit: Отправить инструкцию
disable_with: Отправляем инструкцию…
sessions:
new:
<<: *sign_in
dashboards:
dynamic:
<<: *sign_in
club_invite: &send
submit: Отправить
disable_with: Отправка…
comment: &comment
submit: Написать
disable_with: Отправка…
message:
<<: *comment
anime_video_report:
index:
<<: *send
list_import:
submit: Импортировать
disable_with: Импорт…
default:
submit: Сохранить
retry: Попробовать ещё раз
disable_with: Сохранение…
new:
submit: Создать
edit:
submit: Сохранить
feedback:
<<: *comment
helpers:
submit:
user: &user_buttons
create: Сохранить
update: Сохранить
user_preferences:
<<: *user_buttons
topic:
<<: *user_buttons
critique:
<<: *user_buttons
| 4,607 |
YAML
| 27.269938 | 186 | 0.585196 |
shikimori/shikimori/config/locales/frontend/edit_field.ru.yml
|
ru:
frontend:
synonyms: &synonyms
nothing_here: Нет названий
name: Название
licensors:
<<: *synonyms
coub_tags:
<<: *synonyms
fansubbers:
<<: *synonyms
fandubbers:
<<: *synonyms
desynced:
nothing_here: Нет поля
name: Название поля
options:
nothing_here: Нет настроек
name: Настройка
| 373 |
YAML
| 17.699999 | 32 | 0.571046 |
shikimori/shikimori/config/locales/frontend/about.ru.yml
|
ru:
frontend:
about:
views: Просмотры
visits: Посещения
unique_visitors: Уникальные посетители
comments_per_day: Комментариев за день
new_users_per_day: Новых пользователей за день
| 217 |
YAML
| 23.22222 | 52 | 0.682028 |
shikimori/shikimori/config/locales/frontend/search.ru.yml
|
ru:
frontend:
search:
nothing_found: Ничего не найдено.
mode:
index: Текущая страница
anime: Аниме
manga: Манга
ranobe: Ранобэ
character: Персонаж
person: Человек
| 229 |
YAML
| 18.166665 | 39 | 0.563319 |
shikimori/shikimori/config/locales/frontend/collections.en.yml
|
en:
frontend:
collections:
kind:
anime: Anime
manga: Manga
ranobe: Light Novel and Novel
character: Characters
person: People
group_name: Group name
disabled_add_group_hint: >-
To add a new group, fill in the blank group name
json_warning: >-
Do not edit this if you are not sure what you are doing!
Inserting invalid data will break the page.
autocomplete:
anime: Anime title
manga: Manga title
ranobe: Light Novel title
character: Character name
person: Person name
| 608 |
YAML
| 26.681817 | 64 | 0.601974 |
shikimori/shikimori/config/locales/frontend/pages.en.yml
|
en:
frontend:
pages:
# p-animes.coffee
p_animes:
hentai: Hentai / Roskomnadzor
licensed: Licensed in Russia
no_data: No data
watch_online: Watch Online
# p-contests
p_contests:
# p-contests/_form.coffee
candidate:
one: '%{count} candidate'
few: '%{count} candidates'
many: '%{count} candidates'
other: '%{count} candidates'
# p-profiles
p_profiles:
# p-profiles/ban.coffee
page_is_reloading: Page is reloading...
# p-profiles/show.coffee
hour:
one: hour
few: hours
many: hours
day:
one: day
few: days
many: days
label:
full: >-
%{hours} %{hourWord} since %{fromDate} till %{toDate}
(%{days} %{dayWord})
short: >-
%{hours} %{hourWord} on %{date}
# p-user_rates
p_user_rates:
# p-user_rates/index.coffee
insufficient_data: Insufficient data
error_occurred: Error occurred
changes_saved: Changes saved
rewatch:
one: rewatch
few: rewatches
many: rewatches
reread:
one: re-read
few: re-reads
many: re-reads
# p-recommendations-index.coffee
p_recommendations_index:
dont_recommend_franchise: Don't recommend this franchise any more
| 1,465 |
YAML
| 23.847457 | 73 | 0.517406 |
shikimori/shikimori/config/locales/frontend/collections.ru.yml
|
ru:
frontend:
collections:
kind:
anime: Аниме
manga: Манга
ranobe: Ранобэ и новеллы
character: Персонажи
person: Люди
group_name: Название группы
disabled_add_group_hint: >-
Для добавления следующей группы заполните пустое название группы
json_warning: >-
Не редактируй это, если не уверен(-а) в том, что делаешь!
Вставка неправильных данных сломает работу страницы.
autocomplete:
anime: Название аниме
manga: Название манги
ranobe: Название ранобэ
character: Имя персонажа
person: Имя человека
| 635 |
YAML
| 27.90909 | 72 | 0.626772 |
shikimori/shikimori/config/locales/frontend/statistics.ru.yml
|
ru:
frontend:
statistics:
number: Количество
anime_with_score: <b>%{count}</b> аниме с оценкой <b>%{score}</b>
anime_of_type: <b>%{count}</b> аниме типа <b>%{type}</b>
anime_in_year: '%{count} %{type} за %{year} год'
anime_with_rating_in_year: '%{count} аниме у %{rating} за %{year} год'
share: Процент
ratings_share: '%{percent}% у %{rating} за %{year} год'
genres_share: '%{percent}% у %{genre} за %{year} год'
| 467 |
YAML
| 37.999997 | 76 | 0.569593 |
shikimori/shikimori/config/locales/frontend/achievements.en.yml
|
en:
frontend:
achievements:
title:
gained: Achievement Gained
lost: Achievement Lost
| 113 |
YAML
| 15.285712 | 34 | 0.610619 |
shikimori/shikimori/config/locales/frontend/pages.ru.yml
|
ru:
frontend:
pages:
# p-animes.coffee
p_animes:
hentai: Хентай / Роскомнадзор
licensed: Лицензировано в РФ
no_data: Нет данных
watch_online: Смотреть онлайн
# p-contests
p_contests:
# p-contests/_form.coffee
candidate:
one: '%{count} участник'
few: '%{count} участника'
many: '%{count} участников'
other: '%{count} участников'
# p-profiles
p_profiles:
          # p-profiles/ban.coffee
page_is_reloading: Перезагрузка страницы...
# p-profiles/show.coffee
hour:
one: час
few: часа
many: часов
day:
one: день
few: дня
many: дней
label:
full: >-
%{hours} %{hourWord} с %{fromDate} по %{toDate}
(%{days} %{dayWord})
short: >-
%{hours} %{hourWord} %{date}
        # p-user_rates
p_user_rates:
          # p-user_rates/index.coffee
insufficient_data: Недостаточно данных
error_occurred: Произошла ошибка
changes_saved: Изменения сохранены
rewatch:
one: повторный просмотр
few: повторных просмотра
many: повторных просмотров
reread:
one: повторное прочтение
few: повторных прочтения
many: повторных прочтений
# p-recommendations-index.coffee
p_recommendations_index:
dont_recommend_franchise: Больше не рекомендовать эту франшизу
| 1,535 |
YAML
| 25.033898 | 70 | 0.540065 |
shikimori/shikimori/config/locales/frontend/search.en.yml
|
en:
frontend:
search:
nothing_found: Nothing found.
mode:
index: This page
anime: Anime
manga: Manga
ranobe: Light novel
character: Character
person: Person
| 223 |
YAML
| 17.666665 | 35 | 0.55157 |
shikimori/shikimori/config/locales/frontend/external_links.ru.yml
|
ru:
frontend:
external_links:
nothing_here: Нет ссылок
groups:
links: Ссылки
watch_online: Онлайн-просмотр
warn:
youtube: Ссылка на официальный канал, где выложены серии для онлайн-просмотра
watch_online: Ссылка на плейлист/страницу с плеером/официальный канал с онлайн-просмотром
| 337 |
YAML
| 29.72727 | 97 | 0.682493 |
shikimori/shikimori/config/locales/frontend/about.en.yml
|
en:
frontend:
about:
views: Views
visits: Visits
unique_visitors: Unique visitors
comments_per_day: Comments per day
new_users_per_day: New users per day
| 190 |
YAML
| 20.22222 | 42 | 0.636842 |
shikimori/shikimori/config/locales/frontend/shiki_editor.en.yml
|
en:
frontend:
shiki_editor:
not_available: Commenting will be available one day after registering
text_cant_be_blank: Text can't be blank
file: file
bold: Bold
italic: Italic
underline: Underlined
strike: Strikethrough
color: Color
undo: Undo last change
redo: Redo last change
spoiler: Spoiler
spoiler_inline: Spoiler
code_inline: Code
link: Link
smiley: Smiley
shiki_link: Shiki link
image: Image by link
upload: Images upload
spoiler_block: Spoiler block
code_block: Code block
bullet_list: List
headline: Headline
blockquote: Quote
prompt:
image_url: Image URL
link_url: Link URL
spoiler_label: Spoiler label
preview: Preview
source: Source code
huge_content_mode: Text is too large. The visual editor will fail and has therefore been disabled.
huge_content_pasted: Text is too large. The visual editor will fail and therefore paste has been cancelled.
normal_content_mode: The visual editor is available again
unsaved_content:
label: The editor has unsaved draft. Restore it?
draft: Draft
          'yes': 'Yes'
          'no': 'No'
colors:
yellow: Yellow
orange: Orange
red: Red
pink: Pink
violet: Violet
blue: Blue
green: Green
brown: Brown
gray: Gray
black: Black
headlines:
header_1: '# Large header'
header_2: '## Medium header'
header_3: '### Small header'
headline: '#### Headline'
midheadline: '##### Subheadline'
| 1,677 |
YAML
| 27.931034 | 113 | 0.60167 |
shikimori/shikimori/config/locales/frontend/images.ru.yml
|
ru:
frontend:
images:
delete: Удалить
confirm: Подтвердить
cancel: Отменить
| 100 |
YAML
| 13.42857 | 26 | 0.61 |