desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def decode(self, ids):
  """Transform a sequence of int ids into a human-readable string.

  EOS is not expected in ids.

  Args:
    ids: list of integers to be converted.

  Returns:
    s: human-readable string.
  """
  def _readable(token_id):
    # Reserved ids render as their token strings; all other ids are shifted
    # back by the number of reserved ids.
    if 0 <= token_id < self._num_reserved_ids:
      return RESERVED_TOKENS[int(token_id)]
    return token_id - self._num_reserved_ids
  return ' '.join(str(_readable(i)) for i in ids)
def __init__(self, vocab_filename, reverse=False, vocab_list=None,
             num_reserved_ids=NUM_RESERVED_TOKENS):
  """Initialize from a file or list, one token per line."""
  super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)
  self._reverse = reverse
  if vocab_filename:
    self._init_vocab_from_file(vocab_filename)
    return
  # No filename given: a vocab list is mandatory.
  assert vocab_list is not None
  self._init_vocab_from_list(vocab_list)
def encode(self, sentence):
  """Converts a space-separated string of tokens to a list of ids."""
  token_ids = [self._token_to_id[tok] for tok in sentence.strip().split()]
  if self._reverse:
    return token_ids[::-1]
  return token_ids
def _init_vocab_from_file(self, filename):
  """Load vocab from a file."""
  def token_gen():
    # Stream tokens lazily, one per line, stripping surrounding whitespace.
    with tf.gfile.Open(filename) as f:
      for line in f:
        yield line.strip()
  self._init_vocab(token_gen())
def _init_vocab(self, token_generator):
  """Initialize vocabulary with tokens from token_generator."""
  # Reserved tokens occupy the lowest ids; generated tokens follow.
  self._id_to_token = dict(enumerate(RESERVED_TOKENS))
  next_id = len(RESERVED_TOKENS)
  for token in token_generator:
    self._id_to_token[next_id] = token
    next_id += 1
  # Inverse mapping for string -> id lookups.
  self._token_to_id = {v: k for k, v in six.iteritems(self._id_to_token)}
def __init__(self, filename=None):
  """Initialize and read from a file, if provided."""
  self._alphabet = set()
  if (filename is not None):
    self._load_from_file(filename)
  # The superclass constructor runs after any file load, and reserved ids
  # are explicitly disabled here (num_reserved_ids=None).
  super(SubwordTextEncoder, self).__init__(num_reserved_ids=None)
def encode(self, raw_text):
  """Converts a native string to a list of subtoken ids.

  Args:
    raw_text: a native string.

  Returns:
    a list of integers in the range [0, vocab_size)
  """
  unicode_text = native_to_unicode(raw_text)
  tokens = tokenizer.encode(unicode_text)
  return self._tokens_to_subtoken_ids(tokens)
def decode(self, subtokens):
  """Converts a sequence of subtoken ids to a native string.

  Args:
    subtokens: a list of integers in the range [0, vocab_size)

  Returns:
    a native string
  """
  tokens = self._subtoken_ids_to_tokens(subtokens)
  return unicode_to_native(tokenizer.decode(tokens))
@property
def vocab_size(self):
  """The subtoken vocabulary size."""
  # Counts every slot in the subtoken table, including any leading
  # reserved-id slots stored as empty strings.
  return len(self._all_subtoken_strings)
def _tokens_to_subtoken_ids(self, tokens):
  """Converts a list of tokens to a list of subtoken ids.

  Args:
    tokens: a list of strings.

  Returns:
    a list of integers in the range [0, vocab_size)
  """
  # Escape each token, segment it into subtokens, and flatten the result.
  return [
      subtoken_id
      for token in tokens
      for subtoken_id in self._escaped_token_to_subtoken_ids(
          _escape_token(token, self._alphabet))
  ]
def _subtoken_ids_to_tokens(self, subtokens):
  """Converts a list of subtoken ids to a list of tokens.

  Args:
    subtokens: a list of integers in the range [0, vocab_size)

  Returns:
    a list of strings.
  """
  pieces = [self._subtoken_id_to_subtoken_string(s) for s in subtokens]
  joined = ''.join(pieces)
  # Tokens are terminated by '_' in the escaped representation; restore the
  # terminator before unescaping each non-empty piece.
  return [_unescape_token(part + '_') for part in joined.split('_') if part]
def _subtoken_id_to_subtoken_string(self, subtoken):
  """Converts a subtoken integer ID to a subtoken string."""
  in_range = 0 <= subtoken < self.vocab_size
  # Out-of-range ids map to the empty string.
  return self._all_subtoken_strings[subtoken] if in_range else u''
'Converts an escaped token string to a list of subtoken strings. Args: escaped_token: An escaped token as a unicode string. Returns: A list of subtokens as unicode strings.'
def _escaped_token_to_subtoken_strings(self, escaped_token):
ret = [] start = 0 token_len = len(escaped_token) while (start < token_len): for end in xrange(min(token_len, (start + self._max_subtoken_len)), start, (-1)): subtoken = escaped_token[start:end] if (subtoken in self._subtoken_string_to_id): ret.append(subt...
def _escaped_token_to_subtoken_ids(self, escaped_token):
  """Converts an escaped token string to a list of subtoken IDs.

  Args:
    escaped_token: An escaped token as a unicode string.

  Returns:
    A list of subtoken IDs as integers.
  """
  subtoken_strings = self._escaped_token_to_subtoken_strings(escaped_token)
  return list(map(self._subtoken_string_to_id.__getitem__, subtoken_strings))
'Builds a SubwordTextEncoder that has `vocab_size` near `target_size`. Uses simple recursive binary search to find a minimum token count that most closely matches the `target_size`. Args: target_size: Desired vocab_size to approximate. token_counts: A dictionary of token counts, mapping string to int. min_val: An integ...
@classmethod def build_to_target_size(cls, target_size, token_counts, min_val, max_val, num_iterations=4):
if (min_val > max_val): raise ValueError('Lower bound for the minimum token count is greater than the upper bound.') def bisect(min_val, max_val): 'Bisection to find the right size.' present_count = ((max_val + min_val) // 2) tf....
'Train a SubwordTextEncoder based on a dictionary of word counts. Args: token_counts: a dictionary of Unicode strings to int. min_count: an integer - discard subtokens with lower counts. num_iterations: an integer. how many iterations of refinement. num_reserved_ids: an integer. how many ids to reserve for special to...
def build_from_token_counts(self, token_counts, min_count, num_iterations=4, num_reserved_ids=NUM_RESERVED_TOKENS):
self._init_alphabet_from_tokens(six.iterkeys(token_counts)) self._init_subtokens_from_list(list(self._alphabet), reserved=num_reserved_ids) if (min_count < 1): min_count = 1 for i in xrange(num_iterations): tf.logging.info('Iteration {0}'.format(i)) subtoken_counts = collectio...
def dump(self):
  """Debugging dump of the current subtoken vocabulary."""
  # Sort by id for a stable, readable listing.
  pairs = sorted((i, s) for s, i in six.iteritems(self._subtoken_string_to_id))
  print(u', '.join(u"{0} : '{1}'".format(i, s) for i, s in pairs))
def _init_subtokens_from_list(self, subtoken_strings, reserved=0):
  """Initialize token information from a list of subtoken strings.

  Args:
    subtoken_strings: a list of subtoken strings.
    reserved: an integer, the number of leading ids to reserve; reserved
      slots are stored as empty strings and never looked up by string.
  """
  self._all_subtoken_strings = (([u''] * reserved) + subtoken_strings)
  # `or [0]` guards the empty-list case, which previously raised
  # ValueError from max() on an empty sequence.
  self._max_subtoken_len = max([len(s) for s in subtoken_strings] or [0])
  # Empty strings are skipped, so reserved slots cannot be looked up.
  self._subtoken_string_to_id = {
      s: (i + reserved) for (i, s) in enumerate(subtoken_strings) if s}
def _init_alphabet_from_tokens(self, tokens):
  """Initialize alphabet from an iterable of token or subtoken strings."""
  alphabet = set()
  for token in tokens:
    alphabet.update(token)
  # Escape characters must always be representable.
  alphabet |= _ESCAPE_CHARS
  self._alphabet = alphabet
def _load_from_file(self, filename):
  """Load from a file."""
  with tf.gfile.Open(filename) as f:
    # Drop the first and last character of each stripped line -- presumably
    # the surrounding quote delimiters; confirm against the writer format.
    subtoken_strings = [native_to_unicode(line.strip()[1:-1]) for line in f]
  self._init_subtokens_from_list(subtoken_strings)
  self._init_alphabet_from_tokens(subtoken_strings)
@property
def num_output_predictions(self):
  """Number of float predictions per timestep."""
  # Fixed at 10 for this problem.
  return 10
'Generator; takes 3 args: nbr_symbols, max_length, nbr_cases.'
@property def train_generator(self):
def _gen(nbr_symbols, max_length, nbr_cases): plain_vocab = range(nbr_symbols) indices = generate_plaintext_random(plain_vocab, self.distribution, nbr_cases, max_length) codes = encipher_shift(indices, plain_vocab, self.shift) for (plain, code) in zip(indices, codes): (yi...
'Generator; takes 3 args: nbr_symbols, max_length, nbr_cases.'
@property def train_generator(self):
def _gen(nbr_symbols, max_length, nbr_cases): plain_vocab = range(nbr_symbols) indices = generate_plaintext_random(plain_vocab, self.distribution, nbr_cases, max_length) codes = encipher_vigenere(indices, plain_vocab, self.key) for (plain, code) in zip(indices, codes): (y...
def __init__(self, vocab, shift):
  """Initialize shift layer.

  Args:
    vocab: (list of String) the vocabulary
    shift: (Integer) the amount of shift to apply to the alphabet.
      Positive number implies shift to the right, negative number
      implies shift to the left.
  """
  self.shift = shift
  rotated = deque(vocab)
  rotated.rotate(shift)
  shifted = list(rotated)
  # Forward and inverse substitution tables.
  self.encrypt = dict(zip(vocab, shifted))
  self.decrypt = dict(zip(shifted, vocab))
'Check if name is in orig_ctr or in one of the other type containers.'
def _check_reset_and_type_change(self, name, orig_ctr):
if (name in orig_ctr): tf.logging.warning('Overwriting hparam %s', name) ctr_names = [(self._categorical_params, 'categorical'), (self._discrete_params, 'discrete'), (self._float_params, 'float'), (self._int_params, 'int')] (ctrs, names) = list(zip(*ctr_names)) orig_name = names[ctrs.index...
'Create or get concatenated embedding or softmax variable. Returns: a list of self._num_shards Tensors.'
def _get_weights(self):
num_shards = self._model_hparams.symbol_modality_num_shards shards = [] for i in xrange(num_shards): shard_size = ((self._vocab_size // num_shards) + (1 if (i < (self._vocab_size % num_shards)) else 0)) var_name = ('weights_%d' % i) shards.append(tf.get_variable(var_name, [shard_size...
'Generate logits. Args: body_output: A Tensor with shape [batch, p0, p1, body_input_depth] Returns: logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].'
def top(self, body_output, _):
if self._model_hparams.shared_embedding_and_softmax_weights: scope_name = 'shared' reuse = True else: scope_name = 'softmax' reuse = False with tf.variable_scope(scope_name, reuse=reuse): var = self._get_weights() shape = tf.shape(body_output)[:(-1)] b...
'Transform input from data space to model space. Perform the Xception "Entry flow", which consists of two convolutional filter upscalings followed by three residually connected separable convolution blocks. Args: inputs: A Tensor with shape [batch, ...] Returns: body_input: A Tensor with shape [batch, ?, ?, body_input_...
def bottom(self, inputs):
with tf.variable_scope(self.name): def xnet_resblock(x, filters, res_relu, name): with tf.variable_scope(name): y = common_layers.separable_conv_block(x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], first_relu=True, padding='SAME', force2d=True, name='sep_conv_block') ...
'Transform input from data space to model space. Args: inputs: A Tensor with shape [batch, ...] Returns: body_input: A Tensor with shape [batch, ?, ?, body_input_depth].'
def bottom(self, inputs):
with tf.variable_scope(self.name): def xnet_resblock(x, filters, res_relu, name): with tf.variable_scope(name): y = common_layers.separable_conv_block(x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], first_relu=True, padding='SAME', force2d=True, name='sep_conv_block') ...
'Transform input from data space to model space. Args: inputs: A Tensor with shape [batch, ...] Returns: body_input: A Tensor with shape [batch, ?, ?, body_input_depth].'
def bottom(self, inputs):
with tf.variable_scope(self.name): def xnet_resblock(x, filters, res_relu, name): with tf.variable_scope(name): y = common_layers.separable_conv_block(x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], first_relu=True, padding='SAME', force2d=True, name='sep_conv_block') ...
'Transform inputs from model space to target space. Perform the Xception "Exit flow", consisting of a single residual block and two separable convolutional upscalings followed by global spatial average pooling. Args: body_output: A Tensor with shape [batch, ?, ?, body_output_size]. Returns: a Tensors, each with shape [...
def top(self, body_output, _):
with tf.variable_scope(self.name): x = body_output if self._is_2d: length_float = tf.to_float(tf.shape(x)[1]) spatial_dim_float = tf.sqrt(length_float) spatial_dim = tf.to_int32(spatial_dim_float) x = tf.reshape(x, [(-1), spatial_dim, spatial_dim, self...
'Create a T2TModel. Args: hparams: a hyperparameters object. mode: The execution mode, as defined in tf.contrib.learn.ModeKeys. problem_hparams: a hyperparameters object. problem_idx: an integer. data_parallelism: a expert_utils.parallelism (specifies devices for data parallelism). ps_devices: a list of devices to be u...
def __init__(self, hparams, mode, problem_hparams, problem_idx=0, data_parallelism=None, ps_devices=None):
if (data_parallelism is None): data_parallelism = eu.Parallelism(['']) if (ps_devices is None): ps_devices = [''] hparams = copy.copy(hparams) hparams.add_hparam('mode', mode) if (mode != tf.contrib.learn.ModeKeys.TRAIN): for key in hparams.values(): if (key[(- le...
'Construct modalities in problem_hparams.'
def _create_modalities(self, problem_hparams, hparams):
input_modality_overrides = {} for override_str in hparams.input_modalities.split(';'): if (override_str != 'default'): parts = override_str.split(':') feature_name = parts[0] modality_name = ':'.join(parts[1:]) input_modality_overrides[feature_name] = moda...
def eval_autoregressive(self, features=None, decode_length=50, last_position_only=False):
  """Autoregressive eval.

  Quadratic time in decode_length.

  Args:
    features: a map of string to `Tensor`
    decode_length: an integer.  How many additional timesteps to decode.
    last_position_only: a boolean, speed-up by computing last position only.

  Returns:
    sharded_logits: a list of `Tensor`s.  Assumes one datashard.
    losses: the losses dictionary produced by `_greedy_infer`.
  """
  # Samples from greedy inference are discarded; only logits and losses
  # are returned.
  _, logits, losses = self._greedy_infer(
      features, decode_length=decode_length,
      last_position_only=last_position_only)
  return [logits], losses
'A inference method. Quadratic time in decode_length. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. beam_size: number of beams. top_beams: an integer. How many of the beams to return. last_position_only: a boolean, speed-up by computing last position o...
def infer(self, features=None, decode_length=50, beam_size=1, top_beams=1, last_position_only=False, alpha=0.0):
if (not self.has_input): beam_size = 1 self._hparams.sampling_method = 'random' if _is_class_modality(self._hparams.problems[self._problem_idx].target_modality): beam_size = 1 if (beam_size == 1): tf.logging.info('Greedy Decoding') (samples, _, _) = self._greedy_in...
'Beam search decoding. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. beam_size: number of beams. top_beams: an integer. How many of the beams to return. last_position_only: a boolean, speed-up by computing last position only. alpha: Float that controls...
def _beam_decode(self, features, decode_length, beam_size, top_beams, last_position_only, alpha):
def symbols_to_logits_fn(ids): 'Go from ids to logits.' ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3) ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]]) features['targets'] = ids self._coverage = None (sharded_logits, _) = self.model_fn...
'A slow greedy inference method. Quadratic time in decode_length. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. last_position_only: a boolean, speed-up by computing last position only. Returns: samples: an integer `Tensor`. logits: `Tensor` of shape [b...
def _greedy_infer(self, features, decode_length, last_position_only):
if (not features): features = {} inputs_old = None if (('inputs' in features) and (len(features['inputs'].shape) < 4)): inputs_old = features['inputs'] features['inputs'] = tf.expand_dims(features['inputs'], 2) if (not self.has_input): features['partial_targets'] = tf.to_...
'Run the model and extract samples. Args: features: an map of string to `Tensor`. last_position_only: a boolean, speed-up by computing last position only. Returns: samples: an integer `Tensor`. logits: a list of `Tensor`s, one per datashard. losses: a dictionary: {loss-name (string): floating point `Scalar`}.'
def sample(self, features, last_position_only=False):
(sharded_logits, losses) = self.model_fn(features, False, last_position_only=last_position_only) if (self._hparams.sampling_method == 'argmax'): sharded_samples = self._data_parallelism(tf.argmax, sharded_logits, 4) else: assert (self._hparams.sampling_method == 'random') def _multin...
'Computes the entire model and produces sharded logits and losses. Args: features: A dictionary of feature name to tensor. skip: a boolean, if we\'re just dummy-calling and actually skip this model (but we need to create variables to not confuse distributed training). last_position_only: a boolean, compute logits for o...
def model_fn(self, features, skip=False, last_position_only=False):
start_time = time.time() dp = self._data_parallelism sharded_features = self._shard_features(features) transformed_features = {} all_previous_modalities = [] for (key, input_modality) in six.iteritems(self._problem_hparams.input_modality): previous_modalities = [self._hparams.problems[i]...
'Mixture-of-experts models will override this function. Compute model body on all datashards. Args: sharded_features: map from string to list of Tensors each with shape [batch, ?, ?, body_input_size] Returns: sharded_body_output: a list of Tensors, each with shape [batch, O, P, body_output_size] extra_loss: a Scalar.'
def model_fn_body_sharded(self, sharded_features):
with tf.name_scope('model'): datashard_to_features = [{k: v[d] for (k, v) in six.iteritems(sharded_features)} for d in xrange(self._num_datashards)] output = self._data_parallelism(_with_timing(self.model_fn_body, 'model_fn_body'), datashard_to_features) if isinstance(output, tuple): ...
def model_fn_body(self, features):
  """Most models will override this function.

  Compute label logits for one shard as a function of the transformed
  features.

  Args:
    features: A dictionary of key to Tensor.  Each Tensor has shape
      [batch_size, ?, ?, hidden_size].

  Returns:
    output: tensor of logits with shape [batch_size, O, P, body_output_size].

  Raises:
    NotImplementedError: always; subclasses must override.
  """
  raise NotImplementedError('Abstract Method')
'Construct a new YellowFin optimizer. Args: learning_rate: A Tensor or a floating point value. The learning rate. momentum: A Tensor or a floating point value. The momentum. clip_thresh: A Tensor or a floating point value. The cliping threshold for tf.clip_by_global_norm. If None, no clipping will be carried out. be...
def __init__(self, learning_rate=1.0, momentum=0.0, clip_thresh=None, beta=0.999, curvature_window_width=20, zero_debias=True, delta_mu=0.0):
self._lr = learning_rate self._mu = momentum self._lr_var = tf.Variable(learning_rate, dtype=tf.float32, name='YF_lr', trainable=False) self._mu_var = tf.Variable(momentum, dtype=tf.float32, name='YF_mu', trainable=False) self.lr_factor = tf.Variable(1.0, dtype=tf.float32, name='YF_lr_factor', train...
'Curvature range. Returns: h_max_t, h_min_t ops'
def _curvature_range(self):
self._curv_win = tf.Variable(np.zeros([self.curvature_window_width]), dtype=tf.float32, name='curv_win', trainable=False) self._curv_win = tf.scatter_update(self._curv_win, (self._step % self.curvature_window_width), self._grad_norm_squared) valid_window = tf.slice(self._curv_win, tf.constant([0]), tf.expan...
'Estimate of gradient Variance. Returns: C_t ops.'
def _grad_variance(self):
grad_var_ops = [] tensor_to_avg = [] for (t, g) in zip(self._vars, self._grad): if isinstance(g, tf.IndexedSlices): tensor_to_avg.append(tf.reshape(tf.unsorted_segment_sum(g.values, g.indices, g.dense_shape[0]), shape=t.get_shape())) else: tensor_to_avg.append(g) ...
'Distance to optimum. Returns: D_t ops'
def _dist_to_opt(self):
dist_to_opt_ops = [] self._grad_norm = tf.sqrt(self._grad_norm_squared) avg_op = self._moving_averager.apply([self._grad_norm]) dist_to_opt_ops.append(avg_op) with tf.control_dependencies([avg_op]): self._grad_norm_avg = self._moving_averager.average(self._grad_norm) self._d_t = (sel...
'Prepare Variables for YellowFin. Returns: Grad**2, Norm, Norm**2, Mean(Norm**2) ops'
def _prepare_variables(self):
self._moving_averager = tf.train.ExponentialMovingAverage(decay=self._beta, zero_debias=self._zero_debias) assert self._grad prepare_variables_op = [] self._grad_squared = [] self._grad_norm_squared = [] for (v, g) in zip(self._vars, self._grad): if (g is None): continue ...
def _get_lr_tensor(self):
  """Get lr minimizing the surrogate.

  Returns:
    The lr_t.
  """
  shrink_factor = 1.0 - tf.sqrt(self._mu)
  return (shrink_factor ** 2) / self._h_min
'Get the min mu which minimize the surrogate. Returns: The mu_t.'
def _get_mu_tensor(self):
const_fact = ((((self._dist_to_opt_avg ** 2) * (self._h_min ** 2)) / 2) / self._grad_var) coef = tf.Variable([(-1.0), 3.0, 0.0, 1.0], dtype=tf.float32, name='cubic_solver_coef') coef = tf.scatter_update(coef, tf.constant(2), (- (3 + const_fact))) roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, s...
'YellowFin auto-tuning optimizer based on momentum SGD. Returns: YF ops (Curvature range, Grad_variance, Dist_to_opt, Single-Step, Auto-Tuning)'
def _yellowfin(self):
yellowfin_ops = [] curv_range_ops = self._curvature_range() yellowfin_ops += curv_range_ops grad_var_ops = self._grad_variance() yellowfin_ops += grad_var_ops dist_to_opt_ops = self._dist_to_opt() yellowfin_ops += dist_to_opt_ops self._mu = tf.identity(tf.cond(self._do_tune, self._get_mu...
'Applying gradients aand tune hyperparams with YellowFin. Args: grads_and_vars: List of (gradient, variable) pairs as returned by compute_gradients(). global_step: Optional Variable to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to ...
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
(self._grad, self._vars) = zip(*[(g, t) for (g, t) in grads_and_vars if (g is not None)]) with tf.variable_scope('apply_updates'): if (self._clip_thresh_var is not None): (self._grads_clip, self._grads_norm) = tf.clip_by_global_norm(self._grad, self._clip_thresh_var) apply_grad_o...
def compute_gradients(self, loss, var_list, global_step=None, gate_gradients=GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, name=None, grad_loss=None):
  """Compute gradients through momentum optimizer.

  Args:
    loss: A Tensor containing the value to minimize.
    var_list: Optional list or tuple of tf.Variable to update to minimize
      loss.  Defaults to the list of variables collected in the graph under
      the key GraphKey.TRAINABLE_VARIABLES.
    global_step: Optional Variable to increment by one after the variables
      have been updated.  NOTE(review): accepted but not forwarded to the
      underlying momentum optimizer.
    gate_gradients: How to gate the computation of gradients.
    aggregation_method: Specifies the method used to combine gradient terms.
    colocate_gradients_with_ops: If True, try colocating gradients with the
      corresponding op.
    name: Optional name.  NOTE(review): accepted but unused here.
    grad_loss: Optional.  A Tensor holding the gradient computed for loss.

  Returns:
    Whatever the wrapped momentum optimizer's compute_gradients returns
    (a list of (gradient, variable) pairs).
  """
  # Pure delegation; global_step and name are intentionally dropped.
  return self._momentum_optimizer.compute_gradients(loss, var_list=var_list, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops, grad_loss=grad_loss)
'Adapted from Tensorflow Optimizer base class member function. Add operations to minimize `loss` by updating `var_list`. This method simply combines calls `compute_gradients()` and `apply_gradients()`. If you want to process the gradient before applying them call `tf.gradients()` and `self.apply_gradients()` explicitly...
def minimize(self, loss, global_step=None, var_list=None, gate_gradients=GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, name=None, grad_loss=None):
grads_and_vars = self._optimizer.compute_gradients(loss, var_list=var_list, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops, grad_loss=grad_loss) vars_with_grad = [v for (g, v) in grads_and_vars if (g is not None)] if (not vars_wi...
@property
def top_dimensionality(self):
  """Integer, the last dimension of the predictions (vocab size).

  Raises:
    NotImplementedError: always; subclasses must override.
  """
  raise NotImplementedError('Abstract Method')
def bottom(self, x):
  """Transform one shard of input.

  Args:
    x: An int32 Tensor with shape [batch, p0, p1, input_channels]

  Returns:
    A float32 Tensor with shape [batch, p0, p1, body_input_depth]

  Raises:
    NotImplementedError: always; subclasses must override.
  """
  raise NotImplementedError('Abstract Method')
def bottom_sharded(self, xs, data_parallelism):
  """Transform the inputs.

  Args:
    xs: A list of num_datashards Tensors (one per shard) each with shape
      [batch, p0, p1, depth]
    data_parallelism: a expert_utils.Parallelism object

  Returns:
    sharded_body_input: A list of num_datashards Tensors, each with shape
      [batch, p0, p1, body_input_depth].
  """
  # Apply the per-shard `bottom` transform to every datashard in parallel.
  return data_parallelism(self.bottom, xs)
def targets_bottom(self, x):
  """Transform one shard of targets.

  Args:
    x: An int32 Tensor with shape [batch, p0, p1, target_channels]

  Returns:
    A float32 Tensor with shape [batch, p0, p1, body_input_depth]
  """
  # Defaults to the input-side `bottom` transform, under a dedicated
  # variable scope so target-side variables stay separate.
  with tf.variable_scope('targets_bottom'):
    return self.bottom(x)
def targets_bottom_sharded(self, xs, data_parallelism):
  """Transform the targets.

  Args:
    xs: A list of num_datashards Tensors (one per shard) each with shape
      [batch, p0, p1, target_channels]
    data_parallelism: a expert_utils.Parallelism object

  Returns:
    sharded_body_input: A list of num_datashards Tensors, each with shape
      [batch, p0, p1, body_input_depth].
  """
  # Apply the per-shard `targets_bottom` transform to every datashard.
  return data_parallelism(self.targets_bottom, xs)
def top(self, body_output, targets):
  """Generate predictions/logits for one shard of output.

  Most classes will override this function.

  Args:
    body_output: A Tensor with shape [batch, p0, p1, body_output_depth]
    targets: A Tensor with shape
      [batch, p0, p1, targets_channels, top_dimensionality]

  Returns:
    A Tensor of class logits.

  Raises:
    NotImplementedError: always; subclasses must override.
  """
  raise NotImplementedError('Abstract Method')
def top_sharded(self, sharded_body_output, sharded_targets, data_parallelism):
  """Generate predictions/logits for all shards.

  Classes with cross-shard interaction will override this function.

  Args:
    sharded_body_output: A list of Tensors.
    sharded_targets: A list of Tensors.
    data_parallelism: a expert_utils.Parallelism object.

  Returns:
    sharded_logits: A list of Tensors.
  """
  # Apply the per-shard `top` transform to each (body_output, targets) pair.
  return data_parallelism(self.top, sharded_body_output, sharded_targets)
def loss(self, top_out, targets, weights_fn=common_layers.weights_nonzero):
  """Compute loss numerator and denominator for one shard of output."""
  # `top_out` already holds the logits produced by `top`.
  return common_layers.padded_cross_entropy(
      top_out, targets, self._model_hparams.label_smoothing,
      weights_fn=weights_fn)
def loss_sharded(self, sharded_top_out, sharded_targets, data_parallelism):
  """Compute loss for all shards."""
  loss_num, loss_den = data_parallelism(
      self.loss, sharded_top_out, sharded_targets)
  # Clamp the denominator at 1.0 to avoid division by zero.
  total_weight = tf.maximum(1.0, tf.add_n(loss_den))
  return tf.add_n(loss_num) / total_weight
'Creates a FeedForwardExpert. Args: hp: hyperparameters. Call FeedForwardExpertParams() to create these. name: a string.'
def __init__(self, hp, name):
self._hp = hp hidden_layer_sizes = (hp.hidden_layer_sizes or []) num_layers = (1 + len(hidden_layer_sizes)) layer_sizes = (([hp.input_size] + hidden_layer_sizes) + [hp.output_size]) self._layer_sizes = layer_sizes self._w = [] for layer in range(num_layers): shape = layer_sizes[layer...
'Evaluate the FeedForwardExpert on the given input. Args: x: a `Tensor` of shape `[batch_size, hp.input_size]` Returns: a `Tensor` of shape `[batch_size, hp.output_size]`'
def Eval(self, x):
hp = self._hp num_layers = len(self._w) for i in xrange(num_layers): x = tf.matmul(x, self._w[i]) if (hp.autoscale and (self._layer_sizes[i] != hp.input_size)): x *= ((self._layer_sizes[i] / hp.input_size) ** (-0.5)) if (((i + 1) < num_layers) and hp.hidden_activation): ...
def __init__(self, device_names_or_functions, reuse=None, caching_devices=None, daisy_chain_variables=False):
  """Create a Parallelism.

  Args:
    device_names_or_functions: A list of length n, containing device names
      or device functions (see `tf.device`).
    reuse: True or None.  Whether to reuse variables created in the first
      replica in the subsequent replicas.
    caching_devices: Either `None`, or a list of length n containing device
      names.
    daisy_chain_variables: a boolean.
  """
  assert device_names_or_functions
  self._devices = device_names_or_functions
  self._n = len(device_names_or_functions)
  self._reuse = reuse
  # _MaybeRepeat reads self._n, so this must follow the _n assignment.
  self._caching_devices = self._MaybeRepeat(caching_devices)
  self._daisy_chain_variables = daisy_chain_variables
'A parallel set of function calls (using the specified devices). Args: fn: a function or a list of n functions. *args: additional args. Each arg should either be not a list, or a list of length n. **kwargs: additional keyword args. Each arg should either be not a list, or a list of length n. Returns: either a single ...
def __call__(self, fn, *args, **kwargs):
if args: my_args = TransposeListOfLists([self._MaybeRepeat(arg) for arg in args]) else: my_args = [[] for _ in xrange(self.n)] my_kwargs = [{} for _ in xrange(self.n)] for (k, v) in six.iteritems(kwargs): vals = self._MaybeRepeat(v) for i in xrange(self.n): my...
def _MaybeRepeat(self, x):
  """Utility function for processing arguments that are singletons or lists.

  Args:
    x: either a list of self.n elements, or not a list.

  Returns:
    a list of self.n elements.
  """
  # Singletons are broadcast; lists must already have the right length.
  if not isinstance(x, list):
    return [x] * self.n
  assert len(x) == self.n
  return x
'Create a NoisyTopKGating network. Args: hp: a hyperparameters created by NoisyTopKGatingParams() name: a string'
def __init__(self, hp, name):
self._vars = [] self._hp = hp self._w_gate = tf.get_variable(('%s_gate' % name), [hp.input_size, hp.num_experts], hp.dtype, hp.initializer) self._vars.append(self._w_gate) if hp.noisy_gating: self._w_noise = tf.get_variable(('%s_noise' % name), [hp.input_size, hp.num_experts], hp.dtype, hp.i...
'Compute noisy top-k gating. Args: x: a `Tensor` of shape `[batch_size, input_size]`. train: a boolean `Scalar`. Setting this to false turns off noise. summaries: a boolean. Whether to add summaries. Returns: gates: a `Tensor` of shape `[batch_size, n]` load: a `Tensor` of shape `[n]`. If we are using noise, this is...
def Eval(self, x, train=True, summaries=False):
with tf.variable_scope('NoisyTopKGating'): hp = self._hp clean_logits = tf.matmul(x, self._w_gate) if hp.noisy_gating: raw_noise_stddev = tf.matmul(x, self._w_noise) noise_stddev = ((tf.nn.softplus(raw_noise_stddev) + hp.noise_epsilon) * tf.to_float(train)) ...
'Create a LocalMixtureOfExperts. Args: gating_hp: hyperparameters for the gating network. e.g. NoisyTopKGatingParams() expert_hp: hyperparameters for the expert networks. e.g. FeedForwardExpertParams() input_size: an integer. output_size: an integer. name: a string.'
def __init__(self, gating_hp, expert_hp, input_size, output_size, name):
self._name = name _SetInputOutputSizes(gating_hp, input_size, None) _SetInputOutputSizes(expert_hp, input_size, output_size) self._gating_hp = gating_hp self._gating = gating_hp.gating_class(gating_hp, (name + '_gating')) self._expert_hp = expert_hp self._experts = [expert_hp.expert_class(ex...
'Evaluate mixture of experts. We provide a convenient debugging tool for determining the set of examples that we passed to each expert. The caller may provide a `Tensor` of "identifiers", of any type whose first dimension matches the number of input examples. The function will then return a list "expert_to_identifiers...
def Eval(self, x, train=True, per_example_multiplier=None, summaries=False, identifiers=None):
gating_hp = self._gating_hp (gates, load) = self._gating.Eval(x, train, summaries) if (per_example_multiplier is not None): gates *= tf.expand_dims(per_example_multiplier, 1) dispatcher = SparseDispatcher(gating_hp.num_experts, gates) expert_input = dispatcher.Dispatch(x) expert_output =...
'Create a DistributedMixtureOfExperts. If `secondary_gating_hp` is `None`, then this is a flat MoE with `primary_gating_hp.num_experts` experts. Otherwise, this is a hierarchical MoE with `primary_gating_hp.num_experts` groups of `secondary_gating_hp.num_experts` experts. The assignemnt of experts (or groups of experts...
def __init__(self, primary_gating_hp, secondary_gating_hp, expert_hp, input_size, output_size, expert_devices, name):
self._name = name _SetInputOutputSizes(primary_gating_hp, input_size, None) _SetInputOutputSizes(expert_hp, input_size, output_size) self._is_hierarchical = (secondary_gating_hp is not None) self._primary_gating_hp = primary_gating_hp self._primary_gating = primary_gating_hp.gating_class(primary...
'Evaluate MoE on given inputs. This class is designed for the case where the rest of the model is using data parallelism. We receive an array of input `Tensor`s, one per datashard, and we produce a list of output Tensors, one per datashard. We provide a convenient debugging tool for determining the set of examples th...
def Eval(self, datashard_devices, xs, train=True, summaries=False, identifiers=None, shadow_xs=None):
n1 = self._primary_gating_hp.num_experts epsilon = 1e-10 assert (len(datashard_devices) == len(xs)) num_datashards = len(xs) expert_devices = self._expert_devices has_identifiers = (identifiers is not None) (primary_gates, primary_smooth_load) = Parallel(datashard_devices, self._primary_gati...
'Create a SparseDispatcher. Args: num_experts: an integer. gates: a `Tensor` of shape `[batch_size, num_experts]`. Returns: a SparseDispatcher'
def __init__(self, num_experts, gates):
self._gates = gates self._num_experts = num_experts where = tf.to_int32(tf.where((tf.transpose(gates) > 0))) (self._expert_index, self._batch_index) = tf.unstack(where, num=2, axis=1) self._part_sizes_tensor = tf.reduce_sum(tf.to_int32((gates > 0)), [0]) self._nonzero_gates = tf.gather(tf.reshap...
'Create one input Tensor for each expert. The `Tensor` for a expert `i` contains the slices of `inp` corresponding to the batch elements `b` where `gates[b, i] > 0`. Args: inp: a `Tensor` of shape \'[batch_size, <extra_input_dims>]` Returns: a list of `num_experts` `Tensor`s with shapes `[expert_batch_size_i, <extra_in...
def Dispatch(self, inp):
inp = tf.gather(inp, self._batch_index) return tf.split(inp, self._part_sizes_tensor, 0)
'Sum together the expert output, weighted by the gates. The slice corresponding to a particular batch element `b` is computed as the sum over all experts `i` of the expert output, weighted by the corresponding gate values. If `multiply_by_gates` is set to False, the gate values are ignored. Args: expert_out: a list of...
def Combine(self, expert_out, multiply_by_gates=True):
stitched = ConvertGradientToTensor(tf.concat(expert_out, 0)) if multiply_by_gates: stitched *= tf.expand_dims(self._nonzero_gates, 1) combined = tf.unsorted_segment_sum(stitched, self._batch_index, tf.shape(self._gates)[0]) return combined
'Gate values corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32` and shapes `[expert_batch_size_i]`'
def ExpertToGates(self):
return tf.split(self._nonzero_gates, self._part_sizes_tensor, 0)
'Create a DistributedSparseDispatcher. Args: datashard_devices: a list of num_datashards device strings. expert_devices: a list of num_experts device strings. gates: a list of num_datashards `Tensor`s of shapes `[batch_size[d], num_experts]`. Returns: a DistributedSparseDispatcher'
def __init__(self, datashard_devices, expert_devices, gates):
self._gates = gates self._num_experts = len(expert_devices) assert (len(gates) == len(datashard_devices)) self._num_datashards = len(gates) self._datashard_devices = datashard_devices self._expert_devices = expert_devices self._dispatchers = Parallel(self._datashard_devices, SparseDispatcher...
'Create one input Tensor for each expert. Args: inp: a list of length num_datashards `Tensor`s with shapes `[batch_size[d], <extra_input_dims>]`. Returns: a list of `num_experts` `Tensor`s with shapes `[num_examples[i], <extra_input_dims>]`.'
def Dispatch(self, inp):
dispatched = Parallel(self._datashard_devices, (lambda a, b: a.Dispatch(b)), self._dispatchers, inp) ret = Parallel(self._expert_devices, tf.concat, TransposeListOfLists(dispatched), 0) if (ret[0].dtype == tf.float32): ret = Parallel(self._expert_devices, ConvertGradientToTensor, ret) return ret...
'Sum together the expert output, multiplied by the corresponding gates. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean. Returns: a list of num_datashards `Tensor`s with shapes `[batch_size[d], <extra_output_dims>]`.'
def Combine(self, expert_out, multiply_by_gates=True):
expert_part_sizes = tf.unstack(tf.stack([self._dispatchers[d].part_sizes for d in xrange(self._num_datashards)]), num=self._num_experts, axis=1) expert_output_parts = Parallel(self._expert_devices, tf.split, expert_out, expert_part_sizes) expert_output_parts_t = TransposeListOfLists(expert_output_parts) ...
'Gate values corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s of type `tf.float32`.'
def ExpertToGates(self):
return Parallel(self._expert_devices, tf.concat, TransposeListOfLists(Parallel(self._datashard_devices, [self._dispatchers[d].ExpertToGates for d in xrange(self._num_datashards)])), 0)
'Constructs a Dispatcher. Args: data_parallelism: a Parallelism object. model_parallelism: a Parallelism object. gates: a list of 1d integer `Tensor`s, one per datashard. Says which expert to use for each batch element. Returns: a DistributedSingleDispatcher'
def __init__(self, data_parallelism, model_parallelism, gates):
gates = data_parallelism(tf.to_int32, gates) self._gates = gates self._data_parallelism = data_parallelism self._model_parallelism = model_parallelism def _PartSizes(gates): return tf.unsorted_segment_sum(tf.ones_like(gates), gates, model_parallelism.n) part_sizes_by_datashard = data_par...
'Reshuffles input `Tensor`s to produce output `Tensor`s. The dimensions of all input and output `Tensor`s match, except for dimension 0. In dimension 0, the input `Tensor`s match the corresponding `gates` `Tensor`s which were passed to the constructor. Args: d_tensors: a list of `Tensor`s, one per datashard. Returns: ...
def Dispatch(self, d_tensors):
parts = self._data_parallelism(tf.dynamic_partition, d_tensors, self._gates, self._model_parallelism.n) parts_by_expert = TransposeListOfLists(parts) x_tensors = self._model_parallelism(tf.concat, parts_by_expert, 0) return x_tensors
'Reshuffles per-expert `Tensor`s to produce per-datashard `Tensor`s. Dispatch must have been called at least once first. The dimensions of all input and output `Tensor`s match, except for dimension 0. In dimension 0, the input `Tensor`s match the corresponding outputs of `Dispatch`, and the output `Tensor`s match the ...
def Combine(self, x_tensors):
parts = self._model_parallelism(tf.split, x_tensors, self._part_sizes_by_expert) d_tensors = self._data_parallelism(tf.dynamic_stitch, self._stitch_indices, TransposeListOfLists(parts)) return d_tensors
'Run a step of the network.'
def step(self, sess, inp, target, do_backward_in, noise_param=None, beam_size=2, eos_id=2, eos_cost=0.0, update_mem=None, state=None):
(batch_size, height, length) = (inp.shape[0], inp.shape[1], inp.shape[2]) do_backward = do_backward_in train_mode = True if (do_backward_in is None): do_backward = False train_mode = False if (update_mem is None): update_mem = do_backward feed_in = {} if (state is Non...
'Grow the program body.'
def grow_body(self, new_var_name, dependencies, types_to_vars):
choices = [] for f in self.functions: if all([(a in types_to_vars.keys()) for a in f.arg_types]): choices.append(f) f = random.choice(choices) args = [] for t in f.arg_types: possible_vars = random.choice(types_to_vars[t]) var = random.choice(possible_vars) ...
'Grow the program.'
def grow(self, program_len, input_types):
var_names = list(reversed(map(chr, range(97, 123)))) dependencies = dict() types_to_vars = dict() input_names = [] for t in input_types: var = var_names.pop() dependencies[var] = [] types_to_vars.setdefault(t, []).append(var) input_names.append(var) statements = [...
'Evaluate this program.'
def evaluate(self, inputs):
if (len(inputs) != len(self.input_names)): raise AssertionError(('inputs and input_names have tohave the same len. inp: %s , names: %s' % (str(inputs), str(self.input_names)))) inp_str = '' for (name, inp) in zip(self.input_names, inputs): inp_str += (((na...
'Returns the number of classes in the data set.'
def num_classes(self):
return 5
'Returns the number of examples in the data subset.'
def num_examples_per_epoch(self):
if (self.subset == 'train'): return 3170 if (self.subset == 'validation'): return 500
'Instruction to download and extract the tarball from Flowers website.'
def download_message(self):
print(('Failed to find any Flowers %s files' % self.subset)) print('') print('If you have already downloaded and processed the data, then make sure to set --data_dir to point to the directory containing the location of ...
'Initialize VariableDeviceChooser. Args: num_parameter_servers: number of parameter servers. ps_device: string representing the parameter server device. placement: string representing the placement of the variable either CPU:0 or GPU:0. When using parameter servers forced to CPU:0.'
def __init__(self, num_parameter_servers=0, ps_device='/job:ps', placement='CPU:0'):
self._num_ps = num_parameter_servers self._ps_device = ps_device self._placement = (placement if (num_parameter_servers == 0) else 'CPU:0') self._next_task_id = 0
'Initialize dataset using a subset and the path to the data.'
def __init__(self, name, subset):
assert (subset in self.available_subsets()), self.available_subsets() self.name = name self.subset = subset
'Returns the number of classes in the data set.'
@abstractmethod def num_classes(self):
pass
'Returns the number of examples in the data subset.'
@abstractmethod def num_examples_per_epoch(self):
pass
'Prints a download message for the Dataset.'
@abstractmethod def download_message(self):
pass
'Returns the list of available subsets.'
def available_subsets(self):
return ['train', 'validation']
'Returns a python list of all (sharded) data subset files. Returns: python list of all (sharded) data set files. Raises: ValueError: if there are not data_files matching the subset.'
def data_files(self):
tf_record_pattern = os.path.join(FLAGS.data_dir, ('%s-*' % self.subset)) data_files = tf.gfile.Glob(tf_record_pattern) if (not data_files): print(('No files found for dataset %s/%s at %s' % (self.name, self.subset, FLAGS.data_dir))) self.download_message() exit((...
'Return a reader for a single entry from the data set. See io_ops.py for details of Reader class. Returns: Reader object that reads the data set.'
def reader(self):
return tf.TFRecordReader()
'Returns the number of classes in the data set.'
def num_classes(self):
return 1000
'Returns the number of examples in the data set.'
def num_examples_per_epoch(self):
if (self.subset == 'train'): return 1281167 if (self.subset == 'validation'): return 50000