desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Instruction to download and extract the tarball from Flowers website.'
def download_message(self):
print(('Failed to find any ImageNet %s files' % self.subset)) print('') print('If you have already downloaded and processed the data, then make sure to set --data_dir to point to the directory containing the location of ...
'Constructs classifier graph from inputs to classifier loss. * Caches the VatxtInput object in `self.cl_inputs` * Caches tensors: `cl_embedded`, `cl_logits`, `cl_loss` Returns: loss: scalar float.'
def classifier_graph(self):
inputs = _inputs('train', pretrain=False) self.cl_inputs = inputs embedded = self.layers['embedding'](inputs.tokens) self.tensors['cl_embedded'] = embedded (_, next_state, logits, loss) = self.cl_loss_from_embedding(embedded, return_intermediates=True) tf.summary.scalar('classification_loss', lo...
'Constructs LM graph from inputs to LM loss. * Caches the VatxtInput object in `self.lm_inputs` * Caches tensors: `lm_embedded` Args: compute_loss: bool, whether to compute and return the loss or stop after the LSTM computation. Returns: loss: scalar float.'
def language_model_graph(self, compute_loss=True):
inputs = _inputs('train', pretrain=True) self.lm_inputs = inputs return self._lm_loss(inputs, compute_loss=compute_loss)
'Constructs classifier evaluation graph. Args: dataset: the labeled dataset to evaluate, {\'train\', \'test\', \'valid\'}. Returns: eval_ops: dict<metric name, tuple(value, update_op)> var_restore_dict: dict mapping variable restoration names to variables. Trainable variables will be mapped to their moving average name...
def eval_graph(self, dataset='test'):
inputs = _inputs(dataset, pretrain=False) embedded = self.layers['embedding'](inputs.tokens) (_, next_state, logits, _) = self.cl_loss_from_embedding(embedded, inputs=inputs, return_intermediates=True) eval_ops = {'accuracy': tf.contrib.metrics.streaming_accuracy(layers_lib.predictions(logits), inputs.l...
'Compute classification loss from embedding. Args: embedded: 3-D float Tensor [batch_size, num_timesteps, embedding_dim] inputs: VatxtInput, defaults to self.cl_inputs. return_intermediates: bool, whether to return intermediate tensors or only the final loss. Returns: If return_intermediates is True: lstm_out, next_sta...
def cl_loss_from_embedding(self, embedded, inputs=None, return_intermediates=False):
if (inputs is None): inputs = self.cl_inputs (lstm_out, next_state) = self.layers['lstm'](embedded, inputs.state, inputs.length) logits = self.layers['cl_logits'](lstm_out) loss = layers_lib.classification_loss(logits, inputs.labels, inputs.weights) if return_intermediates: return (l...
'Compute adversarial loss based on FLAGS.adv_training_method.'
def adversarial_loss(self):
def random_perturbation_loss(): return adv_lib.random_perturbation_loss(self.tensors['cl_embedded'], self.cl_inputs.length, self.cl_loss_from_embedding) def adversarial_loss(): return adv_lib.adversarial_loss(self.tensors['cl_embedded'], self.tensors['cl_loss'], self.cl_loss_from_embedding) ...
'Constructs classifier graph from inputs to classifier loss. * Caches the VatxtInput objects in `self.cl_inputs` * Caches tensors: `cl_embedded` (tuple of forward and reverse), `cl_logits`, `cl_loss` Returns: loss: scalar float.'
def classifier_graph(self):
inputs = _inputs('train', pretrain=False, bidir=True) self.cl_inputs = inputs (f_inputs, _) = inputs embedded = [self.layers['embedding'](inp.tokens) for inp in inputs] self.tensors['cl_embedded'] = embedded (_, next_states, logits, loss) = self.cl_loss_from_embedding(embedded, return_intermedia...
'Constructs forward and reverse LM graphs from inputs to LM losses. * Caches the VatxtInput objects in `self.lm_inputs` * Caches tensors: `lm_embedded`, `lm_embedded_reverse` Args: compute_loss: bool, whether to compute and return the loss or stop after the LSTM computation. Returns: loss: scalar float, sum of forward ...
def language_model_graph(self, compute_loss=True):
inputs = _inputs('train', pretrain=True, bidir=True) self.lm_inputs = inputs (f_inputs, r_inputs) = inputs f_loss = self._lm_loss(f_inputs, compute_loss=compute_loss) r_loss = self._lm_loss(r_inputs, emb_key='lm_embedded_reverse', lstm_layer='lstm_reverse', lm_loss_layer='lm_loss_reverse', loss_name...
'Constructs classifier evaluation graph. Args: dataset: the labeled dataset to evaluate, {\'train\', \'test\', \'valid\'}. Returns: eval_ops: dict<metric name, tuple(value, update_op)> var_restore_dict: dict mapping variable restoration names to variables. Trainable variables will be mapped to their moving average name...
def eval_graph(self, dataset='test'):
inputs = _inputs(dataset, pretrain=False, bidir=True) embedded = [self.layers['embedding'](inp.tokens) for inp in inputs] (_, next_states, logits, _) = self.cl_loss_from_embedding(embedded, inputs=inputs, return_intermediates=True) (f_inputs, _) = inputs eval_ops = {'accuracy': tf.contrib.metrics.st...
'Compute classification loss from embedding. Args: embedded: Length 2 tuple of 3-D float Tensor [batch_size, num_timesteps, embedding_dim]. inputs: Length 2 tuple of VatxtInput, defaults to self.cl_inputs. return_intermediates: bool, whether to return intermediate tensors or only the final loss. Returns: If return_inte...
def cl_loss_from_embedding(self, embedded, inputs=None, return_intermediates=False):
if (inputs is None): inputs = self.cl_inputs out = [] for (layer_name, emb, inp) in zip(['lstm', 'lstm_reverse'], embedded, inputs): out.append(self.layers[layer_name](emb, inp.state, inp.length)) (lstm_outs, next_states) = zip(*out) lstm_out = tf.concat(lstm_outs, 1) logits = se...
'Compute adversarial loss based on FLAGS.adv_training_method.'
def adversarial_loss(self):
def random_perturbation_loss(): return adv_lib.random_perturbation_loss_bidir(self.tensors['cl_embedded'], self.cl_inputs[0].length, self.cl_loss_from_embedding) def adversarial_loss(): return adv_lib.adversarial_loss_bidir(self.tensors['cl_embedded'], self.tensors['cl_loss'], self.cl_loss_from_...
'Construct VatxtInput. Args: batch: NextQueuedSequenceBatch. state_name: str, name of state to fetch and save. tokens: int Tensor, tokens. Defaults to batch\'s F_TOKEN_ID sequence. num_states: int The number of states to store. eos_id: int Id of end of Sequence.'
def __init__(self, batch, state_name=None, tokens=None, num_states=0, eos_id=None):
self._batch = batch self._state_name = state_name self._tokens = (tokens if (tokens is not None) else batch.sequences[data_utils.SequenceWrapper.F_TOKEN_ID]) self._num_states = num_states w = batch.sequences[data_utils.SequenceWrapper.F_WEIGHT] w = tf.transpose(w, [1, 0]) w = tf.reshape(w, [...
'Constructs Timestep from empty Features.'
def __init__(self, token, label, weight, multivalent_tokens=False):
self._token = token self._label = label self._weight = weight self._multivalent_tokens = multivalent_tokens self._fill_with_defaults()
'Constructs a VGSLImageModel. Args: mode: One of "train", "eval" model_spec: Full model specification string, for reference only. initial_learning_rate: Initial learning rate for the network. final_learning_rate: Final learning rate for the network. halflife: Number of steps over which to halve the difference b...
def __init__(self, mode, model_spec, initial_learning_rate, final_learning_rate, halflife):
self.model_spec = model_spec self.layers = None self.mode = mode self.initial_learning_rate = initial_learning_rate self.final_learning_rate = final_learning_rate self.decay_steps = (halflife / DECAY_STEPS_FACTOR) self.decay_rate = DECAY_RATE self.labels = None self.sparse_labels = N...
'Builds the model from the separate input/layers/output spec strings. Args: input_pattern: File pattern of the data in tfrecords of TF Example format. input_spec: Specification of the input layer: batchsize,height,width,depth (4 comma-separated integers) Training will run with batches of batchsize images, but runtime c...
def Build(self, input_pattern, input_spec, model_spec, output_spec, optimizer_type, num_preprocess_threads, reader):
self.global_step = tf.Variable(0, name='global_step', trainable=False) shape = _ParseInputSpec(input_spec) (out_dims, out_func, num_classes) = _ParseOutputSpec(output_spec) self.using_ctc = (out_func == 'c') (images, heights, widths, labels, sparse, _) = vgsl_input.ImageInput(input_pattern, num_prep...
'Runs a training step in the session. Args: sess: Session in which to train the model. Returns: loss, global_step.'
def TrainAStep(self, sess):
(_, loss, step) = sess.run([self.train_op, self.loss, self.global_step]) return (loss, step)
'Restores the model from the given checkpoint path into the session. Args: checkpoint_path: File pathname of the checkpoint. sess: Session in which to restore the model. Returns: global_step of the model.'
def Restore(self, checkpoint_path, sess):
self.saver.restore(sess, checkpoint_path) return tf.train.global_step(sess, self.global_step)
'Runs a step for eval in the session. Args: sess: Session in which to run the model. Returns: output tensor result, labels tensor result.'
def RunAStep(self, sess):
return sess.run([self.output, self.labels])
'Adds the output layer and loss function. Args: prev_layer: Output of last layer of main network. out_dims: Number of output dimensions, 0, 1 or 2. out_func: Output non-linearity. \'s\' or \'c\'=softmax, \'l\'=logistic. num_classes: Number of outputs/size of last output dimension.'
def _AddOutputs(self, prev_layer, out_dims, out_func, num_classes):
height_in = shapes.tensor_dim(prev_layer, dim=1) (logits, outputs) = self._AddOutputLayer(prev_layer, out_dims, out_func, num_classes) if (self.mode == 'train'): self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func) tf.summary.scalar('loss', self.loss) elif (out_dims =...
'Add the fully-connected logits and SoftMax/Logistic output Layer. Args: prev_layer: Output of last layer of main network. out_dims: Number of output dimensions, 0, 1 or 2. out_func: Output non-linearity. \'s\' or \'c\'=softmax, \'l\'=logistic. num_classes: Number of outputs/size of last output dimension. Return...
def _AddOutputLayer(self, prev_layer, out_dims, out_func, num_classes):
batch_in = shapes.tensor_dim(prev_layer, dim=0) height_in = shapes.tensor_dim(prev_layer, dim=1) width_in = shapes.tensor_dim(prev_layer, dim=2) depth_in = shapes.tensor_dim(prev_layer, dim=3) if out_dims: shaped = tf.reshape(prev_layer, [(-1), depth_in]) else: shaped = tf.reshap...
'Add the appropriate loss function. Args: logits: Pre-softmax/logistic fully-connected output shaped to out_dims. height_in: Height of logits before going into the softmax layer. out_dims: Number of output dimensions, 0, 1 or 2. out_func: Output non-linearity. \'s\' or \'c\'=softmax, \'l\'=logistic. Returns: loss...
def _AddLossFunction(self, logits, height_in, out_dims, out_func):
if (out_func == 'c'): ctc_input = tf.transpose(logits, [1, 0, 2]) widths = self.layers.GetLengths(dim=2, factor=height_in) cross_entropy = tf.nn.ctc_loss(ctc_input, self.sparse_labels, widths) elif (out_func == 's'): if (out_dims == 2): self.labels = _PadLabels3d(logi...
'Adds an optimizer with learning rate decay to minimize self.loss. Args: optimizer_type: One of \'GradientDescent\', \'AdaGrad\', \'Momentum\', \'Adam\'. Raises: ValueError: if the optimizer type is unrecognized.'
def _AddOptimizer(self, optimizer_type):
learn_rate_delta = (self.initial_learning_rate - self.final_learning_rate) learn_rate_dec = tf.add(tf.train.exponential_decay(learn_rate_delta, self.global_step, self.decay_steps, self.decay_rate), self.final_learning_rate) if (optimizer_type == 'GradientDescent'): opt = tf.train.GradientDescentOpti...
'Constructs a VGSLSpecs. Args: widths: Tensor of size batch_size of the widths of the inputs. heights: Tensor of size batch_size of the heights of the inputs. is_training: True if the graph should be build for training.'
def __init__(self, widths, heights, is_training):
self.model_str = None self.is_training = is_training self.widths = widths self.heights = heights self.reduction_factors = [1.0, 1.0, 1.0, 1.0] self.valid_ops = [self.AddSeries, self.AddParallel, self.AddConvLayer, self.AddMaxPool, self.AddDropout, self.AddReShape, self.AddFCLayer, self.AddLSTMLa...
'Builds a network with input prev_layer from a VGSLSpecs description. Args: prev_layer: The input tensor. model_str: Model definition similar to Tesseract as follows: ============ FUNCTIONAL OPS ============ C(s|t|r|l|m)[{name}]<y>,<x>,<d> Convolves using a y,x window, with no shrinkage, SAME infill, d outputs, with s...
def Build(self, prev_layer, model_str):
self.model_str = model_str (final_layer, _) = self.BuildFromString(prev_layer, 0) return final_layer
'Returns the lengths of the batch of elements in the given dimension. WARNING: The returned sizes may not exactly match TF\'s calculation. Args: dim: dimension to get the sizes of, in [1,2]. batch, depth not allowed. factor: A scalar value to multiply by. Returns: The original heights/widths scaled by the current scali...
def GetLengths(self, dim=2, factor=1):
if (dim == 1): lengths = self.heights elif (dim == 2): lengths = self.widths else: raise ValueError('Invalid dimension given to GetLengths') lengths = tf.cast(lengths, tf.float32) if (self.reduction_factors[dim] is not None): lengths = tf.div(lengths, self...
'Adds the layers defined by model_str[index:] to the model. Args: prev_layer: Input tensor. index: Position in model_str to start parsing Returns: Output tensor, next model_str index. Raises: ValueError: If the model string is unrecognized.'
def BuildFromString(self, prev_layer, index):
index = self._SkipWhitespace(index) for op in self.valid_ops: (output_layer, next_index) = op(prev_layer, index) if (output_layer is not None): return (output_layer, next_index) if (output_layer is not None): return (output_layer, next_index) raise ValueError(('Unreco...
'Builds a sequence of layers for a VGSLSpecs model. Args: prev_layer: Input tensor. index: Position in model_str to start parsing Returns: Output tensor of the series, end index in model_str. Raises: ValueError: If [] are unbalanced.'
def AddSeries(self, prev_layer, index):
if (self.model_str[index] != '['): return (None, None) index += 1 while ((index < len(self.model_str)) and (self.model_str[index] != ']')): (prev_layer, index) = self.BuildFromString(prev_layer, index) if (index == len(self.model_str)): raise ValueError(('Missing ] at en...
'tf.concats outputs of layers that run on the same inputs. Args: prev_layer: Input tensor. index: Position in model_str to start parsing Returns: Output tensor of the parallel, end index in model_str. Raises: ValueError: If () are unbalanced or the elements don\'t match.'
def AddParallel(self, prev_layer, index):
if (self.model_str[index] != '('): return (None, None) index += 1 layers = [] num_dims = 0 original_factors = self.reduction_factors final_factors = None while ((index < len(self.model_str)) and (self.model_str[index] != ')')): self.reduction_factors = original_factors ...
'Add a single standard convolutional layer. Args: prev_layer: Input tensor. index: Position in model_str to start parsing Returns: Output tensor, end index in model_str.'
def AddConvLayer(self, prev_layer, index):
pattern = re.compile('(C)(s|t|r|l|m)({\\w+})?(\\d+),(\\d+),(\\d+)') m = pattern.match(self.model_str, index) if (m is None): return (None, None) name = self._GetLayerName(m.group(0), index, m.group(3)) width = int(m.group(4)) height = int(m.group(5)) depth = int(m.group(6)) fn = ...
'Add a maxpool layer. Args: prev_layer: Input tensor. index: Position in model_str to start parsing Returns: Output tensor, end index in model_str.'
def AddMaxPool(self, prev_layer, index):
pattern = re.compile('(Mp)({\\w+})?(\\d+),(\\d+)(?:,(\\d+),(\\d+))?') m = pattern.match(self.model_str, index) if (m is None): return (None, None) name = self._GetLayerName(m.group(0), index, m.group(2)) height = int(m.group(3)) width = int(m.group(4)) y_stride = (height if (m.group(...
'Adds a dropout layer. Args: prev_layer: Input tensor. index: Position in model_str to start parsing Returns: Output tensor, end index in model_str.'
def AddDropout(self, prev_layer, index):
pattern = re.compile('(Do)({\\w+})?') m = pattern.match(self.model_str, index) if (m is None): return (None, None) name = self._GetLayerName(m.group(0), index, m.group(2)) layer = slim.dropout(prev_layer, 0.5, is_training=self.is_training, scope=name) return (layer, m.end())
'Reshapes the input tensor by moving each (x_scale,y_scale) rectangle to. the depth dimension. NOTE that the TF convention is that inputs are [batch, y, x, depth]. Args: prev_layer: Input tensor. index: Position in model_str to start parsing Returns: Output tensor, end index in model_str.'
def AddReShape(self, prev_layer, index):
pattern = re.compile('(S)(?:{(\\w)})?(\\d+)\\((\\d+)x(\\d+)\\)(\\d+),(\\d+)') m = pattern.match(self.model_str, index) if (m is None): return (None, None) name = self._GetLayerName(m.group(0), index, m.group(2)) src_dim = int(m.group(3)) part_a = int(m.group(4)) part_b = int(m.group(...
'Parse expression and add Fully Connected Layer. Args: prev_layer: Input tensor. index: Position in model_str to start parsing Returns: Output tensor, end index in model_str.'
def AddFCLayer(self, prev_layer, index):
pattern = re.compile('(F)(s|t|r|l|m)({\\w+})?(\\d+)') m = pattern.match(self.model_str, index) if (m is None): return (None, None) fn = self._NonLinearity(m.group(2)) name = self._GetLayerName(m.group(0), index, m.group(3)) depth = int(m.group(4)) input_depth = ((shapes.tensor_dim(pr...
'Parse expression and add LSTM Layer. Args: prev_layer: Input tensor. index: Position in model_str to start parsing Returns: Output tensor, end index in model_str.'
def AddLSTMLayer(self, prev_layer, index):
pattern = re.compile('(L)(f|r|b)(x|y)(s)?({\\w+})?(\\d+)') m = pattern.match(self.model_str, index) if (m is None): return (None, None) direction = m.group(2) dim = m.group(3) summarize = (m.group(4) == 's') name = self._GetLayerName(m.group(0), index, m.group(5)) depth = int(m.g...
'Adds an LSTM layer with the given pre-parsed attributes. Always maps 4-D to 4-D regardless of summarize. Args: prev_layer: Input tensor. direction: \'forward\' \'backward\' or \'bidirectional\' dim: \'x\' or \'y\', dimension to consider as time. summarize: True if we are to return only the last timestep. dept...
def _LSTMLayer(self, prev_layer, direction, dim, summarize, depth, name):
if (dim == 'x'): lengths = self.GetLengths(2, 1) inputs = prev_layer else: lengths = self.GetLengths(1, 1) inputs = tf.transpose(prev_layer, [0, 2, 1, 3], name=(name + '_ytrans_in')) input_batch = shapes.tensor_dim(inputs, 0) num_slices = shapes.tensor_dim(inputs, 1) ...
'Returns the non-linearity function pointer for the given string code. For forwards compatibility, allows the full names for stand-alone non-linearities, as well as the single-letter names used in ops like C,F. Args: code: String code representing a non-linearity function. Returns: non-linearity function represented by...
def _NonLinearity(self, code):
if (code in ['s', 'Sig']): return tf.sigmoid elif (code in ['t', 'Tanh']): return tf.tanh elif (code in ['r', 'Relu']): return tf.nn.relu elif (code in ['m', 'Smax']): return tf.nn.softmax return None
'Generates a name for the op, using a user-supplied name if possible. Args: op_str: String representing the parsed op. index: Position in model_str of the start of the op. name_str: User-supplied {name} with {} that need removing or None. Returns: Selected name.'
def _GetLayerName(self, op_str, index, name_str):
if name_str: return name_str[1:(-1)] else: return ((op_str.translate(self.transtab) + '_') + str(index))
'Skips any leading whitespace in the model description. Args: index: Position in model_str to start parsing Returns: end index in model_str of whitespace.'
def _SkipWhitespace(self, index):
pattern = re.compile('([ \\t\\n]+)') m = pattern.match(self.model_str, index) if (m is None): return index return m.end()
'Tests that the simple CTC decoder drops nulls and duplicates.'
def testCodesFromCTC(self):
ctc_labels = [9, 9, 9, 1, 9, 2, 2, 3, 9, 9, 0, 0, 1, 9, 1, 9, 9, 9] decode = decoder.Decoder(filename=None) non_null_labels = decode._CodesFromCTC(ctc_labels, merge_dups=False, null_label=9) self.assertEqual(non_null_labels, [1, 2, 2, 3, 0, 0, 1, 1]) idempotent_labels = decode._CodesFromCTC(non_null...
'Tests that the decoder can decode sequences including multi-codes.'
def testStringFromCTC(self):
ctc_labels = [9, 6, 9, 1, 3, 9, 4, 9, 5, 5, 9, 5, 0, 2, 1, 3, 9, 4, 9] decode = decoder.Decoder(filename=_testdata('charset_size_10.txt')) text = decode.StringFromCTC(ctc_labels, merge_dups=True, null_label=9) self.assertEqual(text, 'farm barn')
'Tests that the percent calculation works as expected.'
def testComputeErrorRate(self):
rate = ec.ComputeErrorRate(error_count=0, truth_count=0) self.assertEqual(rate, 100.0) rate = ec.ComputeErrorRate(error_count=1, truth_count=0) self.assertEqual(rate, 100.0) rate = ec.ComputeErrorRate(error_count=10, truth_count=1) self.assertEqual(rate, 100.0) rate = ec.ComputeErrorRate(err...
'Tests that the error counter works as expected.'
def testCountErrors(self):
truth_str = 'farm barn' counts = ec.CountErrors(ocr_text=truth_str, truth_text=truth_str) self.assertEqual(counts, ec.ErrorCounts(fn=0, fp=0, truth_count=9, test_count=9)) dot_str = 'farm barn.' counts = ec.CountErrors(ocr_text=dot_str, truth_text=truth_str) self.assertEqual(counts, ec.Err...
'Tests that the error counter works as expected.'
def testCountWordErrors(self):
truth_str = 'farm barn' counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=truth_str) self.assertEqual(counts, ec.ErrorCounts(fn=0, fp=0, truth_count=2, test_count=2)) dot_str = 'farm barn.' counts = ec.CountWordErrors(ocr_text=dot_str, truth_text=truth_str) self.assertEqual(counts...
'The parser must return the numbers in the correct order.'
def testParseInputSpec(self):
shape = vgsl_model._ParseInputSpec(input_spec='32,42,256,3') self.assertEqual(shape, vgsl_input.ImageShape(batch_size=32, height=42, width=256, depth=3)) shape = vgsl_model._ParseInputSpec(input_spec='1,0,0,3') self.assertEqual(shape, vgsl_input.ImageShape(batch_size=1, height=None, width=None, depth=3)...
'The parser must return the correct args in the correct order.'
def testParseOutputSpec(self):
(out_dims, out_func, num_classes) = vgsl_model._ParseOutputSpec(output_spec='O1c142') self.assertEqual(out_dims, 1) self.assertEqual(out_func, 'c') self.assertEqual(num_classes, 142) (out_dims, out_func, num_classes) = vgsl_model._ParseOutputSpec(output_spec='O2s99') self.assertEqual(out_dims, 2...
'Must pad timesteps in labels to match logits.'
def testPadLabels2d(self):
with self.test_session() as sess: ph_logits = tf.placeholder(tf.float32, shape=(None, None, 42)) ph_labels = tf.placeholder(tf.int64, shape=(None, None)) padded_labels = vgsl_model._PadLabels2d(tf.shape(ph_logits)[1], ph_labels) real_logits = _rand(4, 97, 42) real_labels = _r...
'Must pad height and width in labels to match logits. The tricky thing with 3-d is that the rows and columns need to remain intact, so we\'ll test it with small known data.'
def testPadLabels3d(self):
with self.test_session() as sess: ph_logits = tf.placeholder(tf.float32, shape=(None, None, None, 42)) ph_labels = tf.placeholder(tf.int64, shape=(None, None, None)) padded_labels = vgsl_model._PadLabels3d(ph_logits, ph_labels) real_logits = _rand(1, 3, 4, 42) real_labels = n...
'Tests that the output sizes match when training/running real 0d data. Uses mnist with dual summarizing LSTMs to reduce to a single value.'
def testEndToEndSizes0d(self):
filename = _testdata('mnist-tiny') with self.test_session() as sess: model = vgsl_model.InitNetwork(filename, model_spec='4,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lfxs16]O0s12', mode='train') tf.global_variables_initializer().run(session=sess) coord = tf.train.Coordinator() tf.trai...
'Tests that the output sizes match when training with CTC. Basic bidi LSTM on top of convolution and summarizing LSTM with CTC.'
def testEndToEndSizes1dCTC(self):
filename = _testdata('arial-32-tiny') with self.test_session() as sess: model = vgsl_model.InitNetwork(filename, model_spec='2,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lbx100]O1c105', mode='train') tf.global_variables_initializer().run(session=sess) coord = tf.train.Coordinator() tf....
'Tests that the output sizes match when training/running 1 data. Convolution, summarizing LSTM with fwd rev fwd to allow no CTC.'
def testEndToEndSizes1dFixed(self):
filename = _testdata('numbers-16-tiny') with self.test_session() as sess: model = vgsl_model.InitNetwork(filename, model_spec='8,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lfx64 Lrx64 Lfx64]O1s12', mode='train') tf.global_variables_initializer().run(session=sess) coord = tf.train.Coordin...
'Tests that a tiled input can be reshaped to the batch dimension.'
def testReshapeTile(self):
fake = tf.placeholder(tf.float32, shape=(None, None, None, self.depth), name='inputs') real = _rand(self.batch_size, self.im_height, self.im_width, self.depth) with self.test_session() as sess: outputs = shapes.transposing_reshape(fake, src_dim=2, part_a=3, part_b=(-1), dest_dim_a=0, dest_dim_b=2) ...
'Tests that depth can be reshaped to the x dimension.'
def testReshapeDepth(self):
fake = tf.placeholder(tf.float32, shape=(None, None, None, self.depth), name='inputs') real = _rand(self.batch_size, self.im_height, self.im_width, self.depth) with self.test_session() as sess: outputs = shapes.transposing_reshape(fake, src_dim=3, part_a=4, part_b=(-1), dest_dim_a=2, dest_dim_b=3) ...
'Case: dest_a == src, dest_b < src: Split with Least sig part going left.'
def testTransposingReshape_2_2_3_2_1(self):
with self.test_session() as sess: fake = tf.placeholder(tf.float32, shape=(None, None, None, 2), name='inputs') outputs = shapes.transposing_reshape(fake, src_dim=2, part_a=2, part_b=3, dest_dim_a=2, dest_dim_b=1) real = np.arange(120).reshape((5, 2, 6, 2)) np_array = sess.run([outpu...
'Case: dest_a == src, dest_b > src: Split with Least sig part going right.'
def testTransposingReshape_2_2_3_2_3(self):
with self.test_session() as sess: fake = tf.placeholder(tf.float32, shape=(None, None, None, 2), name='inputs') outputs = shapes.transposing_reshape(fake, src_dim=2, part_a=2, part_b=3, dest_dim_a=2, dest_dim_b=3) real = np.arange(120).reshape((5, 2, 6, 2)) np_array = sess.run([outpu...
'Case: dest_a == src, dest_b == src. Transpose within dimension 2.'
def testTransposingReshape_2_2_3_2_2(self):
with self.test_session() as sess: fake = tf.placeholder(tf.float32, shape=(None, None, None, 2), name='inputs') outputs = shapes.transposing_reshape(fake, src_dim=2, part_a=2, part_b=3, dest_dim_a=2, dest_dim_b=2) real = np.arange(120).reshape((5, 2, 6, 2)) np_array = sess.run([outpu...
'Case: dest_a < src, dest_b == src. Split with Most sig part going left.'
def testTransposingReshape_2_2_3_1_2(self):
with self.test_session() as sess: fake = tf.placeholder(tf.float32, shape=(None, None, None, 2), name='inputs') outputs = shapes.transposing_reshape(fake, src_dim=2, part_a=2, part_b=3, dest_dim_a=1, dest_dim_b=2) real = np.arange(120).reshape((5, 2, 6, 2)) np_array = sess.run([outpu...
'Case: dest_a < src, dest_b == src. Split with Most sig part going right.'
def testTransposingReshape_2_2_3_3_2(self):
with self.test_session() as sess: fake = tf.placeholder(tf.float32, shape=(None, None, None, 2), name='inputs') outputs = shapes.transposing_reshape(fake, src_dim=2, part_a=2, part_b=3, dest_dim_a=3, dest_dim_b=2) real = np.arange(120).reshape((5, 2, 6, 2)) np_array = sess.run([outpu...
'Tests that the output of the graph of the given spec has target_shape.'
def ExpectScaledSize(self, spec, target_shape, factor=1):
with tf.Graph().as_default(): with self.test_session() as sess: self.SetupInputs() vgsl = vgslspecs.VGSLSpecs(self.ph_widths, self.ph_heights, True) outputs = vgsl.Build(self.ph_image, spec) target_widths = tf.div(self.in_widths, factor).eval() tar...
'Test all types of Conv. There is no scaling.'
def testSameSizeConv(self):
self.ExpectScaledSize('[Cs{MyConv}5,5,16 Ct3,3,12 Cr4,4,24 Cl5,5,64]', (self.batch_size, self.max_height, self.max_width, 64))
'Test all non-reducing LSTMs. Output depth is doubled with BiDi.'
def testSameSizeLSTM(self):
self.ExpectScaledSize('[Lfx16 Lrx8 Do Lbx24 Lfy12 Do{MyDo} Lry7 Lby32]', (self.batch_size, self.max_height, self.max_width, 64))
'Parallel affects depth, but not scale.'
def testSameSizeParallel(self):
self.ExpectScaledSize('[Cs5,5,16 (Lfx{MyLSTM}32 Lrx32 Lbx16)]', (self.batch_size, self.max_height, self.max_width, 96))
'Test a heterogeneous series with scaling.'
def testScalingOps(self):
self.ExpectScaledSize('[Cs5,5,16 Mp{MyPool}2,2 Ct3,3,32 Mp3,3 Lfx32 Lry64]', (self.batch_size, (self.max_height / 6), (self.max_width / 6), 64), 6)
'Test a heterogeneous series with reduction of x-dimension.'
def testXReduction(self):
self.ExpectScaledSize('[Cr5,5,16 Mp2,2 Ct3,3,32 Mp3,3 Lfxs32 Lry64]', (self.batch_size, (self.max_height / 6), 1, 64), 6)
'Test a heterogeneous series with reduction of y-dimension.'
def testYReduction(self):
self.ExpectScaledSize('[Cl5,5,16 Mp2,2 Ct3,3,32 Mp3,3 Lfys32 Lfx64]', (self.batch_size, 1, (self.max_width / 6), 64), 6)
'Test a heterogeneous series with reduction to 0-d.'
def testXYReduction(self):
self.ExpectScaledSize('[Cr5,5,16 Lfys32 Lfxs64 Fr{MyFC}16 Ft20 Fl12 Fs32 Fm40]', (self.batch_size, 1, 1, 40))
'Tests that a tiled input can be reshaped to the batch dimension.'
def testReshapeTile(self):
self.ExpectScaledSize('[S2(3x0)0,2 Cr5,5,16 Lfys16]', ((self.batch_size * 3), 1, (self.max_width / 3), 16), 3)
'Tests that depth can be reshaped to the x dimension.'
def testReshapeDepth(self):
self.ExpectScaledSize('[Cl5,5,16 Mp3,3 (Lrys32 Lbys16 Lfys32) S3(3x0)2,3]', (self.batch_size, 1, self.max_width, 32))
'Constructs a Decoder. Reads the text file describing the encoding and build the encoder. The text file contains lines of the form: <code>[,<code>]*\t<string> Each line defines a mapping from a sequence of one or more integer codes to a corresponding utf-8 string. Args: filename: Name of file defining the decoding se...
def __init__(self, filename):
self.decoder = [] if filename: self._InitializeDecoder(filename)
'Evaluate a model in softmax mode. Adds char, word recall and sequence error rate events to the sw summary writer, and returns them as well TODO(rays) Add LogisticEval. Args: sess: A tensor flow Session. model: The model to run in the session. Requires a VGSLImageModel or any other class that has a using_ctc attribute...
def SoftmaxEval(self, sess, model, num_steps):
coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) total_label_counts = ec.ErrorCounts(0, 0, 0, 0) total_word_counts = ec.ErrorCounts(0, 0, 0, 0) sequence_errors = 0 for _ in xrange(num_steps): (softmax_result, labels) = model.RunAStep(sess) ...
'Decodes CTC output to a string. Extracts only sequences of codes that are allowed by self.decoder. Labels that make illegal code sequences are dropped. Note that, by its nature of taking only top choices, this is much weaker than a full-blown beam search that considers all the softmax outputs. For languages without ma...
def StringFromCTC(self, ctc_labels, merge_dups, null_label):
codes = self._CodesFromCTC(ctc_labels, merge_dups, null_label) length = len(codes) if (length == 0): return '' strings = [] partials = [] for pos in xrange(length): code = codes[pos] parts = self.decoder[code] partials.append([]) strings.append('') ...
'Reads the decoder file and initializes self.decoder from it. Args: filename: Name of text file mapping codes to utf8 strings. Raises: ValueError: if the input file is not parsed correctly.'
def _InitializeDecoder(self, filename):
line_re = re.compile('(?P<codes>\\d+(,\\d+)*)\\t(?P<utf8>.+)') with tf.gfile.GFile(filename) as f: for line in f: m = line_re.match(line) if (m is None): raise ValueError('Unmatched line:', line) str_codes = m.groupdict()['codes'].split(',') ...
'Collapses CTC output to regular output. Args: ctc_labels: List of class labels including null characters to remove. merge_dups: If True, Duplicate labels will be merged. null_label: Label value to ignore. All trailing zeros are removed!! TODO(rays) This may become a problem with non-CTC models. If using charset, this ...
def _CodesFromCTC(self, ctc_labels, merge_dups, null_label):
out_labels = [] prev_label = (-1) zeros_needed = 0 for label in ctc_labels: if (label == null_label): prev_label = (-1) elif ((label != prev_label) or (not merge_dups)): if (label == 0): zeros_needed += 1 else: if (merge...
'Initialization. Currently only support amortized tracking. Args: total_examples: total number of examples.'
def __init__(self, total_examples):
    """Create an amortized privacy accountant.

    Args:
      total_examples: positive int, total number of training examples;
        used later to amortize per-batch spending.
    """
    assert total_examples > 0
    self._total_examples = total_examples
    # Running totals of squared-epsilon and delta spent so far, kept as
    # non-trainable TF variables so spending accumulates inside the graph.
    eps_init = tf.zeros([1])
    delta_init = tf.zeros([1])
    self._eps_squared_sum = tf.Variable(
        eps_init, trainable=False, name='eps_squared_sum')
    self._delta_sum = tf.Variable(
        delta_init, trainable=False, name='delta_sum')
'Accumulate the privacy spending. Currently only support approximate privacy. Here we assume we use Gaussian noise on randomly sampled batch so we get better composition: 1. the per batch privacy is computed using privacy amplication via sampling bound; 2. the composition is done using the composition with Gaussian noi...
def accumulate_privacy_spending(self, eps_delta, unused_sigma, num_examples):
(eps, delta) = eps_delta with tf.control_dependencies([tf.Assert(tf.greater(delta, 0), ['delta needs to be greater than 0'])]): amortize_ratio = ((tf.cast(num_examples, tf.float32) * 1.0) / self._total_examples) amortize_eps = tf.reshape(tf.log((1.0 + (amortize_ratio * (tf.exp(...
'Report the spending so far. Args: sess: the session to run the tensor. target_eps: the target epsilon. Unused. Returns: the list containing a single EpsDelta, with values as Python floats (as opposed to numpy.float64). This is to be consistent with MomentAccountant which can return a list of (eps, delta) pair.'
def get_privacy_spent(self, sess, target_eps=None):
    """Report the privacy spending accumulated so far.

    Args:
      sess: the session used to evaluate the accumulator variables.
      target_eps: unused; accepted only so this accountant matches the
        MomentsAccountant interface.

    Returns:
      A single-element list holding one EpsDelta whose fields are Python
      floats (not numpy.float64), for consistency with MomentsAccountant.
    """
    del target_eps  # Unused; kept for a uniform accountant interface.
    eps_sq, delta = sess.run([self._eps_squared_sum, self._delta_sum])
    return [EpsDelta(math.sqrt(eps_sq), float(delta))]
'Initialize a MomentsAccountant. Args: total_examples: total number of examples. moment_orders: the order of moments to keep.'
def __init__(self, total_examples, moment_orders=32):
assert (total_examples > 0) self._total_examples = total_examples self._moment_orders = (moment_orders if isinstance(moment_orders, (list, tuple)) else range(1, (moment_orders + 1))) self._max_moment_order = max(self._moment_orders) assert (self._max_moment_order < 100), 'The moment order i...
'Compute high moment of privacy loss. Args: sigma: the noise sigma, in the multiples of the sensitivity. q: the sampling ratio. moment_order: the order of moment. Returns: log E[exp(moment_order * X)]'
@abc.abstractmethod
def _compute_log_moment(self, sigma, q, moment_order):
    """Compute a high moment of the privacy loss (abstract).

    Subclasses implement the actual moment computation.

    Args:
      sigma: the noise sigma, in multiples of the sensitivity.
      q: the sampling ratio.
      moment_order: the order of the moment.

    Returns:
      log E[exp(moment_order * X)], where X is the privacy loss.
    """
    pass
'Accumulate privacy spending. In particular, accounts for privacy spending when we assume there are num_examples, and we are releasing the vector (sum_{i=1}^{num_examples} x_i) + Normal(0, stddev=l2norm_bound*sigma) where l2norm_bound is the maximum l2_norm of each example x_i, and the num_examples have been randomly s...
def accumulate_privacy_spending(self, unused_eps_delta, sigma, num_examples):
q = ((tf.cast(num_examples, tf.float64) * 1.0) / self._total_examples) moments_accum_ops = [] for i in range(len(self._log_moments)): moment = self._compute_log_moment(sigma, q, self._moment_orders[i]) moments_accum_ops.append(tf.assign_add(self._log_moments[i], moment)) return tf.group(...
'Compute delta for given log_moments and eps. Args: log_moments: the log moments of privacy loss, in the form of pairs of (moment_order, log_moment) eps: the target epsilon. Returns: delta'
def _compute_delta(self, log_moments, eps):
min_delta = 1.0 for (moment_order, log_moment) in log_moments: if (math.isinf(log_moment) or math.isnan(log_moment)): sys.stderr.write(('The %d-th order is inf or Nan\n' % moment_order)) continue if (log_moment < (moment_order * eps)): min_de...
'Compute privacy spending in (e, d)-DP form for a single or list of eps. Args: sess: the session to run the tensor. target_eps: a list of target epsilon\'s for which we would like to compute corresponding delta value. target_deltas: a list of target deltas for which we would like to compute the corresponding eps value....
def get_privacy_spent(self, sess, target_eps=None, target_deltas=None):
assert ((target_eps is None) ^ (target_deltas is None)) eps_deltas = [] log_moments = sess.run(self._log_moments) log_moments_with_order = zip(self._moment_orders, log_moments) if (target_eps is not None): for eps in target_eps: eps_deltas.append(EpsDelta(eps, self._compute_delta...
'Initialization. Args: total_examples: total number of examples. moment_orders: the order of moments to keep.'
def __init__(self, total_examples, moment_orders=32):
    """Initialize the Gaussian moments accountant.

    Args:
      total_examples: total number of examples.
      moment_orders: the order of moments to keep (int upper bound, or an
        explicit list/tuple of orders — see the parent class).
    """
    # NOTE(review): super(self.__class__, self) is fragile — if this class
    # is ever subclassed, self.__class__ resolves to the subclass and this
    # call recurses infinitely. Prefer naming the class explicitly (or
    # zero-argument super() once the codebase is Python 3 only).
    super(self.__class__, self).__init__(total_examples, moment_orders)
    # Precompute binomial coefficients up to the max moment order; used by
    # the moment computations in this class.
    self._binomial_table = utils.GenerateBinomialTable(self._max_moment_order)
'Compute 0 to t-th differential moments for Gaussian variable. E[(P(x+s)/P(x+s-1)-1)^t] = sum_{i=0}^t (t choose i) (-1)^{t-i} E[(P(x+s)/P(x+s-1))^i] = sum_{i=0}^t (t choose i) (-1)^{t-i} E[exp(-i*(2*x+2*s-1)/(2*sigma^2))] = sum_{i=0}^t (t choose i) (-1)^{t-i} exp(i(i+1-2*s)/(2 sigma^2)) Args: sigma: the noise sigma, in...
def _differential_moments(self, sigma, s, t):
assert (t <= self._max_moment_order), ('The order of %d is out of the upper bound %d.' % (t, self._max_moment_order)) binomial = tf.slice(self._binomial_table, [0, 0], [(t + 1), (t + 1)]) signs = numpy.zeros(((t + 1), (t + 1)), dtype=numpy.float64) for i in range((t + 1)): ...
'Compute high moment of privacy loss. Args: sigma: the noise sigma, in the multiples of the sensitivity. q: the sampling ratio. moment_order: the order of moment. Returns: log E[exp(moment_order * X)]'
def _compute_log_moment(self, sigma, q, moment_order):
assert (moment_order <= self._max_moment_order), ('The order of %d is out of the upper bound %d.' % (moment_order, self._max_moment_order)) binomial_table = tf.slice(self._binomial_table, [moment_order, 0], [1, (moment_order + 1)]) qs = tf.exp((tf.constant([(i * 1.0) for i in r...
'Associates `op_name` key with `pxg_class` value. Registers `pxg_class` as the class that will be called to perform per-example differentiation through ops with `op_name`. Args: op_name: String op name. pxg_class: An instance of any class with the same signature as MatMulPXG.'
def Register(self, op_name, pxg_class):
    """Associate `op_name` with `pxg_class` in the rule registry.

    After registration, per-example differentiation through ops whose name
    equals `op_name` is performed by instantiating `pxg_class`.

    Args:
      op_name: String op name.
      pxg_class: Any class whose signature matches MatMulPXG.
    """
    self.d[op_name] = pxg_class
'Construct an instance of the rule for `op`. Args: op: The Operation to differentiate through. colocate_gradients_with_ops: currently unsupported gate_gradients: currently unsupported'
def __init__(self, op, colocate_gradients_with_ops=False, gate_gradients=False):
    """Set up a per-example gradient rule for a single MatMul op.

    Args:
      op: the Operation to differentiate through; must be a MatMul node.
      colocate_gradients_with_ops: currently unsupported.
      gate_gradients: currently unsupported.
    """
    # This rule only knows how to handle plain MatMul nodes.
    assert op.node_def.op == 'MatMul'
    self.op = op
    self.colocate_gradients_with_ops = colocate_gradients_with_ops
    self.gate_gradients = gate_gradients
'Build the graph for the per-example gradient through the op. Assumes that the MatMul was called with a design matrix with examples in rows as the first argument and parameters as the second argument. Args: x: The Tensor to differentiate with respect to. This tensor must represent the weights. z_grads: The list of grad...
def __call__(self, x, z_grads):
idx = list(self.op.inputs).index(x) assert (idx != (-1)) assert (len(z_grads) == len(self.op.outputs)) assert (idx == 1) (x, _) = self.op.inputs (z_grads,) = z_grads x_expanded = tf.expand_dims(x, 2) z_grads_expanded = tf.expand_dims(z_grads, 1) return tf.multiply(x_expanded, z_grads...
'conv2d run separately per example, to help compute per-example gradients. Args: input_: tensor containing a minibatch of images / feature maps. Shape [batch_size, rows, columns, channels] w: convolution kernels. Shape [kernel rows, kernel columns, input channels, output channels] strides: passed through to regular con...
def _PxConv2DBuilder(self, input_, w, strides, padding):
input_shape = [int(e) for e in input_.get_shape()] batch_size = input_shape[0] input_px = [tf.slice(input_, ([example] + ([0] * 3)), ([1] + input_shape[1:])) for example in xrange(batch_size)] for input_x in input_px: assert (int(input_x.get_shape()[0]) == 1) w_px = [tf.identity(w) for examp...
'Construct a differentially private gradient descent optimizer. The optimizer uses fixed privacy budget for each batch of training. Args: learning_rate: for GradientDescentOptimizer. eps_delta: EpsDelta pair for each epoch. sanitizer: for sanitizing the graident. sigma: noise sigma. If None, use eps_delta pair to compu...
def __init__(self, learning_rate, eps_delta, sanitizer, sigma=None, use_locking=False, name='DPGradientDescent', batches_per_lot=1):
super(DPGradientDescentOptimizer, self).__init__(learning_rate, use_locking, name) self._batches_per_lot = batches_per_lot self._grad_accum_dict = {} if (batches_per_lot > 1): self._batch_count = tf.Variable(1, dtype=tf.int32, trainable=False, name='batch_count') var_list = tf.trainable_...
'Compute the sanitized gradients. Args: loss: the loss tensor. var_list: the optional variables. add_noise: if true, then add noise. Always clip. Returns: a pair of (list of sanitized gradients) and privacy spending accumulation operations. Raises: TypeError: if var_list contains non-variable.'
def compute_sanitized_gradients(self, loss, var_list=None, add_noise=True):
self._assert_valid_dtypes([loss]) xs = [tf.convert_to_tensor(x) for x in var_list] px_grads = per_example_gradients.PerExampleGradients(loss, xs) sanitized_grads = [] for (px_grad, v) in zip(px_grads, var_list): tensor_name = utils.GetTensorOpName(v) sanitized_grad = self._sanitizer....
'Minimize using sanitized gradients. This gets a var_list which is the list of trainable variables. For each var in var_list, we defined a grad_accumulator variable during init. When batches_per_lot > 1, we accumulate the gradient update in those. At the end of each lot, we apply the update back to the variable. This h...
def minimize(self, loss, global_step=None, var_list=None, name=None):
if (var_list is None): var_list = tf.trainable_variables() for var in var_list: if (not isinstance(var, tf.Variable)): raise TypeError(('Argument is not a variable.Variable: %s' % var)) if (self._batches_per_lot == 1): sanitized_grads = self.compute_sanitiz...
'Construct an AmortizedGaussianSanitizer. Args: accountant: the privacy accountant. Expect an amortized one. default_option: the default ClipOptoin.'
def __init__(self, accountant, default_option):
    """Construct an AmortizedGaussianSanitizer.

    Args:
      accountant: the privacy accountant; expected to be an amortized one.
      default_option: the default ClipOption applied when no per-tensor
        option has been registered.
    """
    # Per-tensor overrides are registered later via set_option.
    self._options = {}
    self._accountant = accountant
    self._default_option = default_option
'Set options for an individual tensor. Args: tensor_name: the name of the tensor. option: clip option.'
def set_option(self, tensor_name, option):
    """Register a clipping option override for one tensor.

    Args:
      tensor_name: the name of the tensor the option applies to.
      option: the clip option to use for that tensor.
    """
    self._options[tensor_name] = option
'Sanitize the given tensor. This santize a given tensor by first applying l2 norm clipping and then adding Gaussian noise. It calls the privacy accountant for updating the privacy spending. Args: x: the tensor to sanitize. eps_delta: a pair of eps, delta for (eps,delta)-DP. Use it to compute sigma if sigma is None. sig...
def sanitize(self, x, eps_delta, sigma=None, option=ClipOption(None, None), tensor_name=None, num_examples=None, add_noise=True):
if (sigma is None): (eps, delta) = eps_delta with tf.control_dependencies([tf.Assert(tf.greater(eps, 0), ['eps needs to be greater than 0']), tf.Assert(tf.greater(delta, 0), ['delta needs to be greater than 0'])]): sigma = (tf.sqrt((2.0 * tf.log((1.25 ...
'Initializes the cell. Args: num_units: Number of cell units. w_initializer: Initializer for the "W" (input) parameter matrices. u_initializer: Initializer for the "U" (recurrent) parameter matrices. b_initializer: Initializer for the "b" (bias) parameter vectors. activation: Cell activation function.'
def __init__(self, num_units, w_initializer, u_initializer, b_initializer, activation=tf.nn.tanh):
    """Initialize the GRU cell.

    Args:
      num_units: number of cell units.
      w_initializer: initializer for the "W" (input) parameter matrices.
      u_initializer: initializer for the "U" (recurrent) parameter matrices.
      b_initializer: initializer for the "b" (bias) parameter vectors.
      activation: cell activation function (defaults to tanh).
    """
    self._num_units = num_units
    self._activation = activation
    # Separate initializers for the input (W), recurrent (U) and bias (b)
    # parameters, consumed later when the cell variables are created.
    self._w_initializer = w_initializer
    self._u_initializer = u_initializer
    self._b_initializer = b_initializer
'Returns an initializer for the "W_h" parameter matrix. See equation (23) in the paper. The "W_h" parameter matrix is the concatenation of two parameter submatrices. The matrix returned is [U_z, U_r]. Returns: A Tensor with shape [num_units, 2 * num_units] as described above.'
def _w_h_initializer(self):
def _initializer(shape, dtype=tf.float32, partition_info=None): num_units = self._num_units assert (shape == [num_units, (2 * num_units)]) u_z = self._u_initializer([num_units, num_units], dtype, partition_info) u_r = self._u_initializer([num_units, num_units], dtype, partition_info)...
'Returns an initializer for the "W_x" parameter matrix. See equation (23) in the paper. The "W_x" parameter matrix is the concatenation of two parameter submatrices. The matrix returned is [W_z, W_r]. Args: input_dim: The dimension of the cell inputs. Returns: A Tensor with shape [input_dim, 2 * num_units] as described...
def _w_x_initializer(self, input_dim):
def _initializer(shape, dtype=tf.float32, partition_info=None): num_units = self._num_units assert (shape == [input_dim, (2 * num_units)]) w_z = self._w_initializer([input_dim, num_units], dtype, partition_info) w_r = self._w_initializer([input_dim, num_units], dtype, partition_info)...
'GRU cell with layer normalization.'
def __call__(self, inputs, state, scope=None):
input_dim = inputs.get_shape().as_list()[1] num_units = self._num_units with tf.variable_scope((scope or 'gru_cell')): with tf.variable_scope('gates'): w_h = tf.get_variable('w_h', [num_units, (2 * num_units)], initializer=self._w_h_initializer()) w_x = tf.get_variable('w_x',...
'Basic setup. The actual TensorFlow graph is constructed in build(). Args: config: Object containing configuration parameters. mode: "train", "eval" or "encode". input_reader: Subclass of tf.ReaderBase for reading the input serialized tf.Example protocol buffers. Defaults to TFRecordReader. Raises: ValueError: If mode ...
def __init__(self, config, mode='train', input_reader=None):
if (mode not in ['train', 'eval', 'encode']): raise ValueError(('Unrecognized mode: %s' % mode)) self.config = config self.mode = mode self.reader = (input_reader if input_reader else tf.TFRecordReader()) self.uniform_initializer = tf.random_uniform_initializer(minval=(- self.config.un...
'Builds the ops for reading input data. Outputs: self.encode_ids self.decode_pre_ids self.decode_post_ids self.encode_mask self.decode_pre_mask self.decode_post_mask'
def build_inputs(self):
if (self.mode == 'encode'): encode_ids = None decode_pre_ids = None decode_post_ids = None encode_mask = tf.placeholder(tf.int8, (None, None), name='encode_mask') decode_pre_mask = None decode_post_mask = None else: input_queue = input_ops.prefetch_input_d...
'Builds the word embeddings. Inputs: self.encode_ids self.decode_pre_ids self.decode_post_ids Outputs: self.encode_emb self.decode_pre_emb self.decode_post_emb'
def build_word_embeddings(self):
if (self.mode == 'encode'): encode_emb = tf.placeholder(tf.float32, (None, None, self.config.word_embedding_dim), 'encode_emb') decode_pre_emb = None decode_post_emb = None else: word_emb = tf.get_variable(name='word_embedding', shape=[self.config.vocab_size, self.config.word_emb...