desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Initializes a GRU cell. The Variables of the GRU cell are initialized in a way that exactly matches the skip-thoughts paper: recurrent weights are initialized from random orthonormal matrices and non-recurrent weights are initialized from random uniform matrices. Args: num_units: Number of output units. Returns: cell:...
def _initialize_gru_cell(self, num_units):
    """Builds a LayerNormGRUCell matching the skip-thoughts initialization.

    Recurrent (u) weights come from random orthonormal matrices; the
    non-recurrent (w) weights come from the model's uniform initializer;
    biases start at zero.

    Args:
      num_units: Number of output units.

    Returns:
      The configured GRU cell.
    """
    return gru_cell.LayerNormGRUCell(
        num_units,
        w_initializer=self.uniform_initializer,
        u_initializer=random_orthonormal_initializer,
        b_initializer=tf.constant_initializer(0.0))
'Builds the sentence encoder. Inputs: self.encode_emb self.encode_mask Outputs: self.thought_vectors Raises: ValueError: if config.bidirectional_encoder is True and config.encoder_dim is odd.'
def build_encoder(self):
with tf.variable_scope('encoder') as scope: length = tf.to_int32(tf.reduce_sum(self.encode_mask, 1), name='length') if self.config.bidirectional_encoder: if (self.config.encoder_dim % 2): raise ValueError('encoder_dim must be even when using a bidirec...
'Builds a sentence decoder. Args: name: Decoder name. embeddings: Batch of sentences to decode; a float32 Tensor with shape [batch_size, padded_length, emb_dim]. targets: Batch of target word ids; an int64 Tensor with shape [batch_size, padded_length]. mask: A 0/1 Tensor with shape [batch_size, padded_length]. initial_...
def _build_decoder(self, name, embeddings, targets, mask, initial_state, reuse_logits):
cell = self._initialize_gru_cell(self.config.encoder_dim) with tf.variable_scope(name) as scope: decoder_input = tf.pad(embeddings[:, :(-1), :], [[0, 0], [1, 0], [0, 0]], name='input') length = tf.reduce_sum(mask, 1, name='length') (decoder_output, _) = tf.nn.dynamic_rnn(cell=cell, input...
'Builds the sentence decoders. Inputs: self.decode_pre_emb self.decode_post_emb self.decode_pre_ids self.decode_post_ids self.decode_pre_mask self.decode_post_mask self.thought_vectors Outputs: self.target_cross_entropy_losses self.target_cross_entropy_loss_weights'
def build_decoders(self):
    """Builds the "previous" and "next" sentence decoders.

    Does nothing in encode mode. Both decoders condition on the same
    thought vectors; the second decoder reuses the logits variables.
    """
    if self.mode == 'encode':
        return
    self._build_decoder('decoder_pre', self.decode_pre_emb,
                        self.decode_pre_ids, self.decode_pre_mask,
                        self.thought_vectors, False)
    self._build_decoder('decoder_post', self.decode_post_emb,
                        self.decode_post_ids, self.decode_post_mask,
                        self.thought_vectors, True)
'Builds the loss Tensor. Outputs: self.total_loss'
def build_loss(self):
    """Builds the total-loss Tensor and its scalar summary.

    Does nothing in encode mode. Outputs: self.total_loss.
    """
    if self.mode == 'encode':
        return
    total_loss = tf.losses.get_total_loss()
    tf.summary.scalar('losses/total', total_loss)
    self.total_loss = total_loss
'Builds the global step Tensor. Outputs: self.global_step'
def build_global_step(self):
    """Creates the global step Tensor and stores it on the model.

    Outputs: self.global_step.
    """
    global_step = tf.contrib.framework.create_global_step()
    self.global_step = global_step
'Creates all ops for training, evaluation or encoding.'
def build(self):
    """Creates all ops for training, evaluation or encoding.

    Runs the build steps in dependency order: inputs, word embeddings,
    encoder, decoders, loss, global step.
    """
    build_steps = (
        self.build_inputs,
        self.build_word_embeddings,
        self.build_encoder,
        self.build_decoders,
        self.build_loss,
        self.build_global_step,
    )
    for build_step in build_steps:
        build_step()
'Counts the number of parameters in the model at top level scope.'
def _countModelParameters(self):
counter = {} for v in tf.global_variables(): name = v.op.name.split('/')[0] num_params = v.get_shape().num_elements() if (not num_params): self.fail(('Could not infer num_elements from Variable %s' % v.op.name)) counter[name] = (counter.get(name, 0) ...
'Verifies the number of parameters in the model.'
def _checkModelParameters(self):
    """Verifies that each top-level scope has the expected parameter count."""
    expected_param_counts = {
        'word_embedding': 12400000,
        'encoder': 21772800,
        'decoder_pre': 21772800,
        'decoder_post': 21772800,
        'logits': 48020000,
        'global_step': 1,
    }
    self.assertDictEqual(expected_param_counts, self._countModelParameters())
'Verifies that the model produces expected outputs. Args: expected_shapes: A dict mapping Tensor or Tensor name to expected output shape. feed_dict: Values of Tensors to feed into Session.run().'
def _checkOutputs(self, expected_shapes, feed_dict=None):
fetches = expected_shapes.keys() with self.test_session() as sess: sess.run(tf.global_variables_initializer()) outputs = sess.run(fetches, feed_dict) for (index, output) in enumerate(outputs): tensor = fetches[index] expected = expected_shapes[tensor] actual = output....
'Loads a skip-thoughts model. Args: model_config: Object containing parameters for building the model. vocabulary_file: Path to vocabulary file containing a list of newline- separated words where the word id is the corresponding 0-based index in the file. embedding_matrix_file: Path to a serialized numpy array of shape...
def load_model(self, model_config, vocabulary_file, embedding_matrix_file, checkpoint_path):
tf.logging.info('Reading vocabulary from %s', vocabulary_file) with tf.gfile.GFile(vocabulary_file, mode='r') as f: lines = list(f.readlines()) reverse_vocab = [line.decode('utf-8').strip() for line in lines] tf.logging.info('Loaded vocabulary with %d words.', len(reverse_vo...
'Encodes a sequence of sentences as skip-thought vectors. Args: data: A list of input strings. use_norm: If True, normalize output skip-thought vectors to unit L2 norm. verbose: Whether to log every batch. batch_size: Batch size for the RNN encoders. use_eos: If True, append the end-of-sentence word to each input sente...
def encode(self, data, use_norm=True, verbose=False, batch_size=128, use_eos=False):
if (not self.encoders): raise ValueError('Must call load_model at least once before calling encode.') encoded = [] for (encoder, sess) in zip(self.encoders, self.sessions): encoded.append(np.array(encoder.encode(sess, data, use_norm=use_norm, verbose=verbose, batch_si...
'Closes the active TensorFlow Sessions.'
def close(self):
    """Closes every active TensorFlow Session held by this object."""
    for session in self.sessions:
        session.close()
'Initializes the encoder. Args: embeddings: Dictionary of word to embedding vector (1D numpy array).'
def __init__(self, embeddings):
    """Initializes the encoder.

    Args:
      embeddings: Dictionary of word to embedding vector (1D numpy array).
    """
    self._embeddings = embeddings
    # Punkt model splits raw input strings into sentences before word
    # tokenization.
    self._sentence_detector = nltk.data.load('tokenizers/punkt/english.pickle')
'Creates a function that restores a model from checkpoint. Args: checkpoint_path: Checkpoint file or a directory containing a checkpoint file. saver: Saver for restoring variables from the checkpoint file. Returns: restore_fn: A function such that restore_fn(sess) loads model variables from the checkpoint file. Raises:...
def _create_restore_fn(self, checkpoint_path, saver):
if tf.gfile.IsDirectory(checkpoint_path): latest_checkpoint = tf.train.latest_checkpoint(checkpoint_path) if (not latest_checkpoint): raise ValueError(('No checkpoint file found in: %s' % checkpoint_path)) checkpoint_path = latest_checkpoint def _restore_fn(ses...
'Builds the inference graph from a configuration object. Args: model_config: Object containing configuration for building the model. checkpoint_path: Checkpoint file or a directory containing a checkpoint file. Returns: restore_fn: A function such that restore_fn(sess) loads model variables from the checkpoint file.'
def build_graph_from_config(self, model_config, checkpoint_path):
    """Builds the encode-mode inference graph from a configuration object.

    Args:
      model_config: Object containing configuration for building the model.
      checkpoint_path: Checkpoint file or a directory containing one.

    Returns:
      A function such that restore_fn(sess) loads model variables from the
      checkpoint file.
    """
    tf.logging.info('Building model.')
    model = skip_thoughts_model.SkipThoughtsModel(model_config, mode='encode')
    model.build()
    return self._create_restore_fn(checkpoint_path, tf.train.Saver())
'Builds the inference graph from serialized GraphDef and SaverDef protos. Args: graph_def_file: File containing a serialized GraphDef proto. saver_def_file: File containing a serialized SaverDef proto. checkpoint_path: Checkpoint file or a directory containing a checkpoint file. Returns: restore_fn: A function such tha...
def build_graph_from_proto(self, graph_def_file, saver_def_file, checkpoint_path):
tf.logging.info('Loading GraphDef from file: %s', graph_def_file) graph_def = tf.GraphDef() with tf.gfile.FastGFile(graph_def_file, 'rb') as f: graph_def.ParseFromString(f.read()) tf.import_graph_def(graph_def, name='') tf.logging.info('Loading SaverDef from file: %s'...
'Tokenizes an input string into a list of words.'
def _tokenize(self, item):
    """Tokenizes an input string into a flat list of words."""
    # Split into sentences first, then into words within each sentence.
    return [word
            for sentence in self._sentence_detector.tokenize(item)
            for word in nltk.tokenize.word_tokenize(sentence)]
'Returns the embedding of a word.'
def _word_to_embedding(self, w):
    """Returns the embedding of a word, falling back to the UNK embedding."""
    table = self._embeddings
    # The UNK lookup happens on every call (matching the original
    # dict.get(w, table[UNK]) form), so a missing UNK entry always raises.
    unk_embedding = table[special_words.UNK]
    return table.get(w, unk_embedding)
'Preprocesses text for the encoder. Args: data: A list of input strings. use_eos: Whether to append the end-of-sentence word to each sentence. Returns: embeddings: A list of word embedding sequences corresponding to the input strings.'
def _preprocess(self, data, use_eos):
    """Converts input strings into sequences of word embeddings.

    Args:
      data: A list of input strings.
      use_eos: Whether to append the end-of-sentence word to each sentence.

    Returns:
      A list (one entry per input string) of embedding sequences.
    """
    def embed(item):
        words = self._tokenize(item)
        if use_eos:
            words.append(special_words.EOS)
        return [self._word_to_embedding(w) for w in words]

    return [embed(item) for item in data]
'Encodes a sequence of sentences as skip-thought vectors. Args: sess: TensorFlow Session. data: A list of input strings. use_norm: Whether to normalize skip-thought vectors to unit L2 norm. verbose: Whether to log every batch. batch_size: Batch size for the encoder. use_eos: Whether to append the end-of-sentence word t...
def encode(self, sess, data, use_norm=True, verbose=True, batch_size=128, use_eos=False):
data = self._preprocess(data, use_eos) thought_vectors = [] batch_indices = np.arange(0, len(data), batch_size) for (batch, start_index) in enumerate(batch_indices): if verbose: tf.logging.info('Batch %d / %d.', batch, len(batch_indices)) (embeddings, mask) = _batch_...
'Reads through the analogy question file. Returns: questions: a [n, 4] numpy array containing the analogy question\'s word ids. questions_skipped: questions skipped due to unknown words.'
def read_analogies(self):
questions = [] questions_skipped = 0 with open(self._options.eval_data, 'rb') as analogy_f: for line in analogy_f: if line.startswith(':'): continue words = line.strip().lower().split(' ') ids = [self._word2id.get(w.strip()) for w in words] ...
'Build the graph for the forward pass.'
def forward(self, examples, labels):
opts = self._options init_width = (0.5 / opts.emb_dim) emb = tf.Variable(tf.random_uniform([opts.vocab_size, opts.emb_dim], (- init_width), init_width), name='emb') self._emb = emb sm_w_t = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name='sm_w_t') sm_b = tf.Variable(tf.zeros([opts.vo...
'Build the graph for the NCE loss.'
def nce_loss(self, true_logits, sampled_logits):
opts = self._options true_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(true_logits), logits=true_logits) sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(sampled_logits), logits=sampled_logits) nce_loss_tensor = ((tf.reduce_sum(true_xent) + tf.reduce_sum(...
'Build the graph to optimize the loss function.'
def optimize(self, loss):
opts = self._options words_to_train = float((opts.words_per_epoch * opts.epochs_to_train)) lr = (opts.learning_rate * tf.maximum(0.0001, (1.0 - (tf.cast(self._words, tf.float32) / words_to_train)))) self._lr = lr optimizer = tf.train.GradientDescentOptimizer(lr) train = optimizer.minimize(loss, ...
'Build the eval graph.'
def build_eval_graph(self):
analogy_a = tf.placeholder(dtype=tf.int32) analogy_b = tf.placeholder(dtype=tf.int32) analogy_c = tf.placeholder(dtype=tf.int32) nemb = tf.nn.l2_normalize(self._emb, 1) a_emb = tf.gather(nemb, analogy_a) b_emb = tf.gather(nemb, analogy_b) c_emb = tf.gather(nemb, analogy_c) target = (c_em...
'Build the graph for the full model.'
def build_graph(self):
opts = self._options (words, counts, words_per_epoch, self._epoch, self._words, examples, labels) = word2vec.skipgram_word2vec(filename=opts.train_data, batch_size=opts.batch_size, window_size=opts.window_size, min_count=opts.min_count, subsample=opts.subsample) (opts.vocab_words, opts.vocab_counts, opts.wo...
'Save the vocabulary to a file so the model can be reloaded.'
def save_vocab(self):
    """Writes "word count" lines to vocab.txt so the model can be reloaded."""
    opts = self._options
    vocab_path = os.path.join(opts.save_path, 'vocab.txt')
    with open(vocab_path, 'w') as f:
        for i in xrange(opts.vocab_size):
            word = tf.compat.as_text(opts.vocab_words[i]).encode('utf-8')
            f.write('%s %d\n' % (word, opts.vocab_counts[i]))
'Train the model.'
def train(self):
opts = self._options (initial_epoch, initial_words) = self._session.run([self._epoch, self._words]) summary_op = tf.summary.merge_all() summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph) workers = [] for _ in xrange(opts.concurrent_steps): t = threading.Thread(ta...
'Predict the top 4 answers for analogy questions.'
def _predict(self, analogy):
    """Runs the analogy-prediction op for a batch of questions.

    Args:
      analogy: An int array of shape [n, 3] holding (a, b, c) word ids.

    Returns:
      The predicted word ids for the fourth analogy term.
    """
    feed = {
        self._analogy_a: analogy[:, 0],
        self._analogy_b: analogy[:, 1],
        self._analogy_c: analogy[:, 2],
    }
    idx, = self._session.run([self._analogy_pred_idx], feed)
    return idx
'Evaluate analogy questions and reports accuracy.'
def eval(self):
correct = 0 try: total = self._analogy_questions.shape[0] except AttributeError as e: raise AttributeError('Need to read analogy questions.') start = 0 while (start < total): limit = (start + 2500) sub = self._analogy_questions[start:limit, :] idx ...
'Predict word w3 as in w0:w1 vs w2:w3.'
def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3 and print it.

    Prints the highest-ranked predicted word that is not one of the three
    query words, or 'unknown' when every prediction is a query word.
    """
    wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
    idx = self._predict(wid)
    for c in [self._id2word[i] for i in idx[0, :]]:
        if c not in [w0, w1, w2]:
            print(c)
            break
    else:
        # BUG FIX: 'unknown' used to be printed unconditionally after the
        # loop, so a successful prediction printed both the answer and
        # 'unknown'. The for/else prints it only when no candidate is found.
        print('unknown')
'Prints out nearby words given a list of words.'
def nearby(self, words, num=20):
ids = np.array([self._word2id.get(x, 0) for x in words]) (vals, idx) = self._session.run([self._nearby_val, self._nearby_idx], {self._nearby_word: ids}) for i in xrange(len(words)): print(('\n%s\n=====================================' % words[i])) for (neighbor, distance) in zip(idx[i, :num]...
'Reads through the analogy question file. Returns: questions: a [n, 4] numpy array containing the analogy question\'s word ids. questions_skipped: questions skipped due to unknown words.'
def read_analogies(self):
questions = [] questions_skipped = 0 with open(self._options.eval_data, 'rb') as analogy_f: for line in analogy_f: if line.startswith(':'): continue words = line.strip().lower().split(' ') ids = [self._word2id.get(w.strip()) for w in words] ...
'Build the model graph.'
def build_graph(self):
opts = self._options (words, counts, words_per_epoch, current_epoch, total_words_processed, examples, labels) = word2vec.skipgram_word2vec(filename=opts.train_data, batch_size=opts.batch_size, window_size=opts.window_size, min_count=opts.min_count, subsample=opts.subsample) (opts.vocab_words, opts.vocab_cou...
'Save the vocabulary to a file so the model can be reloaded.'
def save_vocab(self):
    """Saves the vocabulary (one "word count" line each) for model reloading."""
    opts = self._options
    with open(os.path.join(opts.save_path, 'vocab.txt'), 'w') as f:
        for i in xrange(opts.vocab_size):
            encoded_word = tf.compat.as_text(opts.vocab_words[i]).encode('utf-8')
            f.write('%s %d\n' % (encoded_word, opts.vocab_counts[i]))
'Build the evaluation graph.'
def build_eval_graph(self):
opts = self._options analogy_a = tf.placeholder(dtype=tf.int32) analogy_b = tf.placeholder(dtype=tf.int32) analogy_c = tf.placeholder(dtype=tf.int32) nemb = tf.nn.l2_normalize(self._w_in, 1) a_emb = tf.gather(nemb, analogy_a) b_emb = tf.gather(nemb, analogy_b) c_emb = tf.gather(nemb, ana...
'Train the model.'
def train(self):
opts = self._options (initial_epoch, initial_words) = self._session.run([self._epoch, self._words]) workers = [] for _ in xrange(opts.concurrent_steps): t = threading.Thread(target=self._train_thread_body) t.start() workers.append(t) (last_words, last_time) = (initial_words, ...
'Predict the top 4 answers for analogy questions.'
def _predict(self, analogy):
    """Predicts the top 4 answer ids for a batch of analogy questions.

    Args:
      analogy: An int array of shape [n, 3] holding (a, b, c) word ids.

    Returns:
      The predicted word ids for the fourth analogy term.
    """
    feed_dict = {
        self._analogy_a: analogy[:, 0],
        self._analogy_b: analogy[:, 1],
        self._analogy_c: analogy[:, 2],
    }
    results = self._session.run([self._analogy_pred_idx], feed_dict)
    return results[0]
'Evaluate analogy questions and reports accuracy.'
def eval(self):
correct = 0 try: total = self._analogy_questions.shape[0] except AttributeError as e: raise AttributeError('Need to read analogy questions.') start = 0 while (start < total): limit = (start + 2500) sub = self._analogy_questions[start:limit, :] idx ...
'Predict word w3 as in w0:w1 vs w2:w3.'
def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3 and print it.

    Prints the highest-ranked predicted word that is not one of the three
    query words, or 'unknown' when every prediction is a query word.
    """
    wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
    idx = self._predict(wid)
    for c in [self._id2word[i] for i in idx[0, :]]:
        if c not in [w0, w1, w2]:
            print(c)
            break
    else:
        # BUG FIX: 'unknown' used to be printed unconditionally after the
        # loop, so a successful prediction printed both the answer and
        # 'unknown'. The for/else prints it only when no candidate is found.
        print('unknown')
'Prints out nearby words given a list of words.'
def nearby(self, words, num=20):
ids = np.array([self._word2id.get(x, 0) for x in words]) (vals, idx) = self._session.run([self._nearby_val, self._nearby_idx], {self._nearby_word: ids}) for i in xrange(len(words)): print(('\n%s\n=====================================' % words[i])) for (neighbor, distance) in zip(idx[i, :num]...
'Create the model. Args: source_vocab_size: size of the source vocabulary. target_vocab_size: size of the target vocabulary. buckets: a list of pairs (I, O), where I specifies maximum input length that will be processed in that bucket, and O specifies maximum output length. Training instances that have inputs longer th...
def __init__(self, source_vocab_size, target_vocab_size, buckets, size, num_layers, max_gradient_norm, batch_size, learning_rate, learning_rate_decay_factor, use_lstm=False, num_samples=512, forward_only=False, dtype=tf.float32):
self.source_vocab_size = source_vocab_size self.target_vocab_size = target_vocab_size self.buckets = buckets self.batch_size = batch_size self.learning_rate = tf.Variable(float(learning_rate), trainable=False, dtype=dtype) self.learning_rate_decay_op = self.learning_rate.assign((self.learning_ra...
'Run a step of the model feeding the given inputs. Args: session: tensorflow session to use. encoder_inputs: list of numpy int vectors to feed as encoder inputs. decoder_inputs: list of numpy int vectors to feed as decoder inputs. target_weights: list of numpy float vectors to feed as target weights. bucket_id: which b...
def step(self, session, encoder_inputs, decoder_inputs, target_weights, bucket_id, forward_only):
(encoder_size, decoder_size) = self.buckets[bucket_id] if (len(encoder_inputs) != encoder_size): raise ValueError(('Encoder length must be equal to the one in bucket, %d != %d.' % (len(encoder_inputs), encoder_size))) if (len(decoder_inputs) != decoder_size): ...
'Get a random batch of data from the specified bucket, prepare for step. To feed data in step(..) it must be a list of batch-major vectors, while data here contains single length-major cases. So the main logic of this function is to re-index data cases to be in the proper format for feeding. Args: data: a tuple of size...
def get_batch(self, data, bucket_id):
(encoder_size, decoder_size) = self.buckets[bucket_id] (encoder_inputs, decoder_inputs) = ([], []) for _ in xrange(self.batch_size): (encoder_input, decoder_input) = random.choice(data[bucket_id]) encoder_pad = ([data_utils.PAD_ID] * (encoder_size - len(encoder_input))) encoder_input...
'Build the core model within the graph.'
def forward_pass(self, x, input_data_format='channels_last'):
if (self._data_format != input_data_format): if (input_data_format == 'channels_last'): x = tf.transpose(x, [0, 3, 1, 2]) else: x = tf.transpose(x, [0, 2, 3, 1]) x = ((x / 128) - 1) x = self._conv(x, 3, 16, 1) x = self._batch_norm(x) x = self._relu(x) res_...
'Parses a single tf.Example into image and label tensors.'
def parser(self, serialized_example):
features = tf.parse_single_example(serialized_example, features={'image': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.int64)}) image = tf.decode_raw(features['image'], tf.uint8) image.set_shape([((DEPTH * HEIGHT) * WIDTH)]) image = tf.cast(tf.transpose(tf.reshape(image, [DEPTH,...
'Read the images and labels from \'filenames\'.'
def make_batch(self, batch_size):
filenames = self.get_filenames() dataset = tf.contrib.data.TFRecordDataset(filenames).repeat() dataset = dataset.map(self.parser, num_threads=batch_size, output_buffer_size=(2 * batch_size)) if (self.subset == 'train'): min_queue_examples = int((Cifar10DataSet.num_examples_per_epoch(self.subset)...
'Preprocess a single image in [height, width, depth] layout.'
def preprocess(self, image):
    """Preprocesses a single [height, width, depth] image.

    Training images (when distortion is enabled) get the standard CIFAR-10
    augmentation: pad-and-crop plus a random horizontal flip. Eval images
    pass through unchanged.
    """
    if self.subset != 'train' or not self.use_distortion:
        return image
    image = tf.image.resize_image_with_crop_or_pad(image, 40, 40)
    image = tf.random_crop(image, [HEIGHT, WIDTH, DEPTH])
    return tf.image.random_flip_left_right(image)
'ResNet constructor. Args: is_training: if build training or inference model. data_format: the data_format used during computation. one of \'channels_first\' or \'channels_last\'.'
def __init__(self, is_training, data_format):
    """ResNet constructor.

    Args:
      is_training: if build training or inference model.
      data_format: one of 'channels_first' or 'channels_last'; anything else
        fails the assertion below.
    """
    self._is_training = is_training
    assert data_format in ('channels_first', 'channels_last')
    self._data_format = data_format
'Residual unit with 2 sub layers, using Plan A for shortcut connection.'
def _residual_v1(self, x, kernel_size, in_filter, out_filter, stride, activate_before_residual=False):
del activate_before_residual with tf.name_scope('residual_v1') as name_scope: orig_x = x x = self._conv(x, kernel_size, out_filter, stride) x = self._batch_norm(x) x = self._relu(x) x = self._conv(x, kernel_size, out_filter, 1) x = self._batch_norm(x) if (...
'Residual unit with 2 sub layers with preactivation, plan A shortcut.'
def _residual_v2(self, x, in_filter, out_filter, stride, activate_before_residual=False):
with tf.name_scope('residual_v2') as name_scope: if activate_before_residual: x = self._batch_norm(x) x = self._relu(x) orig_x = x else: orig_x = x x = self._batch_norm(x) x = self._relu(x) x = self._conv(x, 3, out_filte...
'Bottleneck residual unit with 3 sub layers, plan B shortcut.'
def _bottleneck_residual_v2(self, x, in_filter, out_filter, stride, activate_before_residual=False):
with tf.name_scope('bottle_residual_v2') as name_scope: if activate_before_residual: x = self._batch_norm(x) x = self._relu(x) orig_x = x else: orig_x = x x = self._batch_norm(x) x = self._relu(x) x = self._conv(x, 1, (o...
'Convolution.'
def _conv(self, x, kernel_size, filters, strides, is_atrous=False):
padding = 'SAME' if ((not is_atrous) and (strides > 1)): pad = (kernel_size - 1) pad_beg = (pad // 2) pad_end = (pad - pad_beg) if (self._data_format == 'channels_first'): x = tf.pad(x, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]) else: ...
'Initializer for ExamplesPerSecondHook. Args: batch_size: Total batch size used to calculate examples/second from global time. every_n_steps: Log stats every n steps. every_n_secs: Log stats every n seconds.'
def __init__(self, batch_size, every_n_steps=100, every_n_secs=None):
if ((every_n_steps is None) == (every_n_secs is None)): raise ValueError('exactly one of every_n_steps and every_n_secs should be provided.') self._timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=every_n_steps, every_secs=every_n_secs) self._step_train_time = 0 ...
'Initializer for GpuParamServerDeviceSetter. Args: worker_device: the device to use for computation Ops. ps_devices: a list of devices to use for Variable Ops. Each variable is assigned to the least loaded device.'
def __init__(self, worker_device, ps_devices):
    """Initializer for GpuParamServerDeviceSetter.

    Args:
      worker_device: the device to use for computation Ops.
      ps_devices: a list of devices to use for Variable Ops.
    """
    self.worker_device = worker_device
    self.ps_devices = ps_devices
    # Tracks bytes placed on each ps device so later variables can be
    # assigned to the least-loaded one.
    self.ps_sizes = [0] * len(self.ps_devices)
'Loads a human readable English name for each softmax node. Args: label_lookup_path: string UID to integer node ID. uid_lookup_path: string UID to human-readable string. Returns: dict from integer node ID to human-readable string.'
def load(self, label_lookup_path, uid_lookup_path):
if (not tf.gfile.Exists(uid_lookup_path)): tf.logging.fatal('File does not exist %s', uid_lookup_path) if (not tf.gfile.Exists(label_lookup_path)): tf.logging.fatal('File does not exist %s', label_lookup_path) proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).rea...
'Basic setup. Args: config: Object containing configuration parameters. mode: "train", "eval" or "inference". train_inception: Whether the inception submodel variables are trainable.'
def __init__(self, config, mode, train_inception=False):
assert (mode in ['train', 'eval', 'inference']) self.config = config self.mode = mode self.train_inception = train_inception self.reader = tf.TFRecordReader() self.initializer = tf.random_uniform_initializer(minval=(- self.config.initializer_scale), maxval=self.config.initializer_scale) self...
'Returns true if the model is built for training mode.'
def is_training(self):
    """Returns True iff the model was built in training mode."""
    mode = self.mode
    return mode == 'train'
'Decodes and processes an image string. Args: encoded_image: A scalar string Tensor; the encoded image. thread_id: Preprocessing thread id used to select the ordering of color distortions. Returns: A float32 Tensor of shape [height, width, 3]; the processed image.'
def process_image(self, encoded_image, thread_id=0):
    """Decodes and processes an image string.

    Args:
      encoded_image: A scalar string Tensor; the encoded image.
      thread_id: Preprocessing thread id used to select the ordering of
        color distortions.

    Returns:
      A float32 Tensor of shape [height, width, 3]; the processed image.
    """
    config = self.config
    return image_processing.process_image(
        encoded_image,
        is_training=self.is_training(),
        height=config.image_height,
        width=config.image_width,
        thread_id=thread_id,
        image_format=config.image_format)
'Input prefetching, preprocessing and batching. Outputs: self.images self.input_seqs self.target_seqs (training and eval only) self.input_mask (training and eval only)'
def build_inputs(self):
if (self.mode == 'inference'): image_feed = tf.placeholder(dtype=tf.string, shape=[], name='image_feed') input_feed = tf.placeholder(dtype=tf.int64, shape=[None], name='input_feed') images = tf.expand_dims(self.process_image(image_feed), 0) input_seqs = tf.expand_dims(input_feed, 1) ...
'Builds the image model subgraph and generates image embeddings. Inputs: self.images Outputs: self.image_embeddings'
def build_image_embeddings(self):
inception_output = image_embedding.inception_v3(self.images, trainable=self.train_inception, is_training=self.is_training()) self.inception_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='InceptionV3') with tf.variable_scope('image_embedding') as scope: image_embeddings = tf.cont...
'Builds the input sequence embeddings. Inputs: self.input_seqs Outputs: self.seq_embeddings'
def build_seq_embeddings(self):
with tf.variable_scope('seq_embedding'): with tf.device('/cpu:0'): embedding_map = tf.get_variable(name='map', shape=[self.config.vocab_size, self.config.embedding_size], initializer=self.initializer) seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs) self.se...
'Builds the model. Inputs: self.image_embeddings self.seq_embeddings self.target_seqs (training and eval only) self.input_mask (training and eval only) Outputs: self.total_loss (training and eval only) self.target_cross_entropy_losses (training and eval only) self.target_cross_entropy_loss_weights (training and eval on...
def build_model(self):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.config.num_lstm_units, state_is_tuple=True) if (self.mode == 'train'): lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell, input_keep_prob=self.config.lstm_dropout_keep_prob, output_keep_prob=self.config.lstm_dropout_keep_prob) with tf.variabl...
'Sets up the function to restore inception variables from checkpoint.'
def setup_inception_initializer(self):
if (self.mode != 'inference'): saver = tf.train.Saver(self.inception_variables) def restore_fn(sess): tf.logging.info('Restoring Inception variables from checkpoint file %s', self.config.inception_checkpoint_file) saver.restore(sess, self.config.inception_ch...
'Sets up the global step Tensor.'
def setup_global_step(self):
    """Sets up the non-trainable global step Variable.

    Outputs: self.global_step.
    """
    self.global_step = tf.Variable(
        initial_value=0,
        name='global_step',
        trainable=False,
        collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
'Creates all ops for training and evaluation.'
def build(self):
    """Creates all ops for training and evaluation, in dependency order."""
    for build_step in (self.build_inputs,
                       self.build_image_embeddings,
                       self.build_seq_embeddings,
                       self.build_model,
                       self.setup_inception_initializer,
                       self.setup_global_step):
        build_step()
'Counts the number of parameters in the inception model at top scope.'
def _countInceptionParameters(self):
counter = {} for v in tf.global_variables(): name_tokens = v.op.name.split('/') if (name_tokens[0] == 'InceptionV3'): name = ('InceptionV3/' + name_tokens[1]) num_params = v.get_shape().num_elements() assert num_params counter[name] = (counter.get(...
'Verifies the number of parameters in the inception model.'
def _verifyParameterCounts(self):
param_counts = self._countInceptionParameters() expected_param_counts = {'InceptionV3/Conv2d_1a_3x3': 960, 'InceptionV3/Conv2d_2a_3x3': 9312, 'InceptionV3/Conv2d_2b_3x3': 18624, 'InceptionV3/Conv2d_3b_1x1': 5360, 'InceptionV3/Conv2d_4a_3x3': 138816, 'InceptionV3/Mixed_5b': 256368, 'InceptionV3/Mixed_5c': 277968...
'Counts the number of parameters in the model at top level scope.'
def _countModelParameters(self):
    """Counts model parameters grouped by top-level variable scope.

    Returns:
      Dict mapping top-level scope name to its total parameter count.
    """
    counter = {}
    for v in tf.global_variables():
        scope = v.op.name.split('/')[0]
        num_params = v.get_shape().num_elements()
        assert num_params
        counter[scope] = counter.get(scope, 0) + num_params
    return counter
'Verifies the number of parameters in the model.'
def _checkModelParameters(self):
    """Verifies the per-scope parameter counts of the model."""
    expected_param_counts = {
        'InceptionV3': 21802784,
        'image_embedding': 1048576,
        'seq_embedding': 6144000,
        'lstm': 2099200,
        'logits': 6156000,
        'global_step': 1,
    }
    self.assertDictEqual(expected_param_counts, self._countModelParameters())
'Verifies that the model produces expected outputs. Args: expected_shapes: A dict mapping Tensor or Tensor name to expected output shape. feed_dict: Values of Tensors to feed into Session.run().'
def _checkOutputs(self, expected_shapes, feed_dict=None):
fetches = expected_shapes.keys() with self.test_session() as sess: sess.run(tf.global_variables_initializer()) outputs = sess.run(fetches, feed_dict) for (index, output) in enumerate(outputs): tensor = fetches[index] expected = expected_shapes[tensor] actual = output....
'Initializes the Caption. Args: sentence: List of word ids in the caption. state: Model state after generating the previous word. logprob: Log-probability of the caption. score: Score of the caption. metadata: Optional metadata associated with the partial sentence. If not None, a list of strings with the same length as...
def __init__(self, sentence, state, logprob, score, metadata=None):
    """Initializes the Caption.

    Args:
      sentence: List of word ids in the caption.
      state: Model state after generating the previous word.
      logprob: Log-probability of the caption.
      score: Score of the caption.
      metadata: Optional metadata associated with the partial sentence.
    """
    self.sentence = sentence
    self.state = state
    self.logprob = logprob
    self.score = score
    self.metadata = metadata
'Compares Captions by score.'
def __cmp__(self, other):
    """Compares Captions by score (Python 2 three-way comparison)."""
    assert isinstance(other, Caption)
    if self.score == other.score:
        return 0
    return -1 if self.score < other.score else 1
'Pushes a new element.'
def push(self, x):
    """Pushes a new element, keeping at most the n largest seen so far."""
    assert self._data is not None
    if len(self._data) < self._n:
        heapq.heappush(self._data, x)
    else:
        # Heap is full: push x and pop the current minimum in one step.
        heapq.heappushpop(self._data, x)
'Extracts all elements from the TopN. This is a destructive operation. The only method that can be called immediately after extract() is reset(). Args: sort: Whether to return the elements in descending sorted order. Returns: A list of data; the top n elements provided to the set.'
def extract(self, sort=False):
    """Extracts all elements; destructive — only reset() may follow.

    Args:
      sort: Whether to return the elements in descending sorted order.

    Returns:
      A list of data; the top n elements provided to the set.
    """
    assert self._data is not None
    data, self._data = self._data, None
    if sort:
        data.sort(reverse=True)
    return data
'Returns the TopN to an empty state.'
def reset(self):
    """Returns the TopN to an empty, usable state."""
    self._data = []
'Initializes the generator. Args: model: Object encapsulating a trained image-to-text model. Must have methods feed_image() and inference_step(). For example, an instance of InferenceWrapperBase. vocab: A Vocabulary object. beam_size: Beam size to use when generating captions. max_caption_length: The maximum caption le...
def __init__(self, model, vocab, beam_size=3, max_caption_length=20,
             length_normalization_factor=0.0):
    """Initializes the caption generator.

    Args:
      model: Object encapsulating a trained image-to-text model; must
        provide feed_image() and inference_step().
      vocab: A Vocabulary object.
      beam_size: Beam size to use when generating captions.
      max_caption_length: The maximum caption length before the search stops.
      length_normalization_factor: If nonzero, captions are scored by
        logprob / length^factor rather than raw logprob.
    """
    self.model = model
    self.vocab = vocab
    self.beam_size = beam_size
    self.max_caption_length = max_caption_length
    self.length_normalization_factor = length_normalization_factor
'Runs beam search caption generation on a single image. Args: sess: TensorFlow Session object. encoded_image: An encoded image string. Returns: A list of Caption sorted by descending score.'
def beam_search(self, sess, encoded_image):
initial_state = self.model.feed_image(sess, encoded_image) initial_beam = Caption(sentence=[self.vocab.start_id], state=initial_state[0], logprob=0.0, score=0.0, metadata=['']) partial_captions = TopN(self.beam_size) partial_captions.push(initial_beam) complete_captions = TopN(self.beam_size) fo...
def build_model(self, model_config):
    """Builds the model for inference.

    Base-class stub: subclasses must override. Calling this implementation
    logs a fatal error.

    Args:
      model_config: Object containing configuration for building the model.
    """
    tf.logging.fatal('Please implement build_model in subclass')
# _create_restore_fn: resolves a checkpoint directory to its latest checkpoint
# file (raising ValueError if none is found) and returns a closure that
# restores variables into a session.
# NOTE(review): the body line below is truncated in this extract
# ("tf.logging.info('Loading model ..."); restore the full source before
# editing code.
'Creates a function that restores a model from checkpoint. Args: checkpoint_path: Checkpoint file or a directory containing a checkpoint file. saver: Saver for restoring variables from the checkpoint file. Returns: restore_fn: A function such that restore_fn(sess) loads model variables from the checkpoint file. Raises:...
def _create_restore_fn(self, checkpoint_path, saver):
if tf.gfile.IsDirectory(checkpoint_path):      checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)      if (not checkpoint_path):        raise ValueError(('No checkpoint file found in: %s' % checkpoint_path))    def _restore_fn(sess):      tf.logging.info('Loading model ...
def build_graph_from_config(self, model_config, checkpoint_path):
    """Builds the inference graph from a configuration object.

    Args:
      model_config: Object containing configuration for building the model.
      checkpoint_path: Checkpoint file or a directory containing a
        checkpoint file.

    Returns:
      restore_fn: A function such that restore_fn(sess) loads model variables
        from the checkpoint file.
    """
    tf.logging.info('Building model.')
    self.build_model(model_config)
    # The Saver must be created after the model graph so it captures all
    # model variables.
    return self._create_restore_fn(checkpoint_path, tf.train.Saver())
# build_graph_from_proto: deserializes a GraphDef from file and imports it
# into the default graph (name=''), then loads a SaverDef.
# NOTE(review): the body line below is truncated in this extract
# ("'Loading SaverDef from file: %s'..."); restore the full source before
# editing code.
'Builds the inference graph from serialized GraphDef and SaverDef protos. Args: graph_def_file: File containing a serialized GraphDef proto. saver_def_file: File containing a serialized SaverDef proto. checkpoint_path: Checkpoint file or a directory containing a checkpoint file. Returns: restore_fn: A function such tha...
def build_graph_from_proto(self, graph_def_file, saver_def_file, checkpoint_path):
tf.logging.info('Loading GraphDef from file: %s', graph_def_file)    graph_def = tf.GraphDef()    with tf.gfile.FastGFile(graph_def_file, 'rb') as f:      graph_def.ParseFromString(f.read())    tf.import_graph_def(graph_def, name='')    tf.logging.info('Loading SaverDef from file: %s'...
def feed_image(self, sess, encoded_image):
    """Feeds an image and returns the initial model state.

    Base-class stub: subclasses must override. Calling this implementation
    logs a fatal error.

    Args:
      sess: TensorFlow Session object.
      encoded_image: An encoded image string.

    Returns:
      state: A numpy array of shape [1, state_size].
    """
    tf.logging.fatal('Please implement feed_image in subclass')
def inference_step(self, sess, input_feed, state_feed):
    """Runs one step of inference.

    Base-class stub: subclasses must override. Calling this implementation
    logs a fatal error.

    Args:
      sess: TensorFlow Session object.
      input_feed: A numpy array of shape [batch_size].
      state_feed: A numpy array of shape [batch_size, state_size].

    Returns:
      softmax_output: A numpy array of shape [batch_size, vocab_size].
      new_state: A numpy array of shape [batch_size, state_size].
      metadata: Optional, implementation-defined per-step data.
    """
    tf.logging.fatal('Please implement inference_step in subclass')
# _assertExpectedCaptions: test helper that runs a CaptionGenerator over fake
# model/vocab objects and compares the generated (sentence, probability)
# pairs against the expected ones.
# NOTE(review): the body line below is truncated in this extract; restore the
# full source before editing code.
'Tests that beam search generates the expected captions. Args: expected_captions: A sequence of pairs (sentence, probability), where sentence is a list of integer ids and probability is a float in [0, 1]. beam_size: Parameter passed to beam_search(). max_caption_length: Parameter passed to beam_search(). length_normali...
def _assertExpectedCaptions(self, expected_captions, beam_size=3, max_caption_length=20, length_normalization_factor=0):
expected_sentences = [c[0] for c in expected_captions]    expected_probabilities = [c[1] for c in expected_captions]    generator = caption_generator.CaptionGenerator(model=FakeModel(), vocab=FakeVocab(), beam_size=beam_size, max_caption_length=max_caption_length, length_normalization_factor=length_normalization_...
# Vocabulary.__init__ (file-backed variant): reads the vocab file line by
# line (word = first whitespace token, id = line number per the docstring)
# after logging a fatal error if the file does not exist.
# NOTE(review): the body line below is truncated in this extract
# ("reverse_vocab = ..."); restore the full source before editing code.
'Initializes the vocabulary. Args: vocab_file: File containing the vocabulary, where the words are the first whitespace-separated token on each line (other tokens are ignored) and the word ids are the corresponding line numbers. start_word: Special word denoting sentence start. end_word: Special word denoting sentence ...
def __init__(self, vocab_file, start_word='<S>', end_word='</S>', unk_word='<UNK>'):
if (not tf.gfile.Exists(vocab_file)):      tf.logging.fatal('Vocab file %s not found.', vocab_file)    tf.logging.info('Initializing vocabulary from file: %s', vocab_file)    with tf.gfile.GFile(vocab_file, mode='r') as f:      reverse_vocab = list(f.readlines())    reverse_vocab = ...
def word_to_id(self, word):
    """Returns the integer word id of a word string (unk_id if unknown)."""
    # EAFP: a single lookup instead of a membership test plus a lookup.
    try:
        return self.vocab[word]
    except KeyError:
        return self.unk_id
def id_to_word(self, word_id):
    """Returns the word string of an integer word id.

    Any out-of-range id — negative or >= vocabulary size — maps to the
    unknown word.

    Args:
      word_id: Integer id of the word to look up.

    Returns:
      The word string for word_id, or the unknown-word string.
    """
    # Bug fix: the original only checked the upper bound, so a negative id
    # silently indexed from the end of reverse_vocab (Python negative
    # indexing) and returned an arbitrary word instead of the unknown word.
    if 0 <= word_id < len(self.reverse_vocab):
        return self.reverse_vocab[word_id]
    return self.reverse_vocab[self.unk_id]
# ModelConfig defaults: input pipeline settings (shard sizes, queue capacity,
# reader threads), SequenceExample feature names, and vocab size.
# NOTE(review): the body line below is truncated in this extract
# ("self.num_..."); restore the full source before editing code.
'Sets the default model hyperparameters.'
def __init__(self):
self.input_file_pattern = None    self.image_format = 'jpeg'    self.values_per_input_shard = 2300    self.input_queue_capacity_factor = 2    self.num_input_reader_threads = 1    self.image_feature_name = 'image/data'    self.caption_feature_name = 'image/caption_ids'    self.vocab_size = 12000    self.num_...
def __init__(self):
    """Sets the default training hyperparameters."""
    # Dataset size and optimizer.
    self.num_examples_per_epoch = 586363
    self.optimizer = 'SGD'
    # Learning-rate schedule.
    self.initial_learning_rate = 2.0
    self.learning_rate_decay_factor = 0.5
    self.num_epochs_per_decay = 8.0
    self.train_inception_learning_rate = 0.0005
    # Gradient clipping and checkpoint retention.
    self.clip_gradients = 5.0
    self.max_checkpoints_to_keep = 5
def __init__(self, vocab, unk_id):
    """Initializes the vocabulary.

    Args:
      vocab: A dictionary of word to word_id.
      unk_id: Id of the special 'unknown' word.
    """
    self._unk_id = unk_id
    self._vocab = vocab
def word_to_id(self, word):
    """Returns the integer id of a word string (the unknown id if absent)."""
    return self._vocab[word] if word in self._vocab else self._unk_id
# core_builder: CNN image embedder; the visible prefix creates 3x3 conv
# weight/bias variables with ch1=64 and ch2=128 channels.
# NOTE(review): the body line below is truncated in this extract
# ("initializer=self.matrix..."); restore the full source before editing
# code.
'Embeds x using standard CNN architecture. Args: x: Batch of images as a 2-d Tensor [batch_size, -1]. Returns: A 2-d Tensor [batch_size, hidden_dim] of embedded images.'
def core_builder(self, x):
ch1 = (32 * 2)    ch2 = (64 * 2)    conv1_weights = tf.get_variable('conv1_w', [3, 3, self.num_channels, ch1], initializer=self.matrix_init)    conv1_biases = tf.get_variable('conv1_b', [ch1], initializer=self.vector_init)    conv1a_weights = tf.get_variable('conv1a_w', [3, 3, ch1, ch1], initializer=self.matrix...
# setup: builds the training graph under scope 'core' (reuse=None) and the
# eval graph under the same scope with reuse=True, then wires up the memory
# tensors.
# NOTE(review): the body line below is truncated in this extract
# ("self.rec..."); restore the full source before editing code.
'Sets up all components of the computation graph.'
def setup(self):
(self.x, self.y) = self.get_xy_placeholders()    with tf.variable_scope('core', reuse=None):      (self.loss, self.gradient_ops) = self.train(self.x, self.y)    with tf.variable_scope('core', reuse=True):      self.y_preds = self.eval(self.x, self.y)    (self.mem_keys, self.mem_vals, self.mem_age, self.rec...
def episode_step(self, sess, x, y, clear_memory=False):
    """Performs training steps on episodic input.

    Args:
      sess: A Tensorflow Session.
      x: A list of batches of images defining the episode.
      y: A list of batches of labels corresponding to x.
      clear_memory: Whether to clear the memory before the episode.

    Returns:
      List of losses, one per (x, y) batch in the episode.
    """
    if clear_memory:
        self.clear_memory(sess)
    fetches = [self.loss, self.gradient_ops]
    # fetches[0] is the loss; the gradient op's output is discarded.
    return [sess.run(fetches, feed_dict={self.x: bx, self.y: by})[0]
            for bx, by in zip(x, y)]
# predict: snapshots the memory (keys/vals/age) before running inference so
# it can be restored via mem_reset_op afterwards — i.e. single-batch
# prediction must not permanently mutate the memory.
# NOTE(review): the body line below is truncated in this extract
# ("self.mem_keys_reset: cu..."); restore the full source before editing
# code.
'Predict the labels on a single batch of examples. Args: sess: A Tensorflow Session. x: A batch of images. y: The labels for the images in x. This allows for updating the memory. Returns: Predicted y.'
def predict(self, sess, x, y=None):
cur_memory = sess.run([self.mem_keys, self.mem_vals, self.mem_age])    outputs = [self.y_preds]    if (y is None):      ret = sess.run(outputs, feed_dict={self.x: x})    else:      ret = sess.run(outputs, feed_dict={self.x: x, self.y: y})    sess.run([self.mem_reset_op], feed_dict={self.mem_keys_reset: cu...
# episode_predict: like predict() but over a whole episode — snapshots the
# memory, optionally clears it, accumulates per-batch predictions.
# NOTE(review): the body line below is truncated in this extract
# ("y_preds.append(y_p..."); restore the full source before editing code.
'Predict the labels on an episode of examples. Args: sess: A Tensorflow Session. x: A list of batches of images. y: A list of labels for the images in x. This allows for updating the memory. clear_memory: Whether to clear the memory before the episode. Returns: List of predicted y.'
def episode_predict(self, sess, x, y, clear_memory=False):
cur_memory = sess.run([self.mem_keys, self.mem_vals, self.mem_age])    if clear_memory:      self.clear_memory(sess)    outputs = [self.y_preds]    y_preds = []    for (xx, yy) in zip(x, y):      out = sess.run(outputs, feed_dict={self.x: xx, self.y: yy})      y_pred = out[0]      y_preds.append(y_p...
# sample_episode_batch: builds batch_size episodes, each with episode_length
# examples drawn from episode_width randomly sampled labels (Python 2 code:
# uses xrange and dict.keys() passed to random.sample).
# NOTE(review): the body line below is truncated in this extract; restore the
# full source before editing code.
'Generates a random batch for training or validation. Structures each element of the batch as an \'episode\'. Each episode contains episode_length examples and episode_width distinct labels. Args: data: A dictionary mapping label to list of examples. episode_length: Number of examples in each episode. episode_width: Di...
def sample_episode_batch(self, data, episode_length, episode_width, batch_size):
episodes_x = [[] for _ in xrange(episode_length)]    episodes_y = [[] for _ in xrange(episode_length)]    assert (len(data) >= episode_width)    keys = data.keys()    for b in xrange(batch_size):      episode_labels = random.sample(keys, episode_width)      remainder = (episode_length % episode_width)      ...
# run: episodic training loop; the visible prefix only unpacks configuration
# (data splits, dimensions, episode geometry, memory size, batch size) from
# instance attributes into locals.
# NOTE(review): the body line below is truncated in this extract
# ("train_size ..."); restore the full source before editing code.
'Performs training. Trains a model using episodic training. Every so often, runs some evaluations on validation data.'
def run(self):
(train_data, valid_data) = (self.train_data, self.valid_data)    (input_dim, output_dim) = (self.input_dim, self.output_dim)    (rep_dim, episode_length) = (self.rep_dim, self.episode_length)    (episode_width, memory_size) = (self.episode_width, self.memory_size)    batch_size = self.batch_size    train_size ...