desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def get_l2_regularized_weights(self):
    """Gets the weights that need to be regularized."""
    return self.regularized_weights
'Compute the attention term for the network unit.'
def attention(self, last_layer, attention_tensor):
h_tensor = attention_tensor focus_tensor = tf.nn.tanh((tf.matmul(h_tensor, self._component.get_variable('attention_weights_pm_0'), name='h_x_pm') + self._component.get_variable('attention_bias_0'))) context_tensor = tf.nn.tanh((tf.matmul(last_layer, self._component.get_variable('attention_weights_hm_0'), na...
'Initializes parameters required to run this network. Args: component: parent ComponentBuilderBase object. Parameters used to construct the network: hidden_layer_sizes: comma-separated list of ints, indicating the number of hidden units in each hidden layer. layer_norm_input (False): Whether or not to apply layer norma...
def __init__(self, component):
self._attrs = get_attrs_with_defaults(component.spec.network_unit.parameters, defaults={'hidden_layer_sizes': '', 'layer_norm_input': False, 'layer_norm_hidden': False, 'nonlinearity': 'relu', 'dropout_keep_prob': (-1.0), 'dropout_per_sequence': False, 'dropout_all_layers': False}) self._hidden_layer_sizes = (m...
'See base class.'
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays, attention_tensor, during_training, stride=None):
input_tensor = get_input_tensor(fixed_embeddings, linked_embeddings) if during_training: input_tensor.set_shape([None, self._concatenated_input_dim]) input_tensor = self._maybe_apply_dropout(input_tensor, stride) if self._layer_norm_input: input_tensor = self._layer_norm_input.normal...
'See base class.'
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays, attention_tensor, during_training, stride=None):
input_tensor = get_input_tensor(fixed_embeddings, linked_embeddings) assert (len(context_tensor_arrays) == 2) length = context_tensor_arrays[0].size() x2i = self._component.get_variable('x2i') h2i = self._component.get_variable('h2i') c2i = self._component.get_variable('c2i') bi = self._comp...
'Initializes kernels and biases for this convolutional net. Args: component: parent ComponentBuilderBase object. Parameters used to construct the network: widths: comma separated list of ints, number of steps input to the convolutional kernel at every layer. depths: comma separated list of ints, number of channels inpu...
def __init__(self, component):
super(ConvNetwork, self).__init__(component) self._attrs = get_attrs_with_defaults(component.spec.network_unit.parameters, defaults={'widths': '', 'depths': '', 'output_embedding_dim': 0, 'nonlinearity': 'relu', 'dropout_keep_prob': (-1.0), 'dropout_per_sequence': False}) self._weights = [] self._biases...
'Requires |stride|; otherwise see base class.'
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays, attention_tensor, during_training, stride=None):
if (stride is None): raise RuntimeError("ConvNetwork needs 'stride' and must be called in the bulk feature extractor component.") input_tensor = get_input_tensor_with_stride(fixed_embeddings, linked_embeddings, stride) del context_tensor_arrays, attention_tensor ...
'Initializes kernels and biases for this convolutional net. Parameters used to construct the network: depths: comma separated list of ints, number of channels input to the convolutional kernel at every layer. widths: comma separated list of ints, number of steps input to the convolutional kernel at every layer. relu_la...
def __init__(self, component):
parameters = component.spec.network_unit.parameters super(PairwiseConvNetwork, self).__init__(component) self._depths = [(self._concatenated_input_dim * 2)] self._depths.extend(map(int, parameters['depths'].split(','))) self._widths = map(int, parameters['widths'].split(',')) self._num_layers = ...
'Requires |stride|; otherwise see base class.'
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays, attention_tensor, during_training, stride=None):
if (stride is None): raise ValueError("PairwiseConvNetwork needs 'stride'") input_tensor = get_input_tensor_with_stride(fixed_embeddings, linked_embeddings, stride) del context_tensor_arrays, attention_tensor, during_training num_steps = tf.shape(input_tensor)[1] arg1 = tf.expand_dims(...
def __init__(self, component):
    """Initializes exported layers."""
    super(ExportFixedFeaturesNetwork, self).__init__(component)
    # One exported layer per fixed feature channel, sized by its dimension.
    for spec in component.spec.fixed_feature:
        dim = self._fixed_feature_dims[spec.name]
        self._layers.append(Layer(component, spec.name, dim))
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays,
           attention_tensor, during_training, stride=None):
    """See base class."""
    # Each exported layer must line up, by name, with one fixed embedding.
    check.Eq(len(self.layers), len(fixed_embeddings))
    for layer, fixed in zip(self.layers, fixed_embeddings):
        check.Eq(layer.name, fixed.name)
    return [fixed.tensor for fixed in fixed_embeddings]
'Initializes weights and layers. Args: component: Parent ComponentBuilderBase object.'
def __init__(self, component):
super(SplitNetwork, self).__init__(component) parameters = component.spec.network_unit.parameters self._num_slices = int(parameters['num_slices']) check.Gt(self._num_slices, 0, 'Invalid number of slices.') check.Eq((self._concatenated_input_dim % self._num_slices), 0, ('Input dimension ...
'Returns HTML for a container, which will be populated later. Args: height: CSS string representing the height of the element, default \'700px\'. script: Visualization script contents, if the defaults are unacceptable. init_message: Initial message to display. Returns: unicode with HTML contents.'
def initial_html(self, height='700px', script=None, init_message=None):
if (script is None): script = _load_viz_script() if (init_message is None): init_message = 'Type a sentence and press (enter) to see the trace.' (self.elt_id, div_html) = _container_div(height=height, contents='<strong>{}</strong>'.format(init_message)) html = ...
'Returns a JS script HTML fragment, which will populate the container. Args: trace: binary-encoded MasterTrace string. master_spec: Master spec proto (parsed), which can improve the layout. May be required in future versions. Returns: unicode with HTML contents.'
def show_trace(self, trace, master_spec=None):
html = '\n <meta charset="utf-8"/>\n <script type=\'text/javascript\'>\n document.getElementById("{elt_id}").innerHTML = ""; // Clear previous.\n visualizeToDiv({json}, "{elt_id}", {master_spec_json});\n </scri...
'Initialize the graph builder with parameters defining the network. Args: num_actions: int size of the set of parser actions num_features: int list of dimensions of the feature vectors num_feature_ids: int list of same length as num_features corresponding to the sizes of the input feature spaces embedding_sizes: int li...
def __init__(self, num_actions, num_features, num_feature_ids, embedding_sizes, hidden_layer_sizes, seed=None, gate_gradients=False, use_locking=False, embedding_init=1.0, relu_init=0.0001, bias_init=0.2, softmax_init=0.0001, averaging_decay=0.9999, use_averaging=True, check_parameters=True, check_every=1, allow_featur...
self._num_actions = num_actions self._num_features = num_features self._num_feature_ids = num_feature_ids self._embedding_sizes = embedding_sizes self._hidden_layer_sizes = hidden_layer_sizes self._seed = seed self._gate_gradients = gate_gradients self._use_locking = use_locking self...
'Add a model parameter w.r.t. we expect to compute gradients. _AddParam creates both regular parameters (usually for training) and averaged nodes (usually for inference). It returns one or the other based on the \'return_average\' arg. Args: shape: int list, tensor shape of the parameter to create dtype: tf.DataType, d...
def _AddParam(self, shape, dtype, name, initializer=None, return_average=False):
if (name not in self.params): step = tf.cast(self.GetStep(), tf.float32) with tf.name_scope(self._param_scope): self.params[name] = tf.get_variable(name, shape, dtype, initializer) param = self.params[name] if (initializer is not None): self.inits[...
'Adds an embedding matrix and passes the `features` vector through it.'
def _AddEmbedding(self, features, num_features, num_ids, embedding_size, index, return_average=False):
embedding_matrix = self._AddParam([num_ids, embedding_size], tf.float32, ('embedding_matrix_%d' % index), self._EmbeddingMatrixInitializer(index, embedding_size), return_average=return_average) embedding = EmbeddingLookupFeatures(embedding_matrix, tf.reshape(features, [(-1)], name=('feature_%d' % index)), self....
'Builds a feed-forward part of the net given features as input. The network topology is already defined in the constructor, so multiple calls to BuildForward build multiple networks whose parameters are all shared. It is the source of the input features and the use of the output that distinguishes each network. Args: f...
def _BuildNetwork(self, feature_endpoints, return_average=False):
assert (len(feature_endpoints) == self._feature_size) embeddings = [] for i in range(self._feature_size): embeddings.append(self._AddEmbedding(feature_endpoints[i], self._num_features[i], self._num_feature_ids[i], self._embedding_sizes[i], i, return_average=return_average)) last_layer = tf.conca...
'Cross entropy plus L2 loss on weights and biases of the hidden layers.'
def _AddCostFunction(self, batch_size, gold_actions, logits):
dense_golden = BatchedSparseToDense(gold_actions, self._num_actions) cross_entropy = tf.div(tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=dense_golden, logits=logits)), batch_size) regularized_params = [tf.nn.l2_loss(p) for (k, p) in self.params.items() if (k.startswith('weights') or k.starts...
'Builds the forward network only without the training operation. Args: task_context: file path from which to read the task context. batch_size: batch size to request from reader op. evaluation_max_steps: max number of parsing actions during evaluation, only used in beam parsing. corpus_name: name of the task input to r...
def AddEvaluation(self, task_context, batch_size, evaluation_max_steps=300, corpus_name='documents'):
def _AssignTransitionScores(): return tf.assign(nodes['transition_scores'], nodes['logits'], validate_shape=False) def _Pass(): return tf.constant((-1.0)) unused_evaluation_max_steps = evaluation_max_steps with tf.name_scope('evaluation'): nodes = self.evaluation nodes['t...
def _AddLearningRate(self, initial_learning_rate, decay_steps):
    """Returns a learning rate that decays by 0.96 every decay_steps.

    Args:
      initial_learning_rate: initial value of the learning rate
      decay_steps: decay by 0.96 every this many steps

    Returns:
      learning rate variable.
    """
    step = self.GetStep()
    decayed_rate = tf.train.exponential_decay(
        initial_learning_rate, step, decay_steps, 0.96, staircase=True)
    # Attach the step-counter increment as a dependency so it fires every
    # time the learning rate is evaluated.
    return cf.with_dependencies([self._IncrementCounter(step)], decayed_rate)
'Embeddings at the given index will be set to pretrained values.'
def AddPretrainedEmbeddings(self, index, embeddings_path, task_context):
def _Initializer(shape, dtype=tf.float32, partition_info=None): 'Variable initializer that loads pretrained embeddings.' unused_dtype = dtype (seed1, seed2) = tf.get_seed(self._seed) t = gen_parser_ops.word_embedding_initializer(vectors=embeddings_path, task_context=ta...
'Builds a trainer to minimize the cross entropy cost function. Args: task_context: file path from which to read the task context batch_size: batch size to request from reader op learning_rate: initial value of the learning rate decay_steps: decay learning rate by 0.96 every this many steps momentum: momentum parameter ...
def AddTraining(self, task_context, batch_size, learning_rate=0.1, decay_steps=4000, momentum=0.9, corpus_name='documents'):
with tf.name_scope('training'): nodes = self.training nodes.update(self._AddGoldReader(task_context, batch_size, corpus_name)) nodes.update(self._BuildNetwork(nodes['feature_endpoints'], return_average=False)) nodes.update(self._AddCostFunction(batch_size, nodes['gold_actions'], node...
'Adds ops to save and restore model parameters. Args: slim_model: whether only averaged variables are saved. Returns: the saver object.'
def AddSaver(self, slim_model=False):
with tf.name_scope(None): variables_to_save = self.params.copy() variables_to_save.update(self.variables) if slim_model: for key in variables_to_save.keys(): if (not key.endswith('avg_var')): del variables_to_save[key] self.saver = tf.t...
'Adds an op capable of reading sentences and parsing them with a beam.'
def _AddBeamReader(self, task_context, batch_size, corpus_name, until_all_final=False, always_start_new_sentences=False):
(features, state, epochs) = gen_parser_ops.beam_parse_reader(task_context=task_context, feature_size=self._feature_size, beam_size=self._beam_size, batch_size=batch_size, corpus_name=corpus_name, allow_feature_weights=self._allow_feature_weights, arg_prefix=self._arg_prefix, continue_until_all_final=until_all_final...
'Adds a sequence of beam parsing steps.'
def _BuildSequence(self, batch_size, max_steps, features, state, use_average=False):
def Advance(state, step, scores_array, alive, alive_steps, *features): scores = self._BuildNetwork(features, return_average=use_average)['logits'] scores_array = scores_array.write(step, scores) (features, state, alive) = gen_parser_ops.beam_parser(state, scores, self._feature_size) ...
'Constructs a structured learning graph.'
def MakeGraph(self, max_steps=10, beam_size=2, batch_size=1, **kwargs):
assert (max_steps > 0), 'Empty network not supported.' logging.info('MakeGraph + %s', kwargs) with self.test_session(graph=tf.Graph()) as sess: (feature_sizes, domain_sizes, embedding_dims, num_actions) = sess.run(gen_parser_ops.feature_size(task_context=self._task_context)) embed...
def testParseUntilNotAlive(self):
    """Ensures that the 'alive' condition works in the Cond ops."""
    with self.test_session(graph=tf.Graph()) as sess:
        t = self.MakeGraph(batch_size=3, beam_size=2, max_steps=5).training
        sess.run(t['inits'])
        for step in range(5):
            logging.info('run %d', step)
            tf_alive = t['alive'].eval()
        # After the final step, no beam should still be alive.
        self.assertFalse(any(tf_alive))
def testParseMomentum(self):
    """Ensures that Momentum training can be done using the gradients."""
    # Exercise training under several cost/parameter-freezing configurations.
    configurations = (
        {},
        {'model_cost': 'perceptron_loss'},
        {'model_cost': 'perceptron_loss',
         'only_train': 'softmax_weight,softmax_bias',
         'softmax_init': 0},
        {'only_train': 'softmax_weight,softmax_bias', 'softmax_init': 0},
    )
    for kwargs in configurations:
        self.Train(**kwargs)
def testPathScoresAgree(self):
    """Ensures that path scores computed in the beam are same in the net."""
    all_scores, beam_scores = self.PathScores(
        iterations=1, beam_size=130, max_steps=5, batch_size=1)
    self.assertArrayNear(all_scores[0], beam_scores[0], 1e-06)
def testBatchPathScoresAgree(self):
    """Ensures that path scores computed in the beam are same in the net."""
    all_scores, beam_scores = self.PathScores(
        iterations=1, beam_size=130, max_steps=5, batch_size=22)
    self.assertArrayNear(all_scores[0], beam_scores[0], 1e-06)
def testBatchOneStepPathScoresAgree(self):
    """Ensures that path scores computed in the beam are same in the net."""
    all_scores, beam_scores = self.PathScores(
        iterations=1, beam_size=130, max_steps=1, batch_size=22)
    self.assertArrayNear(all_scores[0], beam_scores[0], 1e-06)
def testCanCreateImpl(self):
    """Tests that Create can create the Impl subclass."""
    name = PATH + 'registry_test_impl.Impl'
    try:
        instance = registry_test_base.Base.Create(name, 'hello world')
    except ValueError:
        self.fail('Create raised ValueError: %s' % traceback.format_exc())
    self.assertEqual('hello world', instance.Get())
def testCanCreateByAlias(self):
    """Tests that Create can create an Impl subclass via Alias."""
    name = PATH + 'registry_test_impl.Alias'
    try:
        instance = registry_test_base.Base.Create(name, 'hello world')
    except ValueError:
        self.fail('Create raised ValueError: %s' % traceback.format_exc())
    self.assertEqual('hello world', instance.Get())
def testCannotCreateNonSubclass(self):
    """Tests that Create fails if the class is not a subclass of Base."""
    name = PATH + 'registry_test_impl.NonSubclass'
    with self.assertRaisesRegexp(ValueError, 'Failed to create'):
        registry_test_base.Base.Create(name, 'hello world')
'Tests that Create fails if the name does not identify a class.'
def testCannotCreateNonClass(self):
with self.assertRaisesRegexp(ValueError, 'Failed to create'): registry_test_base.Base.Create((PATH + 'registry_test_impl.variable'), 'hello world') with self.assertRaisesRegexp(ValueError, 'Failed to create'): registry_test_base.Base.Create((PATH + 'registry_test_impl.Function'), ...
def testCannotCreateMissingClass(self):
    """Tests that Create fails if the class does not exist in the module."""
    name = PATH + 'registry_test_impl.MissingClass'
    with self.assertRaisesRegexp(ValueError, 'Failed to create'):
        registry_test_base.Base.Create(name, 'hello world')
def testCannotCreateMissingModule(self):
    """Tests that Create fails if the module does not exist."""
    name = PATH + 'missing.SomeClass'
    with self.assertRaisesRegexp(ValueError, 'Failed to create'):
        registry_test_base.Base.Create(name, 'hello world')
def testCannotCreateMissingPackage(self):
    """Tests that Create fails if the package does not exist."""
    name = 'missing.package.path.module.SomeClass'
    with self.assertRaisesRegexp(ValueError, 'Failed to create'):
        registry_test_base.Base.Create(name, 'hello world')
'Tests that Create fails on malformed type names.'
def testCannotCreateMalformedType(self):
with self.assertRaisesRegexp(ValueError, 'Failed to create'): registry_test_base.Base.Create('oneword', 'hello world') with self.assertRaisesRegexp(ValueError, 'Failed to create'): registry_test_base.Base.Create('hyphen-ated', 'hello world') with self.assertRaisesRegexp(Val...
'Tests that Create can create the Impl subclass using a relative path.'
def testCanCreateWithRelativePath(self):
for name in [(PATH + 'registry_test_impl.Impl'), 'syntaxnet.util.registry_test_impl.Impl', 'util.registry_test_impl.Impl', 'registry_test_impl.Impl']: value = ('created via %s' % name) try: impl = registry_test_base.Base.Create(name, value) except ValueError: se...
'Tests that Create fails if a relative path cannot be resolved.'
def testCannotResolveRelativeName(self):
for name in ['nlp.saft.opensource.syntaxnet.util.registry_test_base.Impl', 'saft.bad.registry_test_impl.Impl', 'missing.registry_test_impl.Impl', 'registry_test_impl.Bad', 'Impl']: with self.assertRaisesRegexp(ValueError, 'Failed to create'): registry_test_base.Base.Create(name, 'hello ...
def __init__(self, value):
    """Creates an implementation with a custom string."""
    # Stored verbatim; retrieved later via Get().
    self.value = value
def Get(self):
    """Returns the current value."""
    return self.value
def Get(self):
    """Overridden in subclasses."""
    # Base-class placeholder: no value is defined here.
    return None
'Constructs a postprocessor. Args: pca_params_npz_path: Path to a NumPy-format .npz file that contains the PCA parameters used in postprocessing.'
def __init__(self, pca_params_npz_path):
params = np.load(pca_params_npz_path) self._pca_matrix = params[vggish_params.PCA_EIGEN_VECTORS_NAME] self._pca_means = params[vggish_params.PCA_MEANS_NAME].reshape((-1), 1) assert (self._pca_matrix.shape == (vggish_params.EMBEDDING_SIZE, vggish_params.EMBEDDING_SIZE)), ('Bad PCA matrix shape: ...
'Applies postprocessing to a batch of embeddings. Args: embeddings_batch: An nparray of shape [batch_size, embedding_size] containing output from the embedding layer of VGGish. Returns: An nparray of the same shape as the input but of type uint8, containing the PCA-transformed and quantized version of the input.'
def postprocess(self, embeddings_batch):
assert (len(embeddings_batch.shape) == 2), ('Expected 2-d batch, got %r' % (embeddings_batch.shape,)) assert (embeddings_batch.shape[1] == vggish_params.EMBEDDING_SIZE), ('Bad batch shape: %r' % (embeddings_batch.shape,)) pca_applied = np.dot(self._pca_matrix, (embeddings_batch.T - self...
def __init__(self):
    """Class members that will be assigned by any class that actually uses this class."""
    # All members start unset; concrete subclasses are expected to fill them in.
    for member in ('restrict_to_largest_cc', 'robot', 'env',
                   'category_list', 'traversible'):
        setattr(self, member, None)
'Based on the node orientation returns X, and Y axis. Used to sample the map in egocentric coordinate frame.'
def get_loc_axis(self, node, delta_theta, perturb=None):
if (type(node) == tuple): node = np.array([node]) if (perturb is None): perturb = np.zeros((node.shape[0], 4)) xyt = self.to_actual_xyt_vec(node) x = (xyt[:, [0]] + perturb[:, [0]]) y = (xyt[:, [1]] + perturb[:, [1]]) t = (xyt[:, [2]] + perturb[:, [2]]) theta = (t * delta_the...
def to_actual_xyt(self, pqr):
    """Converts from node to location on the map.

    Args:
      pqr: (p, q, r) node coordinates.

    Returns:
      (x, y, r) location tuple, offset by the task origin. For a hex grid
      (n_ori == 6) the (p, q) axes are skewed accordingly; for a square grid
      (n_ori == 4) the mapping is a pure translation.

    Raises:
      ValueError: if self.task.n_ori is neither 6 nor 4.
    """
    p, q, r = pqr
    if self.task.n_ori == 6:
        out = (p - q * 0.5 + self.task.origin_loc[0],
               q * np.sqrt(3.0) / 2.0 + self.task.origin_loc[1],
               r)
    elif self.task.n_ori == 4:
        out = (p + self.task.origin_loc[0],
               q + self.task.origin_loc[1],
               r)
    else:
        # Original code fell through with `out` unbound, raising an opaque
        # UnboundLocalError; fail explicitly instead.
        raise ValueError('Unsupported n_ori: %r' % (self.task.n_ori,))
    return out
'Converts from node array to location array on the map.'
def to_actual_xyt_vec(self, pqr):
p = pqr[:, 0][:, np.newaxis] q = pqr[:, 1][:, np.newaxis] r = pqr[:, 2][:, np.newaxis] if (self.task.n_ori == 6): out = np.concatenate((((p - (q * 0.5)) + self.task.origin_loc[0]), (((q * np.sqrt(3.0)) / 2.0) + self.task.origin_loc[1]), r), axis=1) elif (self.task.n_ori == 4): out = ...
'Returns if the given set of nodes is valid or not.'
def raw_valid_fn_vec(self, xyt):
height = self.traversible.shape[0] width = self.traversible.shape[1] x = np.round(xyt[:, [0]]).astype(np.int32) y = np.round(xyt[:, [1]]).astype(np.int32) is_inside = np.all(np.concatenate(((x >= 0), (y >= 0), (x < width), (y < height)), axis=1), axis=1) x = np.minimum(np.maximum(x, 0), (width -...
'Returns if the given set of nodes is valid or not.'
def valid_fn_vec(self, pqr):
xyt = self.to_actual_xyt_vec(np.array(pqr)) height = self.traversible.shape[0] width = self.traversible.shape[1] x = np.round(xyt[:, [0]]).astype(np.int32) y = np.round(xyt[:, [1]]).astype(np.int32) is_inside = np.all(np.concatenate(((x >= 0), (y >= 0), (x < width), (y < height)), axis=1), axis=...
'Returns the feasible set of actions from the current node.'
def get_feasible_actions(self, node_ids):
a = np.zeros((len(node_ids), self.task_params.num_actions), dtype=np.int32) gtG = self.task.gtG next_node = [] for (i, c) in enumerate(node_ids): neigh = gtG.vertex(c).out_neighbours() neigh_edge = gtG.vertex(c).out_edges() nn = {} for (n, e) in zip(neigh, neigh_edge): ...
'Returns the new node after taking the action action. Stays at the current node if the action is invalid.'
def take_action(self, current_node_ids, action):
(actions, next_node_ids) = self.get_feasible_actions(current_node_ids) new_node_ids = [] for (i, (c, a)) in enumerate(zip(current_node_ids, action)): if (actions[(i, a)] == 1): new_node_ids.append(next_node_ids[i][a]) else: new_node_ids.append(c) return new_node_i...
def set_r_obj(self, r_obj):
    """Sets the SwiftshaderRenderer object used for rendering."""
    self.r_obj = r_obj
'Saves traversible space along with nodes generated on the graph. Takes the seed as input.'
def _debug_save_map_nodes(self, seed):
img_path = os.path.join(self.logdir, '{:s}_{:d}_graph.png'.format(self.building_name, seed)) node_xyt = self.to_actual_xyt_vec(self.task.nodes) plt.set_cmap('jet') (fig, ax) = utils.subplot(plt, (1, 1), (12, 12)) ax.plot(node_xyt[:, 0], node_xyt[:, 1], 'm.') ax.set_axis_off() ax.axis('equal'...
'Saves traversible space along with nodes generated on the graph. Takes the seed as input.'
def _debug_semantic_maps(self, seed):
for (i, cls) in enumerate(self.task_params.semantic_task.class_map_names): img_path = os.path.join(self.logdir, '{:s}_flip{:d}_{:s}_graph.png'.format(self.building_name, seed, cls)) maps = (self.traversible * 1.0) maps += (0.5 * self.task.class_maps_dilated[:, :, i]) write_traversibl...
'Sets up the task field for doing navigation on the grid world.'
def _preprocess_for_task(self, seed):
if ((self.task is None) or (self.task.seed != seed)): rng = np.random.RandomState(seed) origin_loc = get_graph_origin_loc(rng, self.traversible) self.task = utils.Foo(seed=seed, origin_loc=origin_loc, n_ori=self.task_params.n_ori) G = generate_graph(self.valid_fn_vec, self.task_param...
'In addition to returning the action, also returns the reward that the agent receives.'
def take_action(self, current_node_ids, action, step_number):
goal_number = (step_number / self.task_params.num_steps) new_node_ids = GridWorld.take_action(self, current_node_ids, action) rewards = [] for (i, n) in enumerate(new_node_ids): reward = 0 if (n == self.episode.goal_node_ids[goal_number][i]): reward = self.task_params.reward_...
'Returns the optimal action from the current node.'
def get_optimal_action(self, current_node_ids, step_number):
goal_number = (step_number / self.task_params.num_steps) gtG = self.task.gtG a = np.zeros((len(current_node_ids), self.task_params.num_actions), dtype=np.int32) d_dict = self.episode.dist_to_goal[goal_number] for (i, c) in enumerate(current_node_ids): neigh = gtG.vertex(c).out_neighbours() ...
def get_targets(self, current_node_ids, step_number):
    """Returns the target actions from the current node."""
    optimal = self.get_optimal_action(current_node_ids, step_number)
    # Add a trailing singleton axis so targets are [batch, 1, ...].
    expanded = np.expand_dims(optimal, axis=1)
    return vars(utils.Foo(action=expanded))
def get_targets_name(self):
    """Returns the list of names of the targets."""
    return ['action']
def __init__(self, image_diff_list, params):
    """Constructor.

    Args:
      image_diff_list: A list of (image, diff) tuples, with shape
        [batch_size, image_size, image_size, 3] and image_sizes as
        [32, 64, 128, 256].
      params: Dict of parameters.
    """
    self.params = params
    self.images = [image for image, _ in image_diff_list]
    # Shift diffs by the scale parameter and halve them.
    scale = params['scale']
    self.diffs = [(diff + scale) / 2 for _, diff in image_diff_list]
def _CrossConvHelper(self, encoded_image, kernel):
    """Cross Convolution.

    The encoded image and kernel are of the same shape. Namely
    [batch_size, image_size, image_size, channels]. They are split into
    [image_size, image_size] image squares and [kernel_size, kernel_size]
    kernel squares. Kernel squares are used to convolute image squares.
    """
    batched_image = tf.expand_dims(encoded_image, 0)
    channel_kernel = tf.expand_dims(kernel, 3)
    return tf.nn.depthwise_conv2d(
        batched_image, channel_kernel, [1, 1, 1, 1], 'SAME')
'Apply the motion kernel on the encoded_images.'
def _CrossConv(self, encoded_images):
cross_conved_images = [] kernels = tf.split(axis=3, num_or_size_splits=4, value=self.kernel) for (i, encoded_image) in enumerate(encoded_images): with tf.variable_scope(('cross_conv_%d' % i)): kernel = kernels[i] encoded_image = tf.unstack(encoded_image, axis=0) k...
'Decode the cross_conved feature maps into the predicted images.'
def _BuildImageDecoder(self, cross_conved_images):
nets = [] for (i, cross_conved_image) in enumerate(cross_conved_images): with tf.variable_scope(('image_decoder_%d' % i)): stride = (64 / cross_conved_image.get_shape().as_list()[1]) nets.append(self._Deconv(cross_conved_image, 64, kernel_size=3, stride=stride)) net = tf.conc...
'Create a DeploymentConfig. The config describes how to deploy a model across multiple clones and replicas. The model will be replicated `num_clones` times in each replica. If `clone_on_cpu` is True, each clone will placed on CPU. If `num_replicas` is 1, the model is deployed via a single process. In that case `worke...
def __init__(self, num_clones=1, clone_on_cpu=False, replica_id=0, num_replicas=1, num_ps_tasks=0, worker_job_name='worker', ps_job_name='ps'):
if (num_replicas > 1): if (num_ps_tasks < 1): raise ValueError('When using replicas num_ps_tasks must be positive') if ((num_replicas > 1) or (num_ps_tasks > 0)): if (not worker_job_name): raise ValueError('Must specify worker_job_name when u...
def caching_device(self):
    """Returns the device to use for caching variables.

    Variables are cached on the worker CPU when using replicas.

    Returns:
      A device string or None if the variables do not need to be cached.
    """
    if self._num_ps_tasks <= 0:
        return None
    # Cache each op's variables on the device where the op itself runs.
    return lambda op: op.device
'Device used to create the clone and all the ops inside the clone. Args: clone_index: Int, representing the clone_index. Returns: A value suitable for `tf.device()`. Raises: ValueError: if `clone_index` is greater or equal to the number of clones".'
def clone_device(self, clone_index):
if (clone_index >= self._num_clones): raise ValueError('clone_index must be less than num_clones') device = '' if (self._num_ps_tasks > 0): device += self._worker_device if self._clone_on_cpu: device += '/device:CPU:0' else: device += ('/device:GPU:%d' ...
def clone_scope(self, clone_index):
    """Name scope to create the clone.

    Args:
      clone_index: Int, representing the clone_index.

    Returns:
      A name_scope suitable for `tf.name_scope()`.

    Raises:
      ValueError: if `clone_index` is greater or equal to the number of
        clones.
    """
    if clone_index >= self._num_clones:
        raise ValueError('clone_index must be less than num_clones')
    # With a single clone no scoping is needed.
    return 'clone_%d' % clone_index if self._num_clones > 1 else ''
def optimizer_device(self):
    """Device to use with the optimizer.

    Returns:
      A value suitable for `tf.device()`.
    """
    # No PS tasks and no clones: let TensorFlow place the optimizer freely.
    if self._num_ps_tasks <= 0 and self._num_clones <= 0:
        return ''
    return self._worker_device + '/device:CPU:0'
def inputs_device(self):
    """Device to use to build the inputs.

    Returns:
      A value suitable for `tf.device()`.
    """
    # Inputs go on the worker CPU when PS tasks are in use, otherwise on
    # the local CPU.
    prefix = self._worker_device if self._num_ps_tasks > 0 else ''
    return prefix + '/device:CPU:0'
'Returns the device to use for variables created inside the clone. Returns: A value suitable for `tf.device()`.'
def variables_device(self):
device = '' if (self._num_ps_tasks > 0): device += self._ps_device device += '/device:CPU:0' class _PSDeviceChooser(object, ): 'Slim device chooser for variables when using PS.' def __init__(self, device, tasks): self._device = device ...
'A plain ResNet without extra layers before or after the ResNet blocks.'
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
with tf.variable_scope(scope, values=[inputs]): with slim.arg_scope([slim.conv2d], outputs_collections='end_points'): net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride) end_points = slim.utils.convert_collection_to_dict('end_points') return (net, end_poi...
'Test the end points of a tiny v1 bottleneck network.'
def testEndPointsV1(self):
blocks = [resnet_v1.resnet_v1_block('block1', base_depth=1, num_units=2, stride=2), resnet_v1.resnet_v1_block('block2', base_depth=2, num_units=2, stride=1)] inputs = create_test_input(2, 32, 16, 3) with slim.arg_scope(resnet_utils.resnet_arg_scope()): (_, end_points) = self._resnet_plain(inputs, bl...
def _stack_blocks_nondense(self, net, blocks):
    """A simplified ResNet Block stacker without output stride control."""
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]):
            # Units are numbered from 1 to match checkpoint naming.
            for unit_num, unit in enumerate(block.args, start=1):
                with tf.variable_scope('unit_%d' % unit_num, values=[net]):
                    net = block.unit_fn(net, rate=1, **unit)
    return net
'Verify the values of dense feature extraction by atrous convolution. Make sure that dense feature extraction by stack_blocks_dense() followed by subsampling gives identical results to feature extraction at the nominal network output stride using the simple self._stack_blocks_nondense() above.'
def testAtrousValuesBottleneck(self):
block = resnet_v1.resnet_v1_block blocks = [block('block1', base_depth=1, num_units=2, stride=2), block('block2', base_depth=2, num_units=2, stride=2), block('block3', base_depth=4, num_units=2, stride=2), block('block4', base_depth=8, num_units=2, stride=1)] nominal_stride = 8 height = 30 width = 3...
'A shallow and thin ResNet v1 for faster tests.'
def _resnet_small(self, inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, spatial_squeeze=True, reuse=None, scope='resnet_v1_small'):
block = resnet_v1.resnet_v1_block blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)] return resnet_v1.resnet_v1(inputs, blocks, num_c...
'Verify dense feature extraction with atrous convolution.'
def testAtrousFullyConvolutionalValues(self):
nominal_stride = 32 for output_stride in [4, 8, 16, 32, None]: with slim.arg_scope(resnet_utils.resnet_arg_scope()): with tf.Graph().as_default(): with self.test_session() as sess: tf.set_random_seed(0) inputs = create_test_input(2, 81,...
'A plain ResNet without extra layers before or after the ResNet blocks.'
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
with tf.variable_scope(scope, values=[inputs]): with slim.arg_scope([slim.conv2d], outputs_collections='end_points'): net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride) end_points = slim.utils.convert_collection_to_dict('end_points') return (net, end_poi...
'Test the end points of a tiny v2 bottleneck network.'
def testEndPointsV2(self):
blocks = [resnet_v2.resnet_v2_block('block1', base_depth=1, num_units=2, stride=2), resnet_v2.resnet_v2_block('block2', base_depth=2, num_units=2, stride=1)] inputs = create_test_input(2, 32, 16, 3) with slim.arg_scope(resnet_utils.resnet_arg_scope()): (_, end_points) = self._resnet_plain(inputs, bl...
def _stack_blocks_nondense(self, net, blocks):
  """A simplified ResNet Block stacker without output stride control.

  Applies each unit of each block in order with a fixed atrous rate of 1,
  serving as the plain baseline for stack_blocks_dense() comparisons.

  Args:
    net: A 4-D input tensor.
    blocks: A list of resnet Block namedtuples (scope, unit_fn, args).

  Returns:
    The tensor produced by the final unit of the final block.
  """
  for block in blocks:
    with tf.variable_scope(block.scope, 'block', [net]):
      for idx, unit_kwargs in enumerate(block.args, 1):
        with tf.variable_scope('unit_%d' % idx, values=[net]):
          net = block.unit_fn(net, rate=1, **unit_kwargs)
  return net
'Verify the values of dense feature extraction by atrous convolution. Make sure that dense feature extraction by stack_blocks_dense() followed by subsampling gives identical results to feature extraction at the nominal network output stride using the simple self._stack_blocks_nondense() above.'
def testAtrousValuesBottleneck(self):
block = resnet_v2.resnet_v2_block blocks = [block('block1', base_depth=1, num_units=2, stride=2), block('block2', base_depth=2, num_units=2, stride=2), block('block3', base_depth=4, num_units=2, stride=2), block('block4', base_depth=8, num_units=2, stride=1)] nominal_stride = 8 height = 30 width = 3...
'A shallow and thin ResNet v2 for faster tests.'
def _resnet_small(self, inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, spatial_squeeze=True, reuse=None, scope='resnet_v2_small'):
block = resnet_v2.resnet_v2_block blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)] return resnet_v2.resnet_v2(inputs, blocks, num_c...
'Verify dense feature extraction with atrous convolution.'
def testAtrousFullyConvolutionalValues(self):
nominal_stride = 32 for output_stride in [4, 8, 16, 32, None]: with slim.arg_scope(resnet_utils.resnet_arg_scope()): with tf.Graph().as_default(): with self.test_session() as sess: tf.set_random_seed(0) inputs = create_test_input(2, 81,...
def _create_feature_extractor(self, depth_multiplier):
  """Constructs a SsdInceptionV2FeatureExtractor.

  Args:
    depth_multiplier: float depth multiplier for feature extractor

  Returns:
    an ssd_inception_v2_feature_extractor.SsdInceptionV2FeatureExtractor.
  """
  # Fixed test configuration: modest minimum depth, no extra conv
  # hyperparameters.
  return ssd_inception_v2_feature_extractor.SSDInceptionV2FeatureExtractor(
      depth_multiplier, min_depth=32, conv_hyperparams={})
def __init__(self, depth_multiplier, min_depth, conv_hyperparams, reuse_weights=None):
  """MobileNetV1 Feature Extractor for SSD Models.

  Args:
    depth_multiplier: float depth multiplier for feature extractor.
    min_depth: minimum feature extractor depth.
    conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
    reuse_weights: Whether to reuse variables. Default is None.
  """
  # Delegate all configuration to the shared SSD feature-extractor base.
  base = super(SSDMobileNetV1FeatureExtractor, self)
  base.__init__(depth_multiplier, min_depth, conv_hyperparams, reuse_weights)
def preprocess(self, resized_inputs):
  """SSD preprocessing.

  Maps pixel values to the range [-1, 1].

  Args:
    resized_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.
  """
  # Linear map: 0 -> -1.0 and 255 -> 1.0.
  pixel_scale = 2.0 / 255.0
  return pixel_scale * resized_inputs - 1.0
'Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]'
def extract_features(self, preprocessed_inputs):
preprocessed_inputs.get_shape().assert_has_rank(4) shape_assert = tf.Assert(tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.']) feature_m...
def _create_feature_extractor(self, depth_multiplier):
  """Constructs a new feature extractor.

  Args:
    depth_multiplier: float depth multiplier for feature extractor

  Returns:
    an ssd_meta_arch.SSDFeatureExtractor object.
  """
  # Fixed test configuration: modest minimum depth, no extra conv
  # hyperparameters.
  return ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor(
      depth_multiplier, min_depth=32, conv_hyperparams={})
'Constructor. Args: architecture: Architecture name of the Resnet V1 model. resnet_model: Definition of the Resnet V1 model. is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not ...
def __init__(self, architecture, resnet_model, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
if ((first_stage_features_stride != 8) and (first_stage_features_stride != 16)): raise ValueError('`first_stage_features_stride` must be 8 or 16.') self._architecture = architecture self._resnet_model = resnet_model super(FasterRCNNResnetV1FeatureExtractor, self).__init__(is_train...
def preprocess(self, resized_inputs):
  """Faster R-CNN Resnet V1 preprocessing.

  VGG style channel mean subtraction as described here:
  https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md

  Args:
    resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
      representing a batch of images with values between 0 and 255.0.

  Returns:
    preprocessed_inputs: the mean-subtracted images, same shape as the input.
  """
  # Shape [1, 1, 3] broadcasts the per-channel (RGB) means across the
  # batch, height and width dimensions.
  vgg_channel_means = [[[123.68, 116.779, 103.939]]]
  return resized_inputs - vgg_channel_means
'Extracts first stage RPN features. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (he...
def _extract_proposal_features(self, preprocessed_inputs, scope):
if (len(preprocessed_inputs.get_shape().as_list()) != 4): raise ValueError(('`preprocessed_inputs` must be 4 dimensional, got a tensor of shape %s' % preprocessed_inputs.get_shape())) shape_assert = tf.Assert(tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1],...
'Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name (unused). Returns: proposal_classifier_features: A 4-D float tensor ...
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
with tf.variable_scope(self._architecture, reuse=self._reuse_weights): with slim.arg_scope(resnet_utils.resnet_arg_scope(batch_norm_epsilon=1e-05, batch_norm_scale=True, weight_decay=self._weight_decay)): with slim.arg_scope([slim.batch_norm], is_training=False): blocks = [resnet...
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
  """Constructor.

  Args:
    is_training: See base class.
    first_stage_features_stride: See base class.
    reuse_weights: See base class.
    weight_decay: See base class.

  Raises:
    ValueError: If `first_stage_features_stride` is not 8 or 16, or if
      `architecture` is not supported.
  """
  # Pin the architecture name and model fn; the shared Resnet V1 base
  # class performs the stride validation.
  base = super(FasterRCNNResnet50FeatureExtractor, self)
  base.__init__('resnet_v1_50', resnet_v1.resnet_v1_50, is_training,
                first_stage_features_stride, reuse_weights, weight_decay)
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
  """Constructor.

  Args:
    is_training: See base class.
    first_stage_features_stride: See base class.
    reuse_weights: See base class.
    weight_decay: See base class.

  Raises:
    ValueError: If `first_stage_features_stride` is not 8 or 16, or if
      `architecture` is not supported.
  """
  # Pin the architecture name and model fn; the shared Resnet V1 base
  # class performs the stride validation.
  base = super(FasterRCNNResnet101FeatureExtractor, self)
  base.__init__('resnet_v1_101', resnet_v1.resnet_v1_101, is_training,
                first_stage_features_stride, reuse_weights, weight_decay)
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
  """Constructor.

  Args:
    is_training: See base class.
    first_stage_features_stride: See base class.
    reuse_weights: See base class.
    weight_decay: See base class.

  Raises:
    ValueError: If `first_stage_features_stride` is not 8 or 16, or if
      `architecture` is not supported.
  """
  # Pin the architecture name and model fn; the shared Resnet V1 base
  # class performs the stride validation.
  base = super(FasterRCNNResnet152FeatureExtractor, self)
  base.__init__('resnet_v1_152', resnet_v1.resnet_v1_152, is_training,
                first_stage_features_stride, reuse_weights, weight_decay)
def __init__(self, depth_multiplier, min_depth, conv_hyperparams, reuse_weights=None):
  """InceptionV2 Feature Extractor for SSD Models.

  Args:
    depth_multiplier: float depth multiplier for feature extractor.
    min_depth: minimum feature extractor depth.
    conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
    reuse_weights: Whether to reuse variables. Default is None.
  """
  # Delegate all configuration to the shared SSD feature-extractor base.
  base = super(SSDInceptionV2FeatureExtractor, self)
  base.__init__(depth_multiplier, min_depth, conv_hyperparams, reuse_weights)
def preprocess(self, resized_inputs):
  """SSD preprocessing.

  Maps pixel values to the range [-1, 1].

  Args:
    resized_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.
  """
  # Linear map: 0 -> -1.0 and 255 -> 1.0.
  pixel_scale = 2.0 / 255.0
  return pixel_scale * resized_inputs - 1.0