desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]'
| def extract_features(self, preprocessed_inputs):
| preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.'])
feature_m... |
'Checks the extracted features are of correct shape.
Args:
feature_extractor: The feature extractor to test.
preprocessed_inputs: A [batch, height, width, 3] tensor to extract
features with.
expected_feature_map_shapes: The expected shape of the extracted features.'
| def _validate_features_shape(self, feature_extractor, preprocessed_inputs, expected_feature_map_shapes):
| feature_maps = feature_extractor.extract_features(preprocessed_inputs)
feature_map_shapes = [tf.shape(feature_map) for feature_map in feature_maps]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
feature_map_shapes_out = sess.run(feature_ma... |
'Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.'
| @abstractmethod
def _create_feature_extractor(self, depth_multiplier):
| pass
|
'Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.'
| def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
| if ((first_stage_features_stride != 8) and (first_stage_features_stride != 16)):
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__(is_training, first_stage_features_stride, reuse_weights, weight_decay)
|
'Faster R-CNN with Inception Resnet v2 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
ten... | def preprocess(self, resized_inputs):
| return (((2.0 / 255.0) * resized_inputs) - 1.0)
|
'Extracts first stage RPN features.
Extracts features using the first half of the Inception Resnet v2 network.
We construct the network in `align_feature_maps=True` mode, which means
that all VALID paddings in the network are changed to SAME padding so that
the feature maps are aligned.
Args:
preprocessed_inputs: A [ba... | def _extract_proposal_features(self, preprocessed_inputs, scope):
| if (len(preprocessed_inputs.get_shape().as_list()) != 4):
raise ValueError(('`preprocessed_inputs` must be 4 dimensional, got a tensor of shape %s' % preprocessed_inputs.get_shape()))
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(weight_decay=self._w... |
'Extracts second stage box classifier features.
This function reconstructs the "second half" of the Inception ResNet v2
network after the part defined in `_extract_proposal_features`.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
represe... | def _extract_box_classifier_features(self, proposal_feature_maps, scope):
| with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights):
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(weight_decay=self._weight_decay)):
with slim.arg_scope([slim.batch_norm], is_training=False):
with slim.arg_scope([slim.conv2d, slim.max_p... |
'Returns a map of variables to load from a foreign checkpoint.
Note that this overrides the default implementation in
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for
InceptionResnetV2 checkpoints.
TODO: revisit whether it\'s possible to force the
`Repeat` namescope as created in `_extract_box_c... | def restore_from_classification_checkpoint_fn(self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope):
| variables_to_restore = {}
for variable in tf.global_variables():
if variable.op.name.startswith(first_stage_feature_extractor_scope):
var_name = variable.op.name.replace((first_stage_feature_extractor_scope + '/'), '')
variables_to_restore[var_name] = variable
if variable... |
'Call the loss function.
Args:
prediction_tensor: a tensor representing predicted quantities.
target_tensor: a tensor representing regression or classification targets.
ignore_nan_targets: whether to ignore nan targets in the loss computation.
E.g. can be used if the target tensor is missing groundtruth data that
shoul... | def __call__(self, prediction_tensor, target_tensor, ignore_nan_targets=False, scope=None, **params):
| with tf.name_scope(scope, 'Loss', [prediction_tensor, target_tensor, params]) as scope:
if ignore_nan_targets:
target_tensor = tf.where(tf.is_nan(target_tensor), prediction_tensor, target_tensor)
return self._compute_loss(prediction_tensor, target_tensor, **params)
|
'Method to be overriden by implementations.
Args:
prediction_tensor: a tensor representing predicted quantities
target_tensor: a tensor representing regression or classification targets
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: a tensor representing the value of the... | @abstractmethod
def _compute_loss(self, prediction_tensor, target_tensor, **params):
| pass
|
'Constructor.
Args:
anchorwise_output: Outputs loss per anchor. (default False)'
| def __init__(self, anchorwise_output=False):
| self._anchorwise_output = anchorwise_output
|
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch... | def _compute_loss(self, prediction_tensor, target_tensor, weights):
| weighted_diff = ((prediction_tensor - target_tensor) * tf.expand_dims(weights, 2))
square_diff = (0.5 * tf.square(weighted_diff))
if self._anchorwise_output:
return tf.reduce_sum(square_diff, 2)
return tf.reduce_sum(square_diff)
|
'Constructor.
Args:
anchorwise_output: Outputs loss per anchor. (default False)'
| def __init__(self, anchorwise_output=False):
| self._anchorwise_output = anchorwise_output
|
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch... | def _compute_loss(self, prediction_tensor, target_tensor, weights):
| diff = (prediction_tensor - target_tensor)
abs_diff = tf.abs(diff)
abs_diff_lt_1 = tf.less(abs_diff, 1)
anchorwise_smooth_l1norm = (tf.reduce_sum(tf.where(abs_diff_lt_1, (0.5 * tf.square(abs_diff)), (abs_diff - 0.5)), 2) * weights)
if self._anchorwise_output:
return anchorwise_smooth_l1norm
... |
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4]
representing the decoded predicted boxes
target_tensor: A float tensor of shape [batch_size, num_anchors, 4]
representing the decoded target boxes
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
los... | def _compute_loss(self, prediction_tensor, target_tensor, weights):
| predicted_boxes = box_list.BoxList(tf.reshape(prediction_tensor, [(-1), 4]))
target_boxes = box_list.BoxList(tf.reshape(target_tensor, [(-1), 4]))
per_anchor_iou_loss = (1.0 - box_list_ops.matched_iou(predicted_boxes, target_boxes))
return tf.reduce_sum((tf.reshape(weights, [(-1)]) * per_anchor_iou_loss... |
'Constructor.
Args:
anchorwise_output: Outputs loss per anchor. (default False)'
| def __init__(self, anchorwise_output=False):
| self._anchorwise_output = anchorwise_output
|
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of sh... | def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None):
| weights = tf.expand_dims(weights, 2)
if (class_indices is not None):
weights *= tf.reshape(ops.indices_to_dense_vector(class_indices, tf.shape(prediction_tensor)[2]), [1, 1, (-1)])
per_entry_cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(labels=target_tensor, logits=prediction_tensor)
if se... |
'Constructor.
Args:
anchorwise_output: Whether to output loss per anchor (default False)'
| def __init__(self, anchorwise_output=False):
| self._anchorwise_output = anchorwise_output
|
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of sh... | def _compute_loss(self, prediction_tensor, target_tensor, weights):
| num_classes = prediction_tensor.get_shape().as_list()[(-1)]
per_row_cross_ent = tf.nn.softmax_cross_entropy_with_logits(labels=tf.reshape(target_tensor, [(-1), num_classes]), logits=tf.reshape(prediction_tensor, [(-1), num_classes]))
if self._anchorwise_output:
return (tf.reshape(per_row_cross_ent, ... |
'Constructor.
Args:
alpha: a float32 scalar tensor between 0 and 1 representing interpolation
weight
bootstrap_type: set to either \'hard\' or \'soft\' (default)
anchorwise_output: Outputs loss per anchor. (default False)
Raises:
ValueError: if bootstrap_type is not either \'hard\' or \'soft\''
| def __init__(self, alpha, bootstrap_type='soft', anchorwise_output=False):
| if ((bootstrap_type != 'hard') and (bootstrap_type != 'soft')):
raise ValueError("Unrecognized bootstrap_type: must be one of 'hard' or 'soft.'")
self._alpha = alpha
self._bootstrap_type = bootstrap_type
self._anchorwise_output = anchorwise_output
|
'Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of sh... | def _compute_loss(self, prediction_tensor, target_tensor, weights):
| if (self._bootstrap_type == 'soft'):
bootstrap_target_tensor = ((self._alpha * target_tensor) + ((1.0 - self._alpha) * tf.sigmoid(prediction_tensor)))
else:
bootstrap_target_tensor = ((self._alpha * target_tensor) + ((1.0 - self._alpha) * tf.cast((tf.sigmoid(prediction_tensor) > 0.5), tf.float32... |
'Constructor.
The hard example mining implemented by this class can replicate the behavior
in the two aforementioned papers (Srivastava et al., and Liu et al).
To replicate the A2 paper (Srivastava et al), num_hard_examples is set
to a fixed parameter (64 by default) and iou_threshold is set to .7 for
running non-max-s... | def __init__(self, num_hard_examples=64, iou_threshold=0.7, loss_type='both', cls_loss_weight=0.05, loc_loss_weight=0.06, max_negatives_per_positive=None, min_negatives_per_image=0):
| self._num_hard_examples = num_hard_examples
self._iou_threshold = iou_threshold
self._loss_type = loss_type
self._cls_loss_weight = cls_loss_weight
self._loc_loss_weight = loc_loss_weight
self._max_negatives_per_positive = max_negatives_per_positive
self._min_negatives_per_image = min_negati... |
'Computes localization and classification losses after hard mining.
Args:
location_losses: a float tensor of shape [num_images, num_anchors]
representing anchorwise localization losses.
cls_losses: a float tensor of shape [num_images, num_anchors]
representing anchorwise classification losses.
decoded_boxlist_list: a l... | def __call__(self, location_losses, cls_losses, decoded_boxlist_list, match_list=None):
| mined_location_losses = []
mined_cls_losses = []
location_losses = tf.unstack(location_losses)
cls_losses = tf.unstack(cls_losses)
num_images = len(decoded_boxlist_list)
if (not match_list):
match_list = (num_images * [None])
if (not (len(location_losses) == len(decoded_boxlist_list)... |
'Summarize the number of positives and negatives after mining.'
| def summarize(self):
| if (self._num_positives_list and self._num_negatives_list):
avg_num_positives = tf.reduce_mean(tf.to_float(self._num_positives_list))
avg_num_negatives = tf.reduce_mean(tf.to_float(self._num_negatives_list))
tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives)
tf.sum... |
'Subsample a collection of selected indices to a desired neg:pos ratio.
This function takes a subset of M indices (indexing into a large anchor
collection of N anchors where M<N) which are labeled as positive/negative
via a Match object (matched indices are positive, unmatched indices
are negative). It returns a subse... | def _subsample_selection_to_desired_neg_pos_ratio(self, indices, match, max_negatives_per_positive, min_negatives_per_image=0):
| positives_indicator = tf.gather(match.matched_column_indicator(), indices)
negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices)
num_positives = tf.reduce_sum(tf.to_int32(positives_indicator))
max_negatives = tf.maximum(min_negatives_per_image, tf.to_int32((max_negatives_per_posit... |
'Computes matrix of pairwise similarity between BoxLists.
This op (to be overriden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxL... | def compare(self, boxlist1, boxlist2, scope=None):
| with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
|
'Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.'
| def _compare(self, boxlist1, boxlist2):
| return box_list_ops.iou(boxlist1, boxlist2)
|
'Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.'
| def _compare(self, boxlist1, boxlist2):
| return ((-1) * box_list_ops.sq_dist(boxlist1, boxlist2))
|
'Compute pairwise IOA similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise IOA scores.'
| def _compare(self, boxlist1, boxlist2):
| return box_list_ops.ioa(boxlist1, boxlist2)
|
'Constructs a minibatch sampler.
Args:
positive_fraction: desired fraction of positive examples (scalar in [0,1])
Raises:
ValueError: if positive_fraction < 0, or positive_fraction > 1'
| def __init__(self, positive_fraction=0.5):
| if ((positive_fraction < 0) or (positive_fraction > 1)):
raise ValueError(('positive_fraction should be in range [0,1]. Received: %s.' % positive_fraction))
self._positive_fraction = positive_fraction
|
'Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
Returns:
is_sampled: boolean tensor of shape [N], True for entries which are
sampled... | def subsample(self, indicator, batch_size, labels):
| if (len(indicator.get_shape().as_list()) != 1):
raise ValueError(('indicator must be 1 dimensional, got a tensor of shape %s' % indicator.get_shape()))
if (len(labels.get_shape().as_list()) != 1):
raise ValueError(('labels must be 1 dimensional, got ... |
'Tests that named constructor gives working target assigners.
TODO: Make this test more general.'
| def test_create_target_assigner(self):
| corners = [[0.0, 0.0, 1.0, 1.0]]
groundtruth = box_list.BoxList(tf.constant(corners))
priors = box_list.BoxList(tf.constant(corners))
prior_stddevs = tf.constant([[1.0, 1.0, 1.0, 1.0]])
priors.add_field('stddev', prior_stddevs)
multibox_ta = targetassigner.create_target_assigner('Multibox', stag... |
'Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.'
| def __init__(self, boxes):
| if ((len(boxes.get_shape()) != 2) or (boxes.get_shape()[(-1)] != 4)):
raise ValueError('Invalid dimensions for box data.')
if (boxes.dtype != tf.float32):
raise ValueError('Invalid tensor type: should be tf.float32')
self.data = {'boxes': boxes}
|
'Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.'
| def num_boxes(self):
| return tf.shape(self.data['boxes'])[0]
|
'Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferrable at graph construction time.'
| def num_boxes_static(self):
| return self.data['boxes'].get_shape()[0].value
|
'Returns all fields.'
| def get_all_fields(self):
| return self.data.keys()
|
'Returns all non-box fields (i.e., everything not named \'boxes\').'
| def get_extra_fields(self):
| return [k for k in self.data.keys() if (k != 'boxes')]
|
'Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList'
| def add_field(self, field, field_data):
| self.data[field] = field_data
|
'Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.'
| def get(self):
| return self.get_field('boxes')
|
'Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data'
| def set(self, boxes):
| if ((len(boxes.get_shape()) != 2) or (boxes.get_shape()[(-1)] != 4)):
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
|
'Accesses a box collection and associated fields.
This function returns specified field with object; if no field is specified,
it returns the box coordinates.
Args:
field: this optional string parameter can be used to specify
a related field to be accessed.
Returns:
a tensor representing the box collection or an associ... | def get_field(self, field):
| if (not self.has_field(field)):
raise ValueError((('field ' + str(field)) + ' does not exist'))
return self.data[field]
|
'Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.'
| def set_field(self, field, value):
| if (not self.has_field(field)):
raise ValueError(('field %s does not exist' % field))
self.data[field] = value
|
'Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].'
| def get_center_coordinates_and_sizes(self, scope=None):
| with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
box_corners = self.get()
(ymin, xmin, ymax, xmax) = tf.unstack(tf.transpose(box_corners))
width = (xmax - xmin)
height = (ymax - ymin)
ycenter = (ymin + (height / 2.0))
xcenter = (xmin + (width / 2.0))
... |
'Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.'
| def transpose_coordinates(self, scope=None):
| with tf.name_scope(scope, 'transpose_coordinates'):
(y_min, x_min, y_max, x_max) = tf.split(value=self.get(), num_or_size_splits=4, axis=1)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
|
'Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.'
| def as_tensor_dict(self, fields=None):
| tensor_dict = {}
if (fields is None):
fields = self.get_all_fields()
for field in fields:
if (not self.has_field(field)):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
|
'Tests image resizing, checking output sizes.'
| def testResizeToRangePreservesStaticSpatialShape(self):
| in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
for (in_shape, expected_shape) in zip(in_shape_list, expected_shape_list):
in_image = tf.random_uniform(in_shape)
out_image = preprocesso... |
'Tests image resizing, checking output sizes.'
| def testResizeToRangeWithDynamicSpatialShape(self):
| in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
for (in_shape, expected_shape) in zip(in_shape_list, expected_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
o... |
'Tests image resizing, checking output sizes.'
| def testResizeToRangeWithMasksPreservesStaticSpatialShape(self):
| in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks... |
'Tests image resizing, checking output sizes.'
| def testResizeToRangeWithMasksAndDynamicSpatialShape(self):
| in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks... |
'Tests image resizing, checking output sizes.'
| def testResizeToRangeWithInstanceMasksTensorOfSizeZero(self):
| in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
height = 50
width = 100
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shap... |
'Tests image resizing, checking output sizes.'
| def testResizeToRangeSameMinMax(self):
| in_shape_list = [[312, 312, 3], [299, 299, 3]]
min_dim = 320
max_dim = 320
expected_shape_list = [[320, 320, 3], [320, 320, 3]]
for (in_shape, expected_shape) in zip(in_shape_list, expected_shape_list):
in_image = tf.random_uniform(in_shape)
out_image = preprocessor.resize_to_range(i... |
'Tests box scaling, checking scaled values.'
| def testScaleBoxesToPixelCoordinates(self):
| in_shape = [60, 40, 3]
in_boxes = [[0.1, 0.2, 0.4, 0.6], [0.5, 0.3, 0.9, 0.7]]
expected_boxes = [[6.0, 8.0, 24.0, 24.0], [30.0, 12.0, 54.0, 28.0]]
in_image = tf.random_uniform(in_shape)
in_boxes = tf.constant(in_boxes)
(_, out_boxes) = preprocessor.scale_boxes_to_pixel_coordinates(in_image, boxe... |
'Tests box and keypoint scaling, checking scaled values.'
| def testScaleBoxesToPixelCoordinatesWithKeypoints(self):
| in_shape = [60, 40, 3]
in_boxes = self.createTestBoxes()
in_keypoints = self.createTestKeypoints()
expected_boxes = [[0.0, 10.0, 45.0, 40.0], [15.0, 20.0, 45.0, 40.0]]
expected_keypoints = [[[6.0, 4.0], [12.0, 8.0], [18.0, 12.0]], [[24.0, 16.0], [30.0, 20.0], [36.0, 24.0]]]
in_image = tf.random_... |
'Tests whether channel means have been subtracted.'
| def testSubtractChannelMean(self):
| with self.test_session():
image = tf.zeros((240, 320, 3))
means = [1, 2, 3]
actual = preprocessor.subtract_channel_mean(image, means=means)
actual = actual.eval()
self.assertTrue((actual[:, :, 0] == (-1)).all())
self.assertTrue((actual[:, :, 1] == (-2)).all())
... |
'Tests one hot encoding of multiclass labels.'
| def testOneHotEncoding(self):
| with self.test_session():
labels = tf.constant([1, 4, 2], dtype=tf.int32)
one_hot = preprocessor.one_hot_encoding(labels, num_classes=5)
one_hot = one_hot.eval()
self.assertAllEqual([0, 1, 1, 0, 1], one_hot)
|
'Constructor.
Args:
num_classes: number of classes. Note that num_classes *does not* include
background categories that might be implicitly be predicted in various
implementations.'
| def __init__(self, num_classes):
| self._num_classes = num_classes
self._groundtruth_lists = {}
|
'Access list of groundtruth tensors.
Args:
field: a string key, options are
fields.BoxListFields.{boxes,classes,masks,keypoints}
Returns:
a list of tensors holding groundtruth information (see also
provide_groundtruth function below), with one entry for each image in the
batch.
Raises:
RuntimeError: if the field has no... | def groundtruth_lists(self, field):
| if (field not in self._groundtruth_lists):
raise RuntimeError('Groundtruth tensor %s has not been provided', field)
return self._groundtruth_lists[field]
|
'Input preprocessing.
To be overridden by implementations.
This function is responsible for any scaling/shifting of input values that
is necessary prior to running the detector on an input image.
It is also responsible for any resizing that might be necessary as images
are assumed to arrive in arbitrary sizes. While t... | @abstractmethod
def preprocess(self, inputs):
| pass
|
'Predict prediction tensors from inputs tensor.
Outputs of this function can be passed to loss or postprocess functions.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss... | @abstractmethod
def predict(self, preprocessed_inputs):
| pass
|
'Convert predicted output tensors to final detections.
Outputs adhere to the following conventions:
* Classes are integers in [0, num_classes); background classes are removed
and the first non-background class is mapped to 0.
* Boxes are to be interpreted as being in [y_min, x_min, y_max, x_max]
format and normalized r... | @abstractmethod
def postprocess(self, prediction_dict, **params):
| pass
|
'Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding predicted tensors
Returns:
a dictionary mapping strings (loss names) to scalar tensors represent... | @abstractmethod
def loss(self, prediction_dict):
| pass
|
'Provide groundtruth tensors.
Args:
groundtruth_boxes_list: a list of 2-D tf.float32 tensors of shape
[num_boxes, 4] containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max]
format and assumed to be normalized and clipped
relative to the image window with y_min <= ... | def provide_groundtruth(self, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list=None, groundtruth_keypoints_list=None):
| self._groundtruth_lists[fields.BoxListFields.boxes] = groundtruth_boxes_list
self._groundtruth_lists[fields.BoxListFields.classes] = groundtruth_classes_list
if groundtruth_masks_list:
self._groundtruth_lists[fields.BoxListFields.masks] = groundtruth_masks_list
if groundtruth_keypoints_list:
... |
'Returns a map of variables to load from a foreign checkpoint.
Returns a map of variable names to load from a checkpoint to variables in
the model graph. This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to boots... | @abstractmethod
def restore_map(self, from_detection_checkpoint=True):
| pass
|
'Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant'
| @abstractproperty
def code_size(self):
| pass
|
'Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes'
| def encode(self, boxes, anchors):
| with tf.name_scope('Encode'):
return self._encode(boxes, anchors)
|
'Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)'
| def decode(self, rel_codes, anchors):
| with tf.name_scope('Decode'):
return self._decode(rel_codes, anchors)
|
'Method to be overriden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes'
| @abstractmethod
def _encode(self, boxes, anchors):
| pass
|
'Method to be overriden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)'
@abstractmethod
def _decode(self, rel_codes, anchors):
  """Method to be overriden by implementations.

  Args:
    rel_codes: a tensor representing N relative-encoded boxes
    anchors: BoxList of anchors

  Returns:
    boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
      with corners y_min, x_min, y_max, x_max)
  """
  pass
|
'Construct Multibox Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: an object_detection.core.Matcher used to match groundtruth to
anchors.
box_coder: an object_detection.core.BoxCoder used to encode matching
groundtruth boxes with respect to anchors.
positive_class_weight: classification w... | def __init__(self, similarity_calc, matcher, box_coder, positive_class_weight=1.0, negative_class_weight=1.0, unmatched_cls_target=None):
| if (not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator)):
raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
if (not isinstance(matcher, mat.Matcher)):
raise ValueError('matcher must be a Matcher')
if (not isinstance(box_coder, b... |
'Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute t... | def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None, **params):
| if (not isinstance(anchors, box_list.BoxList)):
raise ValueError('anchors must be an BoxList')
if (not isinstance(groundtruth_boxes, box_list.BoxList)):
raise ValueError('groundtruth_boxes must be an BoxList')
if (groundtruth_labels is None):
groundtruth_label... |
'Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target\'s
first dimension.
Returns:
A tensor with the shape info filled in.'
def _reset_target_shape(self, target, num_anchors):
  """Sets the static shape of the target.

  Args:
    target: the target tensor. Its first dimension will be overwritten.
    num_anchors: the number of anchors, which is used to override the
      target's first dimension.

  Returns:
    A tensor with the shape info filled in.
  """
  # Keep all trailing dimensions; only the leading dimension is replaced.
  trailing_dims = target.get_shape().as_list()[1:]
  target.set_shape([num_anchors] + trailing_dims)
  return target
|
'Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]'
| def _create_regression_targets(self, anchors, groundtruth_boxes, match):
| matched_anchor_indices = match.matched_column_indices()
unmatched_ignored_anchor_indices = match.unmatched_or_ignored_column_indices()
matched_gt_indices = match.matched_row_indices()
matched_anchors = box_list_ops.gather(anchors, matched_anchor_indices)
matched_gt_boxes = box_list_ops.gather(ground... |
'Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
def _default_regression_target(self):
  """Returns the default target for anchors to regress to.

  Default regression targets are set to zero (though in this
  implementation what these targets are set to should not matter as the
  regression weight of any box set to regress to the default target is
  zero).

  Returns:
    default_target: a float32 tensor with shape [1, box_code_dimension]
  """
  zero_code = [0] * self._box_coder.code_size
  return tf.constant([zero_code], tf.float32)
|
'Create classification targets for each anchor.
Assign a classification target of for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_... | def _create_classification_targets(self, groundtruth_labels, match):
| matched_anchor_indices = match.matched_column_indices()
unmatched_ignored_anchor_indices = match.unmatched_or_ignored_column_indices()
matched_gt_indices = match.matched_row_indices()
matched_cls_targets = tf.gather(groundtruth_labels, matched_gt_indices)
ones = (self._unmatched_cls_target.shape.ndi... |
'Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
def _create_regression_weights(self, match):
  """Set regression weight for each anchor.

  Only positive anchors are set to contribute to the regression loss, so
  this method returns a weight of 1 for every positive (matched) anchor
  and 0 for every negative (unmatched or ignored) anchor.

  Args:
    match: a matcher.Match object that provides a matching between anchors
      and groundtruth boxes.

  Returns:
    reg_weights: a float32 tensor of per-anchor regression weights.
  """
  # The boolean matched indicator cast to float is exactly the 0/1 weight.
  return tf.cast(match.matched_column_indicator(), tf.float32)
|
'Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set t... | def _create_classification_weights(self, match, positive_class_weight=1.0, negative_class_weight=1.0):
| matched_indicator = tf.cast(match.matched_column_indicator(), tf.float32)
ignore_indicator = tf.cast(match.ignored_column_indicator(), tf.float32)
unmatched_indicator = ((1.0 - matched_indicator) - ignore_indicator)
cls_weights = ((positive_class_weight * matched_indicator) + (negative_class_weight * un... |
'Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder: BoxCoder object.'
def get_box_coder(self):
  """Get BoxCoder of this TargetAssigner.

  Returns:
    BoxCoder: the box coder used to encode matching groundtruth boxes.
  """
  return self._box_coder
|
'Constructs a batch queue holding tensor_dict.
Args:
tensor_dict: dictionary of tensors to batch.
batch_size: batch size.
batch_queue_capacity: max capacity of the queue from which the tensors are
batched.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: max capacity of the queue... | def __init__(self, tensor_dict, batch_size, batch_queue_capacity, num_batch_queue_threads, prefetch_queue_capacity):
| static_shapes = collections.OrderedDict({key: tensor.get_shape() for (key, tensor) in tensor_dict.items()})
runtime_shapes = collections.OrderedDict({(key + rt_shape_str): tf.shape(tensor) for (key, tensor) in tensor_dict.items()})
all_tensors = tensor_dict
all_tensors.update(runtime_shapes)
batched... |
'Dequeues a batch of tensor_dict from the BatchQueue.
TODO: use allow_smaller_final_batch to allow running over the whole eval set
Returns:
A list of tensor_dicts of the requested batch_size.'
| def dequeue(self):
| batched_tensors = self._queue.dequeue()
tensors = {}
shapes = {}
for (key, batched_tensor) in batched_tensors.items():
unbatched_tensor_list = tf.unstack(batched_tensor)
for (i, unbatched_tensor) in enumerate(unbatched_tensor_list):
if (rt_shape_str in key):
s... |
'Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
def __init__(self, is_training, num_classes):
  """Constructor.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes. Note that num_classes *does not*
      include the background category, so if groundtruth labels take
      values in {0, 1, .., K-1}, num_classes=K (and not K+1).
  """
  self._num_classes = num_classes
  self._is_training = is_training
|
'Computes encoded object locations and corresponding confidences.
Takes a high level image feature map as input and produce two predictions,
(1) a tensor encoding box locations, and
(2) a tensor encoding class scores for each corresponding box.
In this interface, we only assume that two tensors are returned as output
def predict(self, image_features, num_predictions_per_location, scope, **params):
  """Computes encoded object locations and corresponding confidences.

  Takes a high level image feature map as input and produces two
  predictions: (1) a tensor encoding box locations, and (2) a tensor
  encoding class scores for each corresponding box.

  Args:
    image_features: A float tensor of shape [batch_size, height, width,
      channels] containing features for a batch of images.
    num_predictions_per_location: an integer representing the number of
      box predictions to be made per spatial location in the feature map.
    scope: variable scope under which all prediction ops are created.
    **params: additional keyword arguments forwarded to the
      implementation's _predict.

  Returns:
    Whatever the subclass's _predict returns.
  """
  # All variables created by the implementation share the given scope.
  with tf.variable_scope(scope):
    predictions = self._predict(image_features, num_predictions_per_location,
                                **params)
  return predictions
|
'Implementations must override this method.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
**params: Add... | @abstractmethod
def _predict(self, image_features, num_predictions_per_location, **params):
  """Implementations must override this method.

  Args:
    image_features: A float tensor of shape [batch_size, height, width,
      channels] containing features for a batch of images.
    num_predictions_per_location: an integer representing the number of
      box predictions to be made per spatial location in the feature map.
    **params: additional keyword arguments specific to the implementation.
  """
  pass
|
'Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
def __init__(self, is_training, num_classes, conv_hyperparams, num_spatial_bins, depth, crop_size, box_code_size):
  """Constructor.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes. Note that num_classes *does not*
      include the background category, so if groundtruth labels take
      values in {0, 1, .., K-1}, num_classes=K (and not K+1).
    conv_hyperparams: hyperparameters for the convolution ops
      (presumably a slim arg_scope — TODO confirm against callers).
    num_spatial_bins: number of spatial bins used by the predictor.
    depth: depth of the feature representation.
    crop_size: size to which features are cropped.
    box_code_size: size of the encoding for each box.
  """
  super(RfcnBoxPredictor, self).__init__(is_training, num_classes)
  # Stored for use by _predict; no ops are created at construction time.
  self._box_code_size = box_code_size
  self._crop_size = crop_size
  self._depth = depth
  self._num_spatial_bins = num_spatial_bins
  self._conv_hyperparams = conv_hyperparams
|
'Computes encoded object locations and corresponding confidences.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feat... | def _predict(self, image_features, num_predictions_per_location, proposal_boxes):
| if (num_predictions_per_location != 1):
raise ValueError('Currently RfcnBoxPredictor only supports predicting a single box per class per location.')
batch_size = tf.shape(proposal_boxes)[0]
num_boxes = tf.shape(proposal_boxes)[1]
def get_box_indices(proposals):
... |
'Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets... | def __init__(self, is_training, num_classes, fc_hyperparams, use_dropout, dropout_keep_prob, box_code_size, conv_hyperparams=None, predict_instance_masks=False, mask_prediction_conv_depth=256, predict_keypoints=False):
| super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes)
self._fc_hyperparams = fc_hyperparams
self._use_dropout = use_dropout
self._box_code_size = box_code_size
self._dropout_keep_prob = dropout_keep_prob
self._conv_hyperparams = conv_hyperparams
self._predict_instance_masks = ... |
'Computes encoded object locations and corresponding confidences.
Flattens image_features and applies fully connected ops (with no
non-linearity) to predict box encodings and class predictions. In this
setting, anchors are not spatially arranged in any way and are assumed to
have been folded into the batch dimension. ... | def _predict(self, image_features, num_predictions_per_location):
| if (num_predictions_per_location != 1):
raise ValueError('Currently FullyConnectedBoxPredictor only supports predicting a single box per class per location.')
spatial_averaged_image_features = tf.reduce_mean(image_features, [1, 2], keep_dims=True, name='AvgPool')
fla... |
'Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets... | def __init__(self, is_training, num_classes, conv_hyperparams, min_depth, max_depth, num_layers_before_predictor, use_dropout, dropout_keep_prob, kernel_size, box_code_size, apply_sigmoid_to_scores=False):
| super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes)
if (min_depth > max_depth):
raise ValueError('min_depth should be less than or equal to max_depth')
self._conv_hyperparams = conv_hyperparams
self._min_depth = min_depth
self._max_depth = max_de... |
'Computes encoded object locations and corresponding confidences.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feat... | def _predict(self, image_features, num_predictions_per_location):
| features_depth = static_shape.get_depth(image_features.get_shape())
depth = max(min(features_depth, self._max_depth), self._min_depth)
num_class_slots = (self.num_classes + 1)
net = image_features
with slim.arg_scope(self._conv_hyperparams):
with slim.arg_scope([slim.dropout], is_training=se... |
'Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Raises:
def __init__(self, match_results):
  """Constructs a Match object.

  Args:
    match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
      meaning that column i is matched with row match_results[i].
      (2) match_results[i]=-1, meaning that column i is not matched.
      (3) match_results[i]=-2, meaning that column i is ignored.

  Raises:
    ValueError: if match_results does not have rank 1 or is not an
      int32 tensor.
  """
  if match_results.shape.ndims != 1:
    raise ValueError('match_results should have rank 1')
  if match_results.dtype != tf.int32:
    # Only int32 is accepted here; the message must not suggest otherwise.
    raise ValueError('match_results should be an int32 tensor')
  self._match_results = match_results
|
'The accessor for match results.
Returns:
the tensor which encodes the match results.'
@property
def match_results(self):
  """The accessor for match results.

  Returns:
    the tensor which encodes the match results.
  """
  return self._match_results
|
'Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
def matched_column_indices(self):
  """Returns column indices that match to some row.

  The indices returned by this op are always sorted in increasing order.

  Returns:
    column_indices: int32 tensor of shape [K] with column indices.
  """
  # Matched columns have a row index >= 0, i.e. strictly greater than -1.
  is_matched = tf.greater(self._match_results, -1)
  return self._reshape_and_cast(tf.where(is_matched))
|
'Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
def matched_column_indicator(self):
  """Returns a boolean indicator of which columns are matched.

  Returns:
    column_indicator: boolean vector which is True for matched columns.
  """
  indicator = tf.greater_equal(self._match_results, 0)
  return indicator
|
'Returns number (int32 scalar tensor) of matched columns.'
def num_matched_columns(self):
  """Returns number (int32 scalar tensor) of matched columns."""
  matched = self.matched_column_indices()
  return tf.size(matched)
|
'Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
def unmatched_column_indices(self):
  """Returns column indices that do not match any row.

  The indices returned by this op are always sorted in increasing order.

  Returns:
    column_indices: int32 tensor of shape [K] with column indices.
  """
  # Unmatched columns are encoded as -1 in the match results.
  is_unmatched = tf.equal(self._match_results, -1)
  return self._reshape_and_cast(tf.where(is_unmatched))
|
'Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
def unmatched_column_indicator(self):
  """Returns a boolean indicator of which columns are unmatched.

  Returns:
    column_indicator: boolean vector which is True for unmatched columns.
  """
  indicator = tf.equal(self._match_results, -1)
  return indicator
|
'Returns number (int32 scalar tensor) of unmatched columns.'
def num_unmatched_columns(self):
  """Returns number (int32 scalar tensor) of unmatched columns."""
  unmatched = self.unmatched_column_indices()
  return tf.size(unmatched)
|
'Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
def ignored_column_indices(self):
  """Returns column indices that are ignored (neither matched nor unmatched).

  The indices returned by this op are always sorted in increasing order.

  Returns:
    column_indices: int32 tensor of shape [K] with column indices.
  """
  is_ignored = self.ignored_column_indicator()
  return self._reshape_and_cast(tf.where(is_ignored))
|
'Returns boolean column indicator where True means the colum is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.'
def ignored_column_indicator(self):
  """Returns boolean column indicator where True means the column is ignored.

  Returns:
    column_indicator: boolean vector which is True for all ignored column
      indices.
  """
  # Ignored columns are encoded as -2 in the match results.
  indicator = tf.equal(self._match_results, -2)
  return indicator
|
'Returns number (int32 scalar tensor) of ignored columns.'
def num_ignored_columns(self):
  """Returns number (int32 scalar tensor) of ignored columns."""
  ignored = self.ignored_column_indices()
  return tf.size(ignored)
|
'Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.'
def unmatched_or_ignored_column_indices(self):
  """Returns column indices that are unmatched or ignored.

  The indices returned by this op are always sorted in increasing order.

  Returns:
    column_indices: int32 tensor of shape [K] with column indices.
  """
  # Both unmatched (-1) and ignored (-2) columns are strictly below zero.
  is_negative = tf.greater(0, self._match_results)
  return self._reshape_and_cast(tf.where(is_negative))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.