def refine_keypoints(regressed_keypoints, keypoint_candidates, keypoint_scores, num_keypoint_candidates, bboxes=None, unmatched_keypoint_score=0.1, box_scale=1.2, candidate_search_scale=0.3, candidate_ranking_mode='min_distance'):
'Refines regressed keypoints by snapping to the nearest candidate keypoints.'
(batch_size, num_instances, num_keypoints, _) = shape_utils.combined_static_and_dynamic_shape(regressed_keypoints)
max_candidates = keypoint_candidates.shape[1]
range_tiled = tf.tile(tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]), [batch_size, 1, num_keypoints])
num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1), [1, max_candidates, 1])
invalid_candidates = (range_tiled >= num_candidates_tiled)
nan_mask = tf.where(invalid_candidates, (np.nan * tf.ones_like(invalid_candidates, dtype=tf.float32)), tf.ones_like(invalid_candidates, dtype=tf.float32))
keypoint_candidates_with_nans = tf.math.multiply(keypoint_candidates, tf.expand_dims(nan_mask, (- 1)))
regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints, axis=2)
keypoint_candidates_expanded = tf.expand_dims(keypoint_candidates_with_nans, axis=1)
sqrd_distances = tf.math.reduce_sum(tf.math.squared_difference(regressed_keypoint_expanded, keypoint_candidates_expanded), axis=(- 1))
distances = tf.math.sqrt(sqrd_distances)
min_distances = tf.math.reduce_min(distances, axis=2)
if (candidate_ranking_mode == 'min_distance'):
nearby_candidate_inds = tf.math.argmin(distances, axis=2)
elif (candidate_ranking_mode == 'score_distance_ratio'):
tiled_keypoint_scores = tf.tile(tf.expand_dims(keypoint_scores, axis=1), multiples=[1, num_instances, 1, 1])
ranking_scores = (tiled_keypoint_scores / (distances + 1e-06))
nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)
else:
raise ValueError(('Not recognized candidate_ranking_mode: %s' % candidate_ranking_mode))
(nearby_candidate_coords, nearby_candidate_scores) = _gather_candidates_at_indices(keypoint_candidates, keypoint_scores, nearby_candidate_inds)
if (bboxes is None):
regressed_keypoints_flattened = tf.reshape(regressed_keypoints, [(- 1), num_keypoints, 2])
bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes(regressed_keypoints_flattened)
else:
bboxes_flattened = tf.reshape(bboxes, [(- 1), 4])
boxlist = box_list.BoxList(bboxes_flattened)
boxlist_scaled = box_list_ops.scale_height_width(boxlist, box_scale, box_scale)
bboxes_scaled = boxlist_scaled.get()
bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4])
bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1])
(ymin, xmin, ymax, xmax) = tf.unstack(bboxes_tiled, axis=3)
search_radius = (tf.math.maximum((ymax - ymin), (xmax - xmin)) * candidate_search_scale)
mask = (((((tf.cast((nearby_candidate_coords[:, :, :, 0] < ymin), tf.int32) + tf.cast((nearby_candidate_coords[:, :, :, 0] > ymax), tf.int32)) + tf.cast((nearby_candidate_coords[:, :, :, 1] < xmin), tf.int32)) + tf.cast((nearby_candidate_coords[:, :, :, 1] > xmax), tf.int32)) + tf.cast((nearby_candidate_scores < unmatched_keypoint_score), tf.int32)) + tf.cast((min_distances > search_radius), tf.int32))
mask = (mask > 0)
refined_keypoints = tf.where(tf.tile(tf.expand_dims(mask, (- 1)), [1, 1, 1, 2]), regressed_keypoints, nearby_candidate_coords)
refined_scores = tf.where(mask, (unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores)), nearby_candidate_scores)
return (refined_keypoints, refined_scores)
Refines regressed keypoints by snapping to the nearest candidate keypoints.
The initial regressed keypoints represent a full set of keypoints regressed
from the centers of the objects. The keypoint candidates are estimated
independently from heatmaps, and are not associated with any object instances.
This function refines the regressed keypoints by "snapping" to the
nearest/highest score/highest score-distance ratio (depending on the
candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose").
If no candidates are nearby, the regressed keypoint remains unchanged.
In order to snap a regressed keypoint to a candidate keypoint, the following
must be satisfied:
- the candidate keypoint must be of the same type as the regressed keypoint
- the candidate keypoint must not lie outside the predicted boxes (or the
box that encloses the regressed keypoints for the instance if `bboxes` is
not provided). Note that the box is scaled by `box_scale` in height and
width, to provide some margin around the keypoints
- the distance to the closest candidate keypoint cannot exceed
candidate_search_scale * max(height, width), where height and width refer to
the bounding box for the instance.
Note that the same candidate keypoint is allowed to snap to regressed
keypoints in different instances.
Args:
regressed_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the initial regressed
keypoints.
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the location of
keypoint candidates in [y, x] format (expressed in absolute coordinates in
the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the scores for
keypoint candidates.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] indicating the number of valid candidates for
each keypoint type, as there may be padding (dim 1) of
`keypoint_candidates` and `keypoint_scores`.
bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted
bounding boxes for each instance, expressed in the output coordinate
frame. If not provided, boxes will be computed from regressed keypoints.
unmatched_keypoint_score: float, the default score to use for regressed
keypoints that are not successfully snapped to a nearby candidate.
box_scale: float, the multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints) for
an instance. This scale is typically larger than 1.0 when not providing
`bboxes`.
candidate_search_scale: float, the scale parameter that multiplies the
largest dimension of a bounding box. The resulting distance becomes a
search radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: A string, one of ['min_distance',
'score_distance_ratio'], indicating how to select the candidate. If an
invalid value is provided, a ValueError is raised.
Returns:
A tuple with:
refined_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the final, refined
keypoints.
refined_scores: A float tensor of shape
[batch_size, num_instances, num_keypoints] with scores associated with all
instances and keypoints in `refined_keypoints`.
Raises:
ValueError: if the provided candidate_ranking_mode is not one of
['min_distance', 'score_distance_ratio']
research/object_detection/meta_architectures/center_net_meta_arch.py | refine_keypoints | AvikantSrivastava/models | python
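A minimal usage sketch (assuming eager TF2 and that the function is importable from object_detection.meta_architectures.center_net_meta_arch; the tensor values are invented purely to illustrate the expected shapes):

import tensorflow as tf
from object_detection.meta_architectures import center_net_meta_arch as cnma

# Illustrative values: one image, one instance, two keypoint types, up to
# three candidates per keypoint type.
regressed = tf.constant([[[[10.0, 10.0], [20.0, 20.0]]]])   # [batch, instances, keypoints, 2]
candidates = tf.random.uniform([1, 3, 2, 2], maxval=30.0)   # [batch, max_candidates, keypoints, 2]
scores = tf.random.uniform([1, 3, 2])                       # [batch, max_candidates, keypoints]
num_candidates = tf.constant([[3, 3]], dtype=tf.int32)      # [batch, keypoints]

refined_kpts, refined_scores = cnma.refine_keypoints(
    regressed, candidates, scores, num_candidates,
    bboxes=None,                      # boxes are derived from the regressed keypoints
    unmatched_keypoint_score=0.1,
    box_scale=1.2,
    candidate_search_scale=0.3,
    candidate_ranking_mode='min_distance')
print(refined_kpts.shape, refined_scores.shape)  # (1, 1, 2, 2) (1, 1, 2)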
def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds, num_total_keypoints):
'Scatter keypoint elements into tensors with full keypoints dimension.'
(batch_size, num_instances, _, _) = shape_utils.combined_static_and_dynamic_shape(keypoint_coords)
kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1])
kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=(- 1))
kpt_coords_scattered = tf.scatter_nd(indices=kpt_inds_tensor, updates=kpt_coords_transposed, shape=[num_total_keypoints, batch_size, num_instances, 2])
kpt_scores_scattered = tf.scatter_nd(indices=kpt_inds_tensor, updates=kpt_scores_transposed, shape=[num_total_keypoints, batch_size, num_instances])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0])
return (keypoint_coords_padded, keypoint_scores_padded)
Scatter keypoint elements into tensors with full keypoints dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
keypoint_inds: a list of integers that indicate the keypoint indices for
this specific keypoint class. These indices are used to scatter into
tensors that have a `num_total_keypoints` dimension.
num_total_keypoints: The total number of keypoints that this model predicts.
Returns:
A tuple with
keypoint_coords_padded: a
[batch_size, num_instances, num_total_keypoints,2] float32 tensor.
keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints]
float32 tensor.
research/object_detection/meta_architectures/center_net_meta_arch.py | _pad_to_full_keypoint_dim | AvikantSrivastava/models | python
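A small illustrative sketch (assuming the function above is in scope and eager TF2); a hypothetical model with 4 total keypoints where only keypoint slots 0 and 2 are populated:

import tensorflow as tf

# Illustrative values only.
coords = tf.ones([1, 2, 2, 2])       # [batch, num_instances, num_keypoints=2, 2]
scores = tf.fill([1, 2, 2], 0.9)     # [batch, num_instances, num_keypoints=2]
keypoint_inds = [0, 2]               # slots these two keypoints occupy among 4 total

padded_coords, padded_scores = _pad_to_full_keypoint_dim(
    coords, scores, keypoint_inds, num_total_keypoints=4)
# padded_coords: [1, 2, 4, 2], padded_scores: [1, 2, 4];
# keypoint slots 1 and 3 are zero-filled by the scatter.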
def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds, max_instances):
'Scatter keypoint elements into tensors with full instance dimension.'
(batch_size, _, num_keypoints, _) = shape_utils.combined_static_and_dynamic_shape(keypoint_coords)
kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2])
instance_inds = tf.expand_dims(instance_inds, axis=(- 1))
kpt_coords_scattered = tf.scatter_nd(indices=instance_inds, updates=kpt_coords_transposed, shape=[max_instances, batch_size, num_keypoints, 2])
kpt_scores_scattered = tf.scatter_nd(indices=instance_inds, updates=kpt_scores_transposed, shape=[max_instances, batch_size, num_keypoints])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2])
return (keypoint_coords_padded, keypoint_scores_padded)
Scatter keypoint elements into tensors with full instance dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
instance_inds: a list of integers that indicate the instance indices for
these keypoints. These indices are used to scatter into tensors
that have a `max_instances` dimension.
max_instances: The maximum number of instances detected by the model.
Returns:
A tuple with
keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2]
float32 tensor.
keypoint_scores_padded: a [batch_size, max_instances, num_keypoints]
float32 tensor.
research/object_detection/meta_architectures/center_net_meta_arch.py | _pad_to_full_instance_dim | AvikantSrivastava/models | python
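Analogously, a hedged sketch of padding along the instance dimension (values invented, function assumed in scope):

import tensorflow as tf

# Illustrative values only: keypoints for 2 detected instances.
coords = tf.ones([1, 2, 3, 2])            # [batch, num_instances=2, num_keypoints=3, 2]
scores = tf.fill([1, 2, 3], 0.5)
instance_inds = tf.constant([0, 4])       # slots the 2 instances occupy out of max_instances=5

padded_coords, padded_scores = _pad_to_full_instance_dim(
    coords, scores, instance_inds, max_instances=5)
# padded_coords: [1, 5, 3, 2], padded_scores: [1, 5, 3];
# instance rows 1-3 are zero-filled by the scatter.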
def _gather_candidates_at_indices(keypoint_candidates, keypoint_scores, indices):
'Gathers keypoint candidate coordinates and scores at indices.'
keypoint_candidates_transposed = tf.transpose(keypoint_candidates, [0, 2, 1, 3])
keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1])
nearby_candidate_inds_transposed = tf.transpose(indices, [0, 2, 1])
nearby_candidate_coords_transposed = tf.gather(keypoint_candidates_transposed, nearby_candidate_inds_transposed, batch_dims=2)
nearby_candidate_scores_transposed = tf.gather(keypoint_scores_transposed, nearby_candidate_inds_transposed, batch_dims=2)
gathered_keypoint_candidates = tf.transpose(nearby_candidate_coords_transposed, [0, 2, 1, 3])
gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed, [0, 2, 1])
return (gathered_keypoint_candidates, gathered_keypoint_scores)
Gathers keypoint candidate coordinates and scores at indices.
Args:
keypoint_candidates: a float tensor of shape [batch_size, max_candidates,
num_keypoints, 2] with candidate coordinates.
keypoint_scores: a float tensor of shape [batch_size, max_candidates,
num_keypoints] with keypoint scores.
indices: an integer tensor of shape [batch_size, num_indices, num_keypoints]
with indices.
Returns:
A tuple with
gathered_keypoint_candidates: a float tensor of shape [batch_size,
num_indices, num_keypoints, 2] with gathered coordinates.
gathered_keypoint_scores: a float tensor of shape [batch_size,
num_indices, num_keypoints] with gathered scores.
research/object_detection/meta_architectures/center_net_meta_arch.py | _gather_candidates_at_indices | AvikantSrivastava/models | python
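A worked sketch (values invented, function assumed in scope) showing that, per keypoint type, the selected candidate index picks out both its coordinates and its score:

import tensorflow as tf

# Illustrative values only.
candidates = tf.reshape(tf.range(12, dtype=tf.float32), [1, 3, 2, 2])   # [batch, max_candidates=3, keypoints=2, 2]
scores = tf.constant([[[0.9, 0.1], [0.5, 0.8], [0.2, 0.7]]])            # [batch, max_candidates, keypoints]
indices = tf.constant([[[2, 1]]], dtype=tf.int64)                       # [batch, num_indices=1, keypoints=2]

coords, kp_scores = _gather_candidates_at_indices(candidates, scores, indices)
# coords[0, 0]    == [[8., 9.], [6., 7.]]  (candidate 2 for keypoint 0, candidate 1 for keypoint 1)
# kp_scores[0, 0] == [0.2, 0.8]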
def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):
'Get the index in a flattened array given row and column indices.'
return ((row_indices * num_cols) + col_indices)
Get the index in a flattened array given row and column indices.
research/object_detection/meta_architectures/center_net_meta_arch.py | flattened_indices_from_row_col_indices | AvikantSrivastava/models | python
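For example, with num_cols=5, the element at row 2, column 3 of a row-major grid lands at flattened position 2 * 5 + 3 = 13; the function works on plain ints as well as tensors:

# Illustrative values only.
flattened_indices_from_row_col_indices(row_indices=2, col_indices=3, num_cols=5)  # -> 13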
def row_col_channel_indices_from_flattened_indices(indices, num_cols, num_channels):
'Computes row, column and channel indices from flattened indices.'
row_indices = ((indices // num_channels) // num_cols)
col_indices = ((indices // num_channels) % num_cols)
channel_indices = (indices % num_channels)
return (row_indices, col_indices, channel_indices)
Computes row, column and channel indices from flattened indices.
Args:
indices: An integer tensor of any shape holding the indices in the flattened
space.
num_cols: Number of columns in the image (width).
num_channels: Number of channels in the image.
Returns:
row_indices: The row indices corresponding to each of the input indices.
Same shape as indices.
col_indices: The column indices corresponding to each of the input indices.
Same shape as indices.
channel_indices: The channel indices corresponding to each of the input
indices. Same shape as indices.
research/object_detection/meta_architectures/center_net_meta_arch.py | row_col_channel_indices_from_flattened_indices | AvikantSrivastava/models | python
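A worked example of the inverse mapping, assuming the flattened order is (row, col, channel) with num_cols=5 and num_channels=3 (toy numbers chosen for easy arithmetic):

# Index 38 decodes as: channel = 38 % 3 = 2, col = (38 // 3) % 5 = 2, row = (38 // 3) // 5 = 2.
row, col, channel = row_col_channel_indices_from_flattened_indices(38, num_cols=5, num_channels=3)
# (row, col, channel) == (2, 2, 2); check: (2 * 5 + 2) * 3 + 2 == 38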
def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height, width):
'Computes valid anchor weights for an image assuming pixels will be flattened.'
indices = tf.reshape(tf.range((height * width)), [1, (- 1)])
batch_size = tf.shape(true_image_shapes)[0]
batch_indices = (tf.ones((batch_size, 1), dtype=tf.int32) * indices)
(y_coords, x_coords, _) = row_col_channel_indices_from_flattened_indices(batch_indices, width, 1)
(max_y, max_x) = (true_image_shapes[:, 0], true_image_shapes[:, 1])
max_x = _to_float32(tf.expand_dims(max_x, 1))
max_y = _to_float32(tf.expand_dims(max_y, 1))
x_coords = _to_float32(x_coords)
y_coords = _to_float32(y_coords)
valid_mask = tf.math.logical_and((x_coords < max_x), (y_coords < max_y))
return _to_float32(valid_mask)
Computes valid anchor weights for an image assuming pixels will be flattened.
This function is useful when we only want to penalize valid areas in the
image in the case when padding is used. The function assumes that the loss
function will be applied after flattening the spatial dimensions and returns
anchor weights accordingly.
Args:
true_image_shapes: An integer tensor of shape [batch_size, 3] representing
the true image shape (without padding) for each sample in the batch.
height: height of the prediction from the network.
width: width of the prediction from the network.
Returns:
valid_anchor_weights: a float tensor of shape [batch_size, height * width]
with 1s in locations where the spatial coordinates fall within the height
and width in true_image_shapes.
research/object_detection/meta_architectures/center_net_meta_arch.py | get_valid_anchor_weights_in_flattened_image | AvikantSrivastava/models | python
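A small sketch (eager TF2, function assumed in scope) with toy sizes chosen purely to show the masking arithmetic: a 4x4 prediction grid where only a 2x3 region of the image is valid and the rest is padding:

import tensorflow as tf

# Illustrative values only: [height, width, channels] per image.
true_image_shapes = tf.constant([[2, 3, 3]], dtype=tf.int32)
weights = get_valid_anchor_weights_in_flattened_image(true_image_shapes, height=4, width=4)
# weights: [1, 16] float32; reshaped to [4, 4] it is 1.0 inside the top-left
# 2x3 block (y < 2 and x < 3) and 0.0 elsewhere.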
def convert_strided_predictions_to_normalized_boxes(boxes, stride, true_image_shapes):
"Converts predictions in the output space to normalized boxes.\n\n Boxes falling outside the valid image boundary are clipped to be on the\n boundary.\n\n Args:\n boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw\n coordinates of boxes in the model's output space.\n stride: The stride in the output space.\n true_image_shapes: A tensor of shape [batch_size, 3] representing the true\n shape of the input not considering padding.\n\n Returns:\n boxes: A tensor of shape [batch_size, num_boxes, 4] representing the\n coordinates of the normalized boxes.\n "
def _normalize_boxlist(args):
(boxes, height, width) = args
boxes = box_list_ops.scale(boxes, stride, stride)
boxes = box_list_ops.to_normalized_coordinates(boxes, height, width)
boxes = box_list_ops.clip_to_window(boxes, [0.0, 0.0, 1.0, 1.0], filter_nonoverlapping=False)
return boxes
box_lists = [box_list.BoxList(boxes) for boxes in tf.unstack(boxes, axis=0)]
(true_heights, true_widths, _) = tf.unstack(true_image_shapes, axis=1)
true_heights_list = tf.unstack(true_heights, axis=0)
true_widths_list = tf.unstack(true_widths, axis=0)
box_lists = list(map(_normalize_boxlist, zip(box_lists, true_heights_list, true_widths_list)))
boxes = tf.stack([box_list_instance.get() for box_list_instance in box_lists], axis=0)
return boxes
Converts predictions in the output space to normalized boxes.
Boxes falling outside the valid image boundary are clipped to be on the
boundary.
Args:
boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw
coordinates of boxes in the model's output space.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
Returns:
boxes: A tensor of shape [batch_size, num_boxes, 4] representing the
coordinates of the normalized boxes.
research/object_detection/meta_architectures/center_net_meta_arch.py | convert_strided_predictions_to_normalized_boxes | AvikantSrivastava/models | python
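A numeric sketch (eager TF2, function assumed in scope): one box in the stride-4 output frame for a 320x320 input:

import tensorflow as tf

# Illustrative values only.
boxes = tf.constant([[[10.0, 20.0, 30.0, 40.0]]])               # [batch, num_boxes, 4], output-space coords
true_image_shapes = tf.constant([[320, 320, 3]], dtype=tf.int32)

normalized = convert_strided_predictions_to_normalized_boxes(
    boxes, stride=4, true_image_shapes=true_image_shapes)
# Coordinates are multiplied by the stride, normalized by the true size, then clipped to [0, 1]:
# [10*4/320, 20*4/320, 30*4/320, 40*4/320] == [0.125, 0.25, 0.375, 0.5]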
def convert_strided_predictions_to_normalized_keypoints(keypoint_coords, keypoint_scores, stride, true_image_shapes, clip_out_of_frame_keypoints=False):
"Converts predictions in the output space to normalized keypoints.\n\n If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside\n the valid image boundary are normalized but not clipped; If\n clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the\n valid image boundary are clipped to the closest image boundary and the scores\n will be set to 0.0.\n\n Args:\n keypoint_coords: A tensor of shape\n [batch_size, num_instances, num_keypoints, 2] holding the raw coordinates\n of keypoints in the model's output space.\n keypoint_scores: A tensor of shape\n [batch_size, num_instances, num_keypoints] holding the keypoint scores.\n stride: The stride in the output space.\n true_image_shapes: A tensor of shape [batch_size, 3] representing the true\n shape of the input not considering padding.\n clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside\n the image boundary should be clipped. If True, keypoint coords will be\n clipped to image boundary. If False, keypoints are normalized but not\n filtered based on their location.\n\n Returns:\n keypoint_coords_normalized: A tensor of shape\n [batch_size, num_instances, num_keypoints, 2] representing the coordinates\n of the normalized keypoints.\n keypoint_scores: A tensor of shape\n [batch_size, num_instances, num_keypoints] representing the updated\n keypoint scores.\n "
(batch_size, _, _, _) = shape_utils.combined_static_and_dynamic_shape(keypoint_coords)
(true_heights, true_widths, _) = tf.unstack(true_image_shapes, axis=1)
yscale = (float(stride) / tf.cast(true_heights, tf.float32))
xscale = (float(stride) / tf.cast(true_widths, tf.float32))
yx_scale = tf.stack([yscale, xscale], axis=1)
keypoint_coords_normalized = (keypoint_coords * tf.reshape(yx_scale, [batch_size, 1, 1, 2]))
if clip_out_of_frame_keypoints:
valid_indices = tf.logical_and(tf.logical_and((keypoint_coords_normalized[:, :, :, 0] >= 0.0), (keypoint_coords_normalized[:, :, :, 0] <= 1.0)), tf.logical_and((keypoint_coords_normalized[:, :, :, 1] >= 0.0), (keypoint_coords_normalized[:, :, :, 1] <= 1.0)))
batch_window = tf.tile(tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32), multiples=[batch_size, 1])
def clip_to_window(inputs):
(keypoints, window) = inputs
return keypoint_ops.clip_to_window(keypoints, window)
keypoint_coords_normalized = tf.map_fn(clip_to_window, (keypoint_coords_normalized, batch_window), dtype=tf.float32, back_prop=False)
keypoint_scores = tf.where(valid_indices, keypoint_scores, tf.zeros_like(keypoint_scores))
return (keypoint_coords_normalized, keypoint_scores)
Converts predictions in the output space to normalized keypoints.
If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside
the valid image boundary are normalized but not clipped; If
clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the
valid image boundary are clipped to the closest image boundary and the scores
will be set to 0.0.
Args:
keypoint_coords: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] holding the raw coordinates
of keypoints in the model's output space.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] holding the keypoint scores.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside
the image boundary should be clipped. If True, keypoint coords will be
clipped to image boundary. If False, keypoints are normalized but not
filtered based on their location.
Returns:
keypoint_coords_normalized: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] representing the coordinates
of the normalized keypoints.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] representing the updated
keypoint scores.
research/object_detection/meta_architectures/center_net_meta_arch.py | convert_strided_predictions_to_normalized_keypoints | AvikantSrivastava/models | python
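A similar sketch for keypoints (values invented, function assumed in scope), with out-of-frame clipping enabled:

import tensorflow as tf

# Illustrative values only.
keypoint_coords = tf.constant([[[[16.0, 32.0]]]])      # [batch, instances, keypoints, 2] in the output frame
keypoint_scores = tf.constant([[[0.8]]])
true_image_shapes = tf.constant([[256, 256, 3]], dtype=tf.int32)

norm_coords, out_scores = convert_strided_predictions_to_normalized_keypoints(
    keypoint_coords, keypoint_scores, stride=4,
    true_image_shapes=true_image_shapes, clip_out_of_frame_keypoints=True)
# y: 16 * 4 / 256 = 0.25, x: 32 * 4 / 256 = 0.5, score unchanged at 0.8.
# A keypoint landing outside [0, 1] would be clipped to the window and its score set to 0.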
def convert_strided_predictions_to_instance_masks(boxes, classes, masks, true_image_shapes, densepose_part_heatmap=None, densepose_surface_coords=None, stride=4, mask_height=256, mask_width=256, score_threshold=0.5, densepose_class_index=(- 1)):
'Converts predicted full-image masks into instance masks.'
(batch_size, output_height, output_width, _) = shape_utils.combined_static_and_dynamic_shape(masks)
input_height = (stride * output_height)
input_width = (stride * output_width)
(true_heights, true_widths, _) = tf.unstack(true_image_shapes, axis=1)
densepose_present = True
if ((densepose_part_heatmap is not None) ^ (densepose_surface_coords is not None)):
raise ValueError('To use DensePose, both `densepose_part_heatmap` and `densepose_surface_coords` must be provided')
if ((densepose_part_heatmap is None) and (densepose_surface_coords is None)):
densepose_present = False
densepose_part_heatmap = tf.zeros((batch_size, output_height, output_width, 1), dtype=tf.float32)
densepose_surface_coords = tf.zeros((batch_size, output_height, output_width, 2), dtype=tf.float32)
crop_and_threshold_fn = functools.partial(crop_and_threshold_masks, input_height=input_height, input_width=input_width, mask_height=mask_height, mask_width=mask_width, score_threshold=score_threshold, densepose_class_index=densepose_class_index)
(instance_masks, surface_coords) = shape_utils.static_or_dynamic_map_fn(crop_and_threshold_fn, elems=[boxes, classes, masks, densepose_part_heatmap, densepose_surface_coords, true_heights, true_widths], dtype=[tf.uint8, tf.float32], back_prop=False)
surface_coords = (surface_coords if densepose_present else None)
return (instance_masks, surface_coords)
Converts predicted full-image masks into instance masks.
For each predicted detection box:
* Crop and resize the predicted mask (and optionally DensePose coordinates)
based on the detected bounding box coordinates and class prediction. Uses
bilinear resampling.
* Binarize the mask using the provided score threshold.
Args:
boxes: A tensor of shape [batch, max_detections, 4] holding the predicted
boxes, in normalized coordinates (relative to the true image dimensions).
classes: An integer tensor of shape [batch, max_detections] containing the
detected class for each box (0-indexed).
masks: A [batch, output_height, output_width, num_classes] float32
tensor with class probabilities.
true_image_shapes: A tensor of shape [batch, 3] representing the true
shape of the inputs not considering padding.
densepose_part_heatmap: (Optional) A [batch, output_height, output_width,
num_parts] float32 tensor with part scores (i.e. logits).
densepose_surface_coords: (Optional) A [batch, output_height, output_width,
2 * num_parts] float32 tensor with predicted part coordinates (in
vu-format).
stride: The stride in the output space.
mask_height: The desired resized height for instance masks.
mask_width: The desired resized width for instance masks.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: The class index (0-indexed) corresponding to the
class which has DensePose labels (e.g. person class).
Returns:
A tuple of masks and surface_coords.
instance_masks: A [batch_size, max_detections, mask_height, mask_width]
uint8 tensor with predicted foreground mask for each
instance. If DensePose tensors are provided, then each pixel value in the
mask encodes the 1-indexed part.
surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates. Note that v, u coordinates are
only defined on instance masks, and the coordinates at each location of
the foreground mask correspond to coordinates on a local part coordinate
system (the specific part can be inferred from the `instance_masks`
output). If DensePose feature maps are not passed to this function, this
output will be None.
Raises:
ValueError: If one but not both of `densepose_part_heatmap` and
`densepose_surface_coords` is provided.
research/object_detection/meta_architectures/center_net_meta_arch.py | convert_strided_predictions_to_instance_masks | AvikantSrivastava/models | python
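A shape-level sketch without DensePose inputs (random values, eager TF2, function assumed in scope); only the tensor shapes are meant to be informative:

import tensorflow as tf

# Illustrative values only.
batch, max_det, out_h, out_w, num_classes = 1, 2, 64, 64, 90
boxes = tf.constant([[[0.1, 0.1, 0.6, 0.6], [0.2, 0.3, 0.9, 0.8]]])    # normalized, [batch, max_det, 4]
classes = tf.zeros([batch, max_det], dtype=tf.int32)
masks = tf.random.uniform([batch, out_h, out_w, num_classes])           # per-class mask probabilities
true_image_shapes = tf.constant([[256, 256, 3]], dtype=tf.int32)

instance_masks, surface_coords = convert_strided_predictions_to_instance_masks(
    boxes, classes, masks, true_image_shapes,
    stride=4, mask_height=128, mask_width=128, score_threshold=0.5)
# instance_masks: [1, 2, 128, 128] uint8 foreground masks (one per detection);
# surface_coords is None because no DensePose tensors were provided.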
def convert_strided_predictions_to_instance_masks(boxes, classes, masks, true_image_shapes, densepose_part_heatmap=None, densepose_surface_coords=None, stride=4, mask_height=256, mask_width=256, score_threshold=0.5, densepose_class_index=(- 1)):
'Converts predicted full-image masks into instance masks.\n\n For each predicted detection box:\n * Crop and resize the predicted mask (and optionally DensePose coordinates)\n based on the detected bounding box coordinates and class prediction. Uses\n bilinear resampling.\n * Binarize the mask using the provided score threshold.\n\n Args:\n boxes: A tensor of shape [batch, max_detections, 4] holding the predicted\n boxes, in normalized coordinates (relative to the true image dimensions).\n classes: An integer tensor of shape [batch, max_detections] containing the\n detected class for each box (0-indexed).\n masks: A [batch, output_height, output_width, num_classes] float32\n tensor with class probabilities.\n true_image_shapes: A tensor of shape [batch, 3] representing the true\n shape of the inputs not considering padding.\n densepose_part_heatmap: (Optional) A [batch, output_height, output_width,\n num_parts] float32 tensor with part scores (i.e. logits).\n densepose_surface_coords: (Optional) A [batch, output_height, output_width,\n 2 * num_parts] float32 tensor with predicted part coordinates (in\n vu-format).\n stride: The stride in the output space.\n mask_height: The desired resized height for instance masks.\n mask_width: The desired resized width for instance masks.\n score_threshold: The threshold at which to convert predicted mask\n into foreground pixels.\n densepose_class_index: The class index (0-indexed) corresponding to the\n class which has DensePose labels (e.g. person class).\n\n Returns:\n A tuple of masks and surface_coords.\n instance_masks: A [batch_size, max_detections, mask_height, mask_width]\n uint8 tensor with predicted foreground mask for each\n instance. If DensePose tensors are provided, then each pixel value in the\n mask encodes the 1-indexed part.\n surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2]\n float32 tensor with (v, u) coordinates. Note that v, u coordinates are\n only defined on instance masks, and the coordinates at each location of\n the foreground mask correspond to coordinates on a local part coordinate\n system (the specific part can be inferred from the `instance_masks`\n output. If DensePose feature maps are not passed to this function, this\n output will be None.\n\n Raises:\n ValueError: If one but not both of `densepose_part_heatmap` and\n `densepose_surface_coords` is provided.\n '
(batch_size, output_height, output_width, _) = shape_utils.combined_static_and_dynamic_shape(masks)
input_height = (stride * output_height)
input_width = (stride * output_width)
(true_heights, true_widths, _) = tf.unstack(true_image_shapes, axis=1)
densepose_present = True
if ((densepose_part_heatmap is not None) ^ (densepose_surface_coords is not None)):
raise ValueError('To use DensePose, both `densepose_part_heatmap` and `densepose_surface_coords` must be provided')
if ((densepose_part_heatmap is None) and (densepose_surface_coords is None)):
densepose_present = False
densepose_part_heatmap = tf.zeros((batch_size, output_height, output_width, 1), dtype=tf.float32)
densepose_surface_coords = tf.zeros((batch_size, output_height, output_width, 2), dtype=tf.float32)
crop_and_threshold_fn = functools.partial(crop_and_threshold_masks, input_height=input_height, input_width=input_width, mask_height=mask_height, mask_width=mask_width, score_threshold=score_threshold, densepose_class_index=densepose_class_index)
(instance_masks, surface_coords) = shape_utils.static_or_dynamic_map_fn(crop_and_threshold_fn, elems=[boxes, classes, masks, densepose_part_heatmap, densepose_surface_coords, true_heights, true_widths], dtype=[tf.uint8, tf.float32], back_prop=False)
surface_coords = (surface_coords if densepose_present else None)
return (instance_masks, surface_coords)
|
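As a quick illustration of the crop-and-threshold step this function performs per detection, here is a minimal standalone sketch in plain TensorFlow; the shapes, the box, and the 0.5 threshold are illustrative values, not library defaults.
import tensorflow as tf

# One full-image class probability map and one normalized box [y1, x1, y2, x2].
masks = tf.random.uniform([1, 64, 64, 1])        # [batch, output_height, output_width, 1]
boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]])  # [num_boxes, 4], normalized coordinates
box_indices = tf.constant([0])                   # batch element each box crops from

# Bilinearly crop and resize the mask to a fixed instance-mask resolution,
# then binarize with the score threshold to obtain a uint8 foreground mask.
cropped = tf.image.crop_and_resize(masks, boxes, box_indices,
                                   crop_size=[256, 256], method='bilinear')
instance_mask = tf.cast(cropped >= 0.5, tf.uint8)
print(instance_mask.shape)  # (1, 256, 256, 1)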
def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256, mask_width=256, score_threshold=0.5, densepose_class_index=(- 1)):
'Crops and thresholds masks based on detection boxes.\n\n Args:\n elems: A tuple of\n boxes - float32 tensor of shape [max_detections, 4]\n classes - int32 tensor of shape [max_detections] (0-indexed)\n masks - float32 tensor of shape [output_height, output_width, num_classes]\n part_heatmap - float32 tensor of shape [output_height, output_width,\n num_parts]\n surf_coords - float32 tensor of shape [output_height, output_width,\n 2 * num_parts]\n true_height - scalar int tensor\n true_width - scalar int tensor\n input_height: Input height to network.\n input_width: Input width to network.\n mask_height: Height for resizing mask crops.\n mask_width: Width for resizing mask crops.\n score_threshold: The threshold at which to convert predicted mask\n into foreground pixels.\n densepose_class_index: scalar int tensor with the class index (0-indexed)\n for DensePose.\n\n Returns:\n A tuple of\n all_instances: A [max_detections, mask_height, mask_width] uint8 tensor\n with a predicted foreground mask for each instance. Background is encoded\n as 0, and foreground is encoded as a positive integer. Specific part\n indices are encoded as 1-indexed parts (for classes that have part\n information).\n surface_coords: A [max_detections, mask_height, mask_width, 2]\n float32 tensor with (v, u) coordinates. for each part.\n '
(boxes, classes, masks, part_heatmap, surf_coords, true_height, true_width) = elems
boxlist = box_list.BoxList(boxes)
y_scale = (true_height / input_height)
x_scale = (true_width / input_width)
boxlist = box_list_ops.scale(boxlist, y_scale, x_scale)
boxes = boxlist.get()
num_classes = tf.shape(masks)[(- 1)]
masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis]
part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :], multiples=[num_classes, 1, 1, 1])
surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :], multiples=[num_classes, 1, 1, 1])
feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d], axis=(- 1))
cropped_masks = tf2.image.crop_and_resize(feature_maps_concat, boxes=boxes, box_indices=classes, crop_size=[mask_height, mask_width], method='bilinear')
num_parts = tf.shape(part_heatmap)[(- 1)]
(instance_masks, part_heatmap_cropped, surface_coords_cropped) = tf.split(cropped_masks, [1, num_parts, (2 * num_parts)], axis=(- 1))
instance_masks_int = tf.cast(tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32)
det_with_parts = tf.equal(classes, densepose_class_index)
det_with_parts = tf.cast(tf.reshape(det_with_parts, [(- 1), 1, 1, 1]), dtype=tf.int32)
instance_masks_with_parts = tf.math.multiply(instance_masks_int, det_with_parts)
det_without_parts = (1 - det_with_parts)
instance_masks_without_parts = tf.math.multiply(instance_masks_int, det_without_parts)
part_mask_int_zero_indexed = tf.math.argmax(part_heatmap_cropped, axis=(- 1), output_type=tf.int32)[:, :, :, tf.newaxis]
part_mask_int_one_indexed = (part_mask_int_zero_indexed + 1)
all_instances = (instance_masks_without_parts + (instance_masks_with_parts * part_mask_int_one_indexed))
surface_coords_cropped = tf.reshape(surface_coords_cropped, [(- 1), mask_height, mask_width, num_parts, 2])
surface_coords = gather_surface_coords_for_parts(surface_coords_cropped, part_mask_int_zero_indexed)
surface_coords = (surface_coords * tf.cast(instance_masks_with_parts, tf.float32))
return [tf.squeeze(all_instances, axis=3), surface_coords]
| -162,861,961,586,320,220 |
Crops and thresholds masks based on detection boxes.
Args:
elems: A tuple of
boxes - float32 tensor of shape [max_detections, 4]
classes - int32 tensor of shape [max_detections] (0-indexed)
masks - float32 tensor of shape [output_height, output_width, num_classes]
part_heatmap - float32 tensor of shape [output_height, output_width,
num_parts]
surf_coords - float32 tensor of shape [output_height, output_width,
2 * num_parts]
true_height - scalar int tensor
true_width - scalar int tensor
input_height: Input height to network.
input_width: Input width to network.
mask_height: Height for resizing mask crops.
mask_width: Width for resizing mask crops.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: scalar int tensor with the class index (0-indexed)
for DensePose.
Returns:
A tuple of
all_instances: A [max_detections, mask_height, mask_width] uint8 tensor
with a predicted foreground mask for each instance. Background is encoded
as 0, and foreground is encoded as a positive integer. Specific part
indices are encoded as 1-indexed parts (for classes that have part
information).
surface_coords: A [max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates for each part.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
crop_and_threshold_masks
|
AvikantSrivastava/models
|
python
|
def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256, mask_width=256, score_threshold=0.5, densepose_class_index=(- 1)):
'Crops and thresholds masks based on detection boxes.\n\n Args:\n elems: A tuple of\n boxes - float32 tensor of shape [max_detections, 4]\n classes - int32 tensor of shape [max_detections] (0-indexed)\n masks - float32 tensor of shape [output_height, output_width, num_classes]\n part_heatmap - float32 tensor of shape [output_height, output_width,\n num_parts]\n surf_coords - float32 tensor of shape [output_height, output_width,\n 2 * num_parts]\n true_height - scalar int tensor\n true_width - scalar int tensor\n input_height: Input height to network.\n input_width: Input width to network.\n mask_height: Height for resizing mask crops.\n mask_width: Width for resizing mask crops.\n score_threshold: The threshold at which to convert predicted mask\n into foreground pixels.\n densepose_class_index: scalar int tensor with the class index (0-indexed)\n for DensePose.\n\n Returns:\n A tuple of\n all_instances: A [max_detections, mask_height, mask_width] uint8 tensor\n with a predicted foreground mask for each instance. Background is encoded\n as 0, and foreground is encoded as a positive integer. Specific part\n indices are encoded as 1-indexed parts (for classes that have part\n information).\n surface_coords: A [max_detections, mask_height, mask_width, 2]\n float32 tensor with (v, u) coordinates. for each part.\n '
(boxes, classes, masks, part_heatmap, surf_coords, true_height, true_width) = elems
boxlist = box_list.BoxList(boxes)
y_scale = (true_height / input_height)
x_scale = (true_width / input_width)
boxlist = box_list_ops.scale(boxlist, y_scale, x_scale)
boxes = boxlist.get()
num_classes = tf.shape(masks)[(- 1)]
masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis]
part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :], multiples=[num_classes, 1, 1, 1])
surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :], multiples=[num_classes, 1, 1, 1])
feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d], axis=(- 1))
cropped_masks = tf2.image.crop_and_resize(feature_maps_concat, boxes=boxes, box_indices=classes, crop_size=[mask_height, mask_width], method='bilinear')
num_parts = tf.shape(part_heatmap)[(- 1)]
(instance_masks, part_heatmap_cropped, surface_coords_cropped) = tf.split(cropped_masks, [1, num_parts, (2 * num_parts)], axis=(- 1))
instance_masks_int = tf.cast(tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32)
det_with_parts = tf.equal(classes, densepose_class_index)
det_with_parts = tf.cast(tf.reshape(det_with_parts, [(- 1), 1, 1, 1]), dtype=tf.int32)
instance_masks_with_parts = tf.math.multiply(instance_masks_int, det_with_parts)
det_without_parts = (1 - det_with_parts)
instance_masks_without_parts = tf.math.multiply(instance_masks_int, det_without_parts)
part_mask_int_zero_indexed = tf.math.argmax(part_heatmap_cropped, axis=(- 1), output_type=tf.int32)[:, :, :, tf.newaxis]
part_mask_int_one_indexed = (part_mask_int_zero_indexed + 1)
all_instances = (instance_masks_without_parts + (instance_masks_with_parts * part_mask_int_one_indexed))
surface_coords_cropped = tf.reshape(surface_coords_cropped, [(- 1), mask_height, mask_width, num_parts, 2])
surface_coords = gather_surface_coords_for_parts(surface_coords_cropped, part_mask_int_zero_indexed)
surface_coords = (surface_coords * tf.cast(instance_masks_with_parts, tf.float32))
return [tf.squeeze(all_instances, axis=3), surface_coords]
|
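The part-labelling trick above (argmax over part scores, shifted to 1-indexed labels so 0 can encode background, then masked by the instance foreground) can be illustrated with a toy standalone snippet; the shapes below are made up for the example.
import tensorflow as tf

# Binary foreground mask and per-part scores for a single 2x2 crop with 3 parts.
instance_mask = tf.constant([[1, 0], [1, 1]], dtype=tf.int32)[tf.newaxis, :, :, tf.newaxis]
part_scores = tf.random.uniform([1, 2, 2, 3])

# argmax gives 0-indexed part ids; add 1 so 0 encodes background, then zero out
# every pixel that falls outside the instance mask.
part_ids = tf.math.argmax(part_scores, axis=-1, output_type=tf.int32)[..., tf.newaxis]
encoded = instance_mask * (part_ids + 1)
print(tf.squeeze(encoded, axis=[0, 3]))  # 0 = background, 1..3 = part index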
def gather_surface_coords_for_parts(surface_coords_cropped, highest_scoring_part):
'Gathers the (v, u) coordinates for the highest scoring DensePose parts.\n\n Args:\n surface_coords_cropped: A [max_detections, height, width, num_parts, 2]\n float32 tensor with (v, u) surface coordinates.\n highest_scoring_part: A [max_detections, height, width] integer tensor with\n the highest scoring part (0-indexed) indices for each location.\n\n Returns:\n A [max_detections, height, width, 2] float32 tensor with the (v, u)\n coordinates selected from the highest scoring parts.\n '
(max_detections, height, width, num_parts, _) = shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped)
flattened_surface_coords = tf.reshape(surface_coords_cropped, [(- 1), 2])
flattened_part_ids = tf.reshape(highest_scoring_part, [(- 1)])
flattened_lookup_indices = ((num_parts * tf.range(((max_detections * height) * width))) + flattened_part_ids)
vu_coords_flattened = tf.gather(flattened_surface_coords, flattened_lookup_indices, axis=0)
return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2])
| 5,027,404,084,321,661,000 |
Gathers the (v, u) coordinates for the highest scoring DensePose parts.
Args:
surface_coords_cropped: A [max_detections, height, width, num_parts, 2]
float32 tensor with (v, u) surface coordinates.
highest_scoring_part: A [max_detections, height, width] integer tensor with
the highest scoring part (0-indexed) indices for each location.
Returns:
A [max_detections, height, width, 2] float32 tensor with the (v, u)
coordinates selected from the highest scoring parts.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
gather_surface_coords_for_parts
|
AvikantSrivastava/models
|
python
|
def gather_surface_coords_for_parts(surface_coords_cropped, highest_scoring_part):
'Gathers the (v, u) coordinates for the highest scoring DensePose parts.\n\n Args:\n surface_coords_cropped: A [max_detections, height, width, num_parts, 2]\n float32 tensor with (v, u) surface coordinates.\n highest_scoring_part: A [max_detections, height, width] integer tensor with\n the highest scoring part (0-indexed) indices for each location.\n\n Returns:\n A [max_detections, height, width, 2] float32 tensor with the (v, u)\n coordinates selected from the highest scoring parts.\n '
(max_detections, height, width, num_parts, _) = shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped)
flattened_surface_coords = tf.reshape(surface_coords_cropped, [(- 1), 2])
flattened_part_ids = tf.reshape(highest_scoring_part, [(- 1)])
flattened_lookup_indices = ((num_parts * tf.range(((max_detections * height) * width))) + flattened_part_ids)
vu_coords_flattened = tf.gather(flattened_surface_coords, flattened_lookup_indices, axis=0)
return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2])
|
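A toy standalone walk-through of the flattened gather used above, with made-up shapes; every pixel picks the (v, u) pair belonging to its highest scoring part.
import tensorflow as tf

max_detections, height, width, num_parts = 1, 2, 2, 3
surface_coords = tf.reshape(
    tf.range(max_detections * height * width * num_parts * 2, dtype=tf.float32),
    [max_detections, height, width, num_parts, 2])
highest_part = tf.constant([[[0, 1], [2, 0]]], dtype=tf.int32)  # [1, 2, 2]

# Flatten everything but the (v, u) pair, then build one lookup index per pixel:
# pixel n with part p lives at row (num_parts * n + p) of the flattened coords.
flat_coords = tf.reshape(surface_coords, [-1, 2])
flat_parts = tf.reshape(highest_part, [-1])
lookup = num_parts * tf.range(max_detections * height * width) + flat_parts
picked = tf.reshape(tf.gather(flat_coords, lookup, axis=0),
                    [max_detections, height, width, 2])
print(picked[0, 0, 1])  # part 1 at pixel (0, 1) -> coords [8., 9.]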
def predicted_embeddings_at_object_centers(embedding_predictions, y_indices, x_indices):
'Returns the predicted embeddings at specified object centers.\n\n Args:\n embedding_predictions: A float tensor of shape [batch_size, height, width,\n reid_embed_size] holding predicted embeddings.\n y_indices: A [batch, num_instances] int tensor holding y indices for object\n centers. These indices correspond to locations in the output feature map.\n x_indices: A [batch, num_instances] int tensor holding x indices for object\n centers. These indices correspond to locations in the output feature map.\n\n Returns:\n A float tensor of shape [batch_size, num_objects, reid_embed_size] where\n predicted embeddings are gathered at the provided locations.\n '
(batch_size, _, width, _) = _get_shape(embedding_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(y_indices, x_indices, width)
(_, num_instances) = _get_shape(flattened_indices, 2)
embeddings_flat = _flatten_spatial_dimensions(embedding_predictions)
embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1)
embeddings = tf.reshape(embeddings, [batch_size, num_instances, (- 1)])
return embeddings
| -4,203,698,444,624,568,000 |
Returns the predicted embeddings at specified object centers.
Args:
embedding_predictions: A float tensor of shape [batch_size, height, width,
reid_embed_size] holding predicted embeddings.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, reid_embed_size] where
predicted embeddings are gathered at the provided locations.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
predicted_embeddings_at_object_centers
|
AvikantSrivastava/models
|
python
|
def predicted_embeddings_at_object_centers(embedding_predictions, y_indices, x_indices):
'Returns the predicted embeddings at specified object centers.\n\n Args:\n embedding_predictions: A float tensor of shape [batch_size, height, width,\n reid_embed_size] holding predicted embeddings.\n y_indices: A [batch, num_instances] int tensor holding y indices for object\n centers. These indices correspond to locations in the output feature map.\n x_indices: A [batch, num_instances] int tensor holding x indices for object\n centers. These indices correspond to locations in the output feature map.\n\n Returns:\n A float tensor of shape [batch_size, num_objects, reid_embed_size] where\n predicted embeddings are gathered at the provided locations.\n '
(batch_size, _, width, _) = _get_shape(embedding_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(y_indices, x_indices, width)
(_, num_instances) = _get_shape(flattened_indices, 2)
embeddings_flat = _flatten_spatial_dimensions(embedding_predictions)
embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1)
embeddings = tf.reshape(embeddings, [batch_size, num_instances, (- 1)])
return embeddings
|
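The same flattened-index pattern drives the embedding lookup above; a minimal standalone sketch with toy shapes:
import tensorflow as tf

batch, height, width, embed = 1, 4, 4, 8
embedding_predictions = tf.random.normal([batch, height, width, embed])
y_indices = tf.constant([[1, 3]])  # [batch, num_instances] object-center rows
x_indices = tf.constant([[2, 0]])  # [batch, num_instances] object-center columns

# Row-major flattening of (y, x) into a single spatial index, then a batched
# gather pulls one reid_embed_size vector per object center.
flat_indices = y_indices * width + x_indices
embeddings_flat = tf.reshape(embedding_predictions, [batch, -1, embed])
per_object_embeddings = tf.gather(embeddings_flat, flat_indices, batch_dims=1)
print(per_object_embeddings.shape)  # (1, 2, 8)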
def get_num_instances_from_weights(groundtruth_weights_list):
'Computes the number of instances/boxes from the weights in a batch.\n\n Args:\n groundtruth_weights_list: A list of float tensors with shape\n [max_num_instances] representing whether there is an actual instance in\n the image (with non-zero value) or is padded to match the\n max_num_instances (with value 0.0). The list represents the batch\n dimension.\n\n Returns:\n A scalar integer tensor incidating how many instances/boxes are in the\n images in the batch. Note that this function is usually used to normalize\n the loss so the minimum return value is 1 to avoid weird behavior.\n '
num_instances = tf.reduce_sum([tf.math.count_nonzero(w) for w in groundtruth_weights_list])
num_instances = tf.maximum(num_instances, 1)
return num_instances
| -1,074,146,280,008,215,800 |
Computes the number of instances/boxes from the weights in a batch.
Args:
groundtruth_weights_list: A list of float tensors with shape
[max_num_instances] representing whether there is an actual instance in
the image (with non-zero value) or is padded to match the
max_num_instances (with value 0.0). The list represents the batch
dimension.
Returns:
A scalar integer tensor indicating how many instances/boxes are in the
images in the batch. Note that this function is usually used to normalize
the loss so the minimum return value is 1 to avoid weird behavior.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
get_num_instances_from_weights
|
AvikantSrivastava/models
|
python
|
def get_num_instances_from_weights(groundtruth_weights_list):
'Computes the number of instances/boxes from the weights in a batch.\n\n Args:\n groundtruth_weights_list: A list of float tensors with shape\n [max_num_instances] representing whether there is an actual instance in\n the image (with non-zero value) or is padded to match the\n max_num_instances (with value 0.0). The list represents the batch\n dimension.\n\n Returns:\n A scalar integer tensor incidating how many instances/boxes are in the\n images in the batch. Note that this function is usually used to normalize\n the loss so the minimum return value is 1 to avoid weird behavior.\n '
num_instances = tf.reduce_sum([tf.math.count_nonzero(w) for w in groundtruth_weights_list])
num_instances = tf.maximum(num_instances, 1)
return num_instances
|
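A small standalone check of the weight-based instance count and of how it is typically used to normalize a loss (toy values):
import tensorflow as tf

groundtruth_weights_list = [
    tf.constant([1.0, 1.0, 0.0, 0.0]),  # image 1: two real boxes, two padded
    tf.constant([0.0, 0.0, 0.0, 0.0]),  # image 2: all padding
]
num_instances = tf.reduce_sum(
    [tf.math.count_nonzero(w) for w in groundtruth_weights_list])
num_instances = tf.maximum(num_instances, 1)  # floor at 1 to avoid dividing by 0
print(int(num_instances))  # 2

total_loss = tf.constant(6.0)
print(float(total_loss / tf.cast(num_instances, tf.float32)))  # 3.0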
def __init__(self, name=None, channel_means=(0.0, 0.0, 0.0), channel_stds=(1.0, 1.0, 1.0), bgr_ordering=False):
'Initializes a CenterNet feature extractor.\n\n Args:\n name: str, the name used for the underlying keras model.\n channel_means: A tuple of floats, denoting the mean of each channel\n which will be subtracted from it. If None or empty, we use 0s.\n channel_stds: A tuple of floats, denoting the standard deviation of each\n channel. Each channel will be divided by its standard deviation value.\n If None or empty, we use 1s.\n bgr_ordering: bool, if set will change the channel ordering to be in the\n [blue, red, green] order.\n '
super(CenterNetFeatureExtractor, self).__init__(name=name)
if ((channel_means is None) or (len(channel_means) == 0)):
channel_means = [0.0, 0.0, 0.0]
if ((channel_stds is None) or (len(channel_stds) == 0)):
channel_stds = [1.0, 1.0, 1.0]
self._channel_means = channel_means
self._channel_stds = channel_stds
self._bgr_ordering = bgr_ordering
| 3,949,862,726,365,284,000 |
Initializes a CenterNet feature extractor.
Args:
name: str, the name used for the underlying keras model.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it. If None or empty, we use 0s.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
If None or empty, we use 1s.
bgr_ordering: bool, if set will change the channel ordering to be in the
[blue, green, red] order.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
__init__
|
AvikantSrivastava/models
|
python
|
def __init__(self, name=None, channel_means=(0.0, 0.0, 0.0), channel_stds=(1.0, 1.0, 1.0), bgr_ordering=False):
'Initializes a CenterNet feature extractor.\n\n Args:\n name: str, the name used for the underlying keras model.\n channel_means: A tuple of floats, denoting the mean of each channel\n which will be subtracted from it. If None or empty, we use 0s.\n channel_stds: A tuple of floats, denoting the standard deviation of each\n channel. Each channel will be divided by its standard deviation value.\n If None or empty, we use 1s.\n bgr_ordering: bool, if set will change the channel ordering to be in the\n [blue, red, green] order.\n '
super(CenterNetFeatureExtractor, self).__init__(name=name)
if ((channel_means is None) or (len(channel_means) == 0)):
channel_means = [0.0, 0.0, 0.0]
if ((channel_stds is None) or (len(channel_stds) == 0)):
channel_stds = [1.0, 1.0, 1.0]
self._channel_means = channel_means
self._channel_stds = channel_stds
self._bgr_ordering = bgr_ordering
|
def preprocess(self, inputs):
'Converts a batch of unscaled images to a scale suitable for the model.\n\n This method normalizes the image using the given `channel_means` and\n `channels_stds` values at initialization time while optionally flipping\n the channel order if `bgr_ordering` is set.\n\n Args:\n inputs: a [batch, height, width, channels] float32 tensor\n\n Returns:\n outputs: a [batch, height, width, channels] float32 tensor\n\n '
if self._bgr_ordering:
(red, green, blue) = tf.unstack(inputs, axis=3)
inputs = tf.stack([blue, green, red], axis=3)
channel_means = tf.reshape(tf.constant(self._channel_means), [1, 1, 1, (- 1)])
channel_stds = tf.reshape(tf.constant(self._channel_stds), [1, 1, 1, (- 1)])
return ((inputs - channel_means) / channel_stds)
| -6,190,530,192,781,071,000 |
Converts a batch of unscaled images to a scale suitable for the model.
This method normalizes the image using the given `channel_means` and
`channel_stds` values at initialization time while optionally flipping
the channel order if `bgr_ordering` is set.
Args:
inputs: a [batch, height, width, channels] float32 tensor
Returns:
outputs: a [batch, height, width, channels] float32 tensor
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
preprocess
|
AvikantSrivastava/models
|
python
|
def preprocess(self, inputs):
'Converts a batch of unscaled images to a scale suitable for the model.\n\n This method normalizes the image using the given `channel_means` and\n `channels_stds` values at initialization time while optionally flipping\n the channel order if `bgr_ordering` is set.\n\n Args:\n inputs: a [batch, height, width, channels] float32 tensor\n\n Returns:\n outputs: a [batch, height, width, channels] float32 tensor\n\n '
if self._bgr_ordering:
(red, green, blue) = tf.unstack(inputs, axis=3)
inputs = tf.stack([blue, green, red], axis=3)
channel_means = tf.reshape(tf.constant(self._channel_means), [1, 1, 1, (- 1)])
channel_stds = tf.reshape(tf.constant(self._channel_stds), [1, 1, 1, (- 1)])
return ((inputs - channel_means) / channel_stds)
|
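A standalone sketch of the normalization performed by preprocess(); the channel statistics below are ImageNet-style numbers chosen purely for illustration, not values prescribed by the library, and they are assumed to be given in the post-flip (BGR) order when bgr_ordering is used.
import tensorflow as tf

channel_means = tf.constant([103.53, 116.28, 123.675])  # illustrative, BGR order
channel_stds = tf.constant([57.375, 57.12, 58.395])
bgr_ordering = True

images = tf.random.uniform([2, 512, 512, 3], maxval=255.0)
if bgr_ordering:
    red, green, blue = tf.unstack(images, axis=3)
    images = tf.stack([blue, green, red], axis=3)
normalized = ((images - tf.reshape(channel_means, [1, 1, 1, -1]))
              / tf.reshape(channel_stds, [1, 1, 1, -1]))
print(normalized.shape)  # (2, 512, 512, 3)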
@property
@abc.abstractmethod
def out_stride(self):
'The stride in the output image of the network.'
pass
| -2,731,785,466,358,025,700 |
The stride in the output image of the network.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
out_stride
|
AvikantSrivastava/models
|
python
|
@property
@abc.abstractmethod
def out_stride(self):
pass
|
@property
@abc.abstractmethod
def num_feature_outputs(self):
    'The number of feature outputs returned by the feature extractor.'
pass
| -5,547,701,700,219,693,000 |
The number of feature outputs returned by the feature extractor.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
num_feature_outputs
|
AvikantSrivastava/models
|
python
|
@property
@abc.abstractmethod
def num_feature_outputs(self):
pass
|
@property
@abc.abstractmethod
def supported_sub_model_types(self):
'Valid sub model types supported by the get_sub_model function.'
pass
| -2,112,042,718,320,665,900 |
Valid sub model types supported by the get_sub_model function.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
supported_sub_model_types
|
AvikantSrivastava/models
|
python
|
@property
@abc.abstractmethod
def supported_sub_model_types(self):
pass
|
@abc.abstractmethod
def get_sub_model(self, sub_model_type):
"Returns the underlying keras model for the given sub_model_type.\n\n This function is useful when we only want to get a subset of weights to\n be restored from a checkpoint.\n\n Args:\n sub_model_type: string, the type of sub model. Currently, CenterNet\n feature extractors support 'detection' and 'classification'.\n "
pass
| -1,582,457,041,655,619,000 |
Returns the underlying keras model for the given sub_model_type.
This function is useful when we only want to get a subset of weights to
be restored from a checkpoint.
Args:
sub_model_type: string, the type of sub model. Currently, CenterNet
feature extractors support 'detection' and 'classification'.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
get_sub_model
|
AvikantSrivastava/models
|
python
|
@abc.abstractmethod
def get_sub_model(self, sub_model_type):
"Returns the underlying keras model for the given sub_model_type.\n\n This function is useful when we only want to get a subset of weights to\n be restored from a checkpoint.\n\n Args:\n sub_model_type: string, the type of sub model. Currently, CenterNet\n feature extractors support 'detection' and 'classification'.\n "
pass
|
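To make the abstract interface in the preceding entries concrete, here is a hypothetical minimal subclass; the name ToyFeatureExtractor, the single-conv backbone, and the stride of 4 are all made up for illustration, and the real interface may impose further requirements beyond these four members.
import tensorflow as tf
from object_detection.meta_architectures import center_net_meta_arch

class ToyFeatureExtractor(center_net_meta_arch.CenterNetFeatureExtractor):
    'Illustrative feature extractor with a single downsampling convolution.'

    def __init__(self):
        super(ToyFeatureExtractor, self).__init__(
            name='toy', channel_means=(0.0, 0.0, 0.0), channel_stds=(1.0, 1.0, 1.0))
        # A lone stride-4 convolution stands in for a real backbone.
        self._backbone = tf.keras.layers.Conv2D(64, 3, strides=4, padding='same')

    def call(self, inputs):
        # CenterNet consumes a list of feature maps, one per feature output.
        return [self._backbone(inputs)]

    @property
    def out_stride(self):
        return 4

    @property
    def num_feature_outputs(self):
        return 1

    @property
    def supported_sub_model_types(self):
        return ['detection']

    def get_sub_model(self, sub_model_type):
        if sub_model_type not in self.supported_sub_model_types:
            raise ValueError('Sub model type "%s" not supported.' % sub_model_type)
        return self._backbone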
def __new__(cls, localization_loss, scale_loss_weight, offset_loss_weight, task_loss_weight=1.0):
'Constructor with default values for ObjectDetectionParams.\n\n Args:\n localization_loss: a object_detection.core.losses.Loss object to compute\n the loss for the center offset and height/width predictions in\n CenterNet.\n scale_loss_weight: float, The weight for localizing box size. Note that\n the scale loss is dependent on the input image size, since we penalize\n the raw height and width. This constant may need to be adjusted\n depending on the input size.\n offset_loss_weight: float, The weight for localizing center offsets.\n task_loss_weight: float, the weight of the object detection loss.\n\n Returns:\n An initialized ObjectDetectionParams namedtuple.\n '
return super(ObjectDetectionParams, cls).__new__(cls, localization_loss, scale_loss_weight, offset_loss_weight, task_loss_weight)
| 319,322,182,275,863,940 |
Constructor with default values for ObjectDetectionParams.
Args:
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
scale_loss_weight: float, The weight for localizing box size. Note that
the scale loss is dependent on the input image size, since we penalize
the raw height and width. This constant may need to be adjusted
depending on the input size.
offset_loss_weight: float, The weight for localizing center offsets.
task_loss_weight: float, the weight of the object detection loss.
Returns:
An initialized ObjectDetectionParams namedtuple.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
__new__
|
AvikantSrivastava/models
|
python
|
def __new__(cls, localization_loss, scale_loss_weight, offset_loss_weight, task_loss_weight=1.0):
'Constructor with default values for ObjectDetectionParams.\n\n Args:\n localization_loss: a object_detection.core.losses.Loss object to compute\n the loss for the center offset and height/width predictions in\n CenterNet.\n scale_loss_weight: float, The weight for localizing box size. Note that\n the scale loss is dependent on the input image size, since we penalize\n the raw height and width. This constant may need to be adjusted\n depending on the input size.\n offset_loss_weight: float, The weight for localizing center offsets.\n task_loss_weight: float, the weight of the object detection loss.\n\n Returns:\n An initialized ObjectDetectionParams namedtuple.\n '
return super(ObjectDetectionParams, cls).__new__(cls, localization_loss, scale_loss_weight, offset_loss_weight, task_loss_weight)
|
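An illustrative construction of ObjectDetectionParams; the loss class comes from object_detection.core.losses and the weights below are example values, not recommended settings.
from object_detection.core import losses
from object_detection.meta_architectures import center_net_meta_arch

od_params = center_net_meta_arch.ObjectDetectionParams(
    localization_loss=losses.L1LocalizationLoss(),
    scale_loss_weight=0.1,   # example value; depends on the input resolution
    offset_loss_weight=1.0,
    task_loss_weight=1.0)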
def __new__(cls, task_name, class_id, keypoint_indices, classification_loss, localization_loss, keypoint_labels=None, keypoint_std_dev=None, keypoint_heatmap_loss_weight=1.0, keypoint_offset_loss_weight=1.0, keypoint_regression_loss_weight=1.0, keypoint_candidate_score_threshold=0.1, heatmap_bias_init=(- 2.19), num_candidates_per_keypoint=100, task_loss_weight=1.0, peak_max_pool_kernel_size=3, unmatched_keypoint_score=0.1, box_scale=1.2, candidate_search_scale=0.3, candidate_ranking_mode='min_distance', offset_peak_radius=0, per_keypoint_offset=False):
'Constructor with default values for KeypointEstimationParams.\n\n Args:\n task_name: string, the name of the task this namedtuple corresponds to.\n Note that it should be an unique identifier of the task.\n class_id: int, the ID of the class that contains the target keypoints to\n considered in this task. For example, if the task is human pose\n estimation, the class id should correspond to the "human" class. Note\n that the ID is 0-based, meaning that class 0 corresponds to the first\n non-background object class.\n keypoint_indices: A list of integers representing the indicies of the\n keypoints to be considered in this task. This is used to retrieve the\n subset of the keypoints from gt_keypoints that should be considered in\n this task.\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the class predictions in CenterNet.\n localization_loss: an object_detection.core.losses.Loss object to compute\n the loss for the center offset and height/width predictions in\n CenterNet.\n keypoint_labels: A list of strings representing the label text of each\n keypoint, e.g. "nose", \'left_shoulder". Note that the length of this\n list should be equal to keypoint_indices.\n keypoint_std_dev: A list of float represent the standard deviation of the\n Gaussian kernel used to generate the keypoint heatmap. It is to provide\n the flexibility of using different sizes of Gaussian kernel for each\n keypoint class.\n keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.\n keypoint_offset_loss_weight: float, The weight for the keypoint offsets\n loss.\n keypoint_regression_loss_weight: float, The weight for keypoint regression\n loss. Note that the loss is dependent on the input image size, since we\n penalize the raw height and width. This constant may need to be adjusted\n depending on the input size.\n keypoint_candidate_score_threshold: float, The heatmap score threshold for\n a keypoint to become a valid candidate.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the class prediction head. If set to None, the bias is\n initialized with zeros.\n num_candidates_per_keypoint: The maximum number of candidates to retrieve\n for each keypoint.\n task_loss_weight: float, the weight of the keypoint estimation loss.\n peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak\n score locations in a neighborhood (independently for each keypoint\n types).\n unmatched_keypoint_score: The default score to use for regressed keypoints\n that are not successfully snapped to a nearby candidate.\n box_scale: The multiplier to expand the bounding boxes (either the\n provided boxes or those which tightly cover the regressed keypoints).\n candidate_search_scale: The scale parameter that multiplies the largest\n dimension of a bounding box. The resulting distance becomes a search\n radius for candidates in the vicinity of each regressed keypoint.\n candidate_ranking_mode: One of [\'min_distance\', \'score_distance_ratio\']\n indicating how to select the keypoint candidate.\n offset_peak_radius: The radius (in the unit of output pixel) around\n groundtruth heatmap peak to assign the offset targets. If set 0, then\n the offset target will only be assigned to the heatmap peak (same\n behavior as the original paper).\n per_keypoint_offset: A bool indicates whether to assign offsets for each\n keypoint channel separately. 
If set False, the output offset target has\n the shape [batch_size, out_height, out_width, 2] (same behavior as the\n original paper). If set True, the output offset target has the shape\n [batch_size, out_height, out_width, 2 * num_keypoints] (recommended when\n the offset_peak_radius is not zero).\n\n Returns:\n An initialized KeypointEstimationParams namedtuple.\n '
return super(KeypointEstimationParams, cls).__new__(cls, task_name, class_id, keypoint_indices, classification_loss, localization_loss, keypoint_labels, keypoint_std_dev, keypoint_heatmap_loss_weight, keypoint_offset_loss_weight, keypoint_regression_loss_weight, keypoint_candidate_score_threshold, heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight, peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale, candidate_search_scale, candidate_ranking_mode, offset_peak_radius, per_keypoint_offset)
| 1,224,667,856,985,538,300 |
Constructor with default values for KeypointEstimationParams.
Args:
task_name: string, the name of the task this namedtuple corresponds to.
Note that it should be a unique identifier of the task.
class_id: int, the ID of the class that contains the target keypoints to
be considered in this task. For example, if the task is human pose
estimation, the class id should correspond to the "human" class. Note
that the ID is 0-based, meaning that class 0 corresponds to the first
non-background object class.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
keypoint_labels: A list of strings representing the label text of each
keypoint, e.g. "nose", "left_shoulder". Note that the length of this
list should be equal to keypoint_indices.
keypoint_std_dev: A list of floats representing the standard deviation of the
Gaussian kernel used to generate the keypoint heatmap. It is to provide
the flexibility of using different sizes of Gaussian kernel for each
keypoint class.
keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.
keypoint_offset_loss_weight: float, The weight for the keypoint offsets
loss.
keypoint_regression_loss_weight: float, The weight for keypoint regression
loss. Note that the loss is dependent on the input image size, since we
penalize the raw height and width. This constant may need to be adjusted
depending on the input size.
keypoint_candidate_score_threshold: float, The heatmap score threshold for
a keypoint to become a valid candidate.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the class prediction head. If set to None, the bias is
initialized with zeros.
num_candidates_per_keypoint: The maximum number of candidates to retrieve
for each keypoint.
task_loss_weight: float, the weight of the keypoint estimation loss.
peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak
score locations in a neighborhood (independently for each keypoint
type).
unmatched_keypoint_score: The default score to use for regressed keypoints
that are not successfully snapped to a nearby candidate.
box_scale: The multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints).
candidate_search_scale: The scale parameter that multiplies the largest
dimension of a bounding box. The resulting distance becomes a search
radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio']
indicating how to select the keypoint candidate.
offset_peak_radius: The radius (in the unit of output pixel) around
groundtruth heatmap peak to assign the offset targets. If set to 0, then
the offset target will only be assigned to the heatmap peak (same
behavior as the original paper).
per_keypoint_offset: A bool indicating whether to assign offsets for each
keypoint channel separately. If set to False, the output offset target has
the shape [batch_size, out_height, out_width, 2] (same behavior as the
original paper). If set to True, the output offset target has the shape
[batch_size, out_height, out_width, 2 * num_keypoints] (recommended when
the offset_peak_radius is not zero).
Returns:
An initialized KeypointEstimationParams namedtuple.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
__new__
|
AvikantSrivastava/models
|
python
|
def __new__(cls, task_name, class_id, keypoint_indices, classification_loss, localization_loss, keypoint_labels=None, keypoint_std_dev=None, keypoint_heatmap_loss_weight=1.0, keypoint_offset_loss_weight=1.0, keypoint_regression_loss_weight=1.0, keypoint_candidate_score_threshold=0.1, heatmap_bias_init=(- 2.19), num_candidates_per_keypoint=100, task_loss_weight=1.0, peak_max_pool_kernel_size=3, unmatched_keypoint_score=0.1, box_scale=1.2, candidate_search_scale=0.3, candidate_ranking_mode='min_distance', offset_peak_radius=0, per_keypoint_offset=False):
'Constructor with default values for KeypointEstimationParams.\n\n Args:\n task_name: string, the name of the task this namedtuple corresponds to.\n Note that it should be an unique identifier of the task.\n class_id: int, the ID of the class that contains the target keypoints to\n considered in this task. For example, if the task is human pose\n estimation, the class id should correspond to the "human" class. Note\n that the ID is 0-based, meaning that class 0 corresponds to the first\n non-background object class.\n keypoint_indices: A list of integers representing the indicies of the\n keypoints to be considered in this task. This is used to retrieve the\n subset of the keypoints from gt_keypoints that should be considered in\n this task.\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the class predictions in CenterNet.\n localization_loss: an object_detection.core.losses.Loss object to compute\n the loss for the center offset and height/width predictions in\n CenterNet.\n keypoint_labels: A list of strings representing the label text of each\n keypoint, e.g. "nose", \'left_shoulder". Note that the length of this\n list should be equal to keypoint_indices.\n keypoint_std_dev: A list of float represent the standard deviation of the\n Gaussian kernel used to generate the keypoint heatmap. It is to provide\n the flexibility of using different sizes of Gaussian kernel for each\n keypoint class.\n keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.\n keypoint_offset_loss_weight: float, The weight for the keypoint offsets\n loss.\n keypoint_regression_loss_weight: float, The weight for keypoint regression\n loss. Note that the loss is dependent on the input image size, since we\n penalize the raw height and width. This constant may need to be adjusted\n depending on the input size.\n keypoint_candidate_score_threshold: float, The heatmap score threshold for\n a keypoint to become a valid candidate.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the class prediction head. If set to None, the bias is\n initialized with zeros.\n num_candidates_per_keypoint: The maximum number of candidates to retrieve\n for each keypoint.\n task_loss_weight: float, the weight of the keypoint estimation loss.\n peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak\n score locations in a neighborhood (independently for each keypoint\n types).\n unmatched_keypoint_score: The default score to use for regressed keypoints\n that are not successfully snapped to a nearby candidate.\n box_scale: The multiplier to expand the bounding boxes (either the\n provided boxes or those which tightly cover the regressed keypoints).\n candidate_search_scale: The scale parameter that multiplies the largest\n dimension of a bounding box. The resulting distance becomes a search\n radius for candidates in the vicinity of each regressed keypoint.\n candidate_ranking_mode: One of [\'min_distance\', \'score_distance_ratio\']\n indicating how to select the keypoint candidate.\n offset_peak_radius: The radius (in the unit of output pixel) around\n groundtruth heatmap peak to assign the offset targets. If set 0, then\n the offset target will only be assigned to the heatmap peak (same\n behavior as the original paper).\n per_keypoint_offset: A bool indicates whether to assign offsets for each\n keypoint channel separately. 
If set False, the output offset target has\n the shape [batch_size, out_height, out_width, 2] (same behavior as the\n original paper). If set True, the output offset target has the shape\n [batch_size, out_height, out_width, 2 * num_keypoints] (recommended when\n the offset_peak_radius is not zero).\n\n Returns:\n An initialized KeypointEstimationParams namedtuple.\n '
return super(KeypointEstimationParams, cls).__new__(cls, task_name, class_id, keypoint_indices, classification_loss, localization_loss, keypoint_labels, keypoint_std_dev, keypoint_heatmap_loss_weight, keypoint_offset_loss_weight, keypoint_regression_loss_weight, keypoint_candidate_score_threshold, heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight, peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale, candidate_search_scale, candidate_ranking_mode, offset_peak_radius, per_keypoint_offset)
|
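An illustrative KeypointEstimationParams for a COCO-style human pose task; the class id, the 17 keypoint indices, the loss choices, and the weights are example values only.
from object_detection.core import losses
from object_detection.meta_architectures import center_net_meta_arch

kp_params = center_net_meta_arch.KeypointEstimationParams(
    task_name='human_pose',
    class_id=0,                        # 0-indexed "person" class in this example
    keypoint_indices=list(range(17)),  # all 17 COCO keypoints
    classification_loss=losses.PenaltyReducedLogisticFocalLoss(alpha=2.0, beta=4.0),
    localization_loss=losses.L1LocalizationLoss(),
    keypoint_regression_loss_weight=0.1,
    candidate_ranking_mode='min_distance')

# One model can host several such tasks, keyed by task name.
kp_params_dict = {kp_params.task_name: kp_params}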
def __new__(cls, classification_loss, object_center_loss_weight, heatmap_bias_init=(- 2.19), min_box_overlap_iou=0.7, max_box_predictions=100, use_labeled_classes=False):
'Constructor with default values for ObjectCenterParams.\n\n Args:\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the class predictions in CenterNet.\n object_center_loss_weight: float, The weight for the object center loss.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the object center prediction head. If set to None, the bias is\n initialized with zeros.\n min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes\n need have with groundtruth boxes to not be penalized. This is used for\n computing the class specific center heatmaps.\n max_box_predictions: int, the maximum number of boxes to predict.\n use_labeled_classes: boolean, compute the loss only labeled classes.\n\n Returns:\n An initialized ObjectCenterParams namedtuple.\n '
return super(ObjectCenterParams, cls).__new__(cls, classification_loss, object_center_loss_weight, heatmap_bias_init, min_box_overlap_iou, max_box_predictions, use_labeled_classes)
| -244,050,174,111,315,200 |
Constructor with default values for ObjectCenterParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
object_center_loss_weight: float, The weight for the object center loss.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the object center prediction head. If set to None, the bias is
initialized with zeros.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
need to have with groundtruth boxes to not be penalized. This is used for
computing the class specific center heatmaps.
max_box_predictions: int, the maximum number of boxes to predict.
use_labeled_classes: boolean, whether to compute the loss only for labeled classes.
Returns:
An initialized ObjectCenterParams namedtuple.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
__new__
|
AvikantSrivastava/models
|
python
|
def __new__(cls, classification_loss, object_center_loss_weight, heatmap_bias_init=(- 2.19), min_box_overlap_iou=0.7, max_box_predictions=100, use_labeled_classes=False):
'Constructor with default values for ObjectCenterParams.\n\n Args:\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the class predictions in CenterNet.\n object_center_loss_weight: float, The weight for the object center loss.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the object center prediction head. If set to None, the bias is\n initialized with zeros.\n min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes\n need have with groundtruth boxes to not be penalized. This is used for\n computing the class specific center heatmaps.\n max_box_predictions: int, the maximum number of boxes to predict.\n use_labeled_classes: boolean, compute the loss only labeled classes.\n\n Returns:\n An initialized ObjectCenterParams namedtuple.\n '
return super(ObjectCenterParams, cls).__new__(cls, classification_loss, object_center_loss_weight, heatmap_bias_init, min_box_overlap_iou, max_box_predictions, use_labeled_classes)
|
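An illustrative ObjectCenterParams; a penalty-reduced focal loss is the kind of loss typically paired with center heatmaps, but the particular arguments here are example values, not prescribed defaults.
from object_detection.core import losses
from object_detection.meta_architectures import center_net_meta_arch

center_params = center_net_meta_arch.ObjectCenterParams(
    classification_loss=losses.PenaltyReducedLogisticFocalLoss(alpha=2.0, beta=4.0),
    object_center_loss_weight=1.0,
    min_box_overlap_iou=0.7,
    max_box_predictions=100)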
def __new__(cls, classification_loss, task_loss_weight=1.0, mask_height=256, mask_width=256, score_threshold=0.5, heatmap_bias_init=(- 2.19)):
'Constructor with default values for MaskParams.\n\n Args:\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the semantic segmentation predictions in CenterNet.\n task_loss_weight: float, The loss weight for the segmentation task.\n mask_height: The height of the resized instance segmentation mask.\n mask_width: The width of the resized instance segmentation mask.\n score_threshold: The threshold at which to convert predicted mask\n probabilities (after passing through sigmoid) into foreground pixels.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the semantic segmentation prediction head. If set to None, the\n bias is initialized with zeros.\n\n Returns:\n An initialized MaskParams namedtuple.\n '
return super(MaskParams, cls).__new__(cls, classification_loss, task_loss_weight, mask_height, mask_width, score_threshold, heatmap_bias_init)
| 7,861,250,382,031,103,000 |
Constructor with default values for MaskParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the semantic segmentation predictions in CenterNet.
task_loss_weight: float, The loss weight for the segmentation task.
mask_height: The height of the resized instance segmentation mask.
mask_width: The width of the resized instance segmentation mask.
score_threshold: The threshold at which to convert predicted mask
probabilities (after passing through sigmoid) into foreground pixels.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the semantic segmentation prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized MaskParams namedtuple.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
__new__
|
AvikantSrivastava/models
|
python
|
def __new__(cls, classification_loss, task_loss_weight=1.0, mask_height=256, mask_width=256, score_threshold=0.5, heatmap_bias_init=(- 2.19)):
'Constructor with default values for MaskParams.\n\n Args:\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the semantic segmentation predictions in CenterNet.\n task_loss_weight: float, The loss weight for the segmentation task.\n mask_height: The height of the resized instance segmentation mask.\n mask_width: The width of the resized instance segmentation mask.\n score_threshold: The threshold at which to convert predicted mask\n probabilities (after passing through sigmoid) into foreground pixels.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the semantic segmentation prediction head. If set to None, the\n bias is initialized with zeros.\n\n Returns:\n An initialized MaskParams namedtuple.\n '
return super(MaskParams, cls).__new__(cls, classification_loss, task_loss_weight, mask_height, mask_width, score_threshold, heatmap_bias_init)
|
def __new__(cls, class_id, classification_loss, localization_loss, part_loss_weight=1.0, coordinate_loss_weight=1.0, num_parts=24, task_loss_weight=1.0, upsample_to_input_res=True, upsample_method='bilinear', heatmap_bias_init=(- 2.19)):
'Constructor with default values for DensePoseParams.\n\n Args:\n class_id: the ID of the class that contains the DensePose groundtruth.\n This should typically correspond to the "person" class. Note that the ID\n is 0-based, meaning that class 0 corresponds to the first non-background\n object class.\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the body part predictions in CenterNet.\n localization_loss: an object_detection.core.losses.Loss object to compute\n the loss for the surface coordinate regression in CenterNet.\n part_loss_weight: The loss weight to apply to part prediction.\n coordinate_loss_weight: The loss weight to apply to surface coordinate\n prediction.\n num_parts: The number of DensePose parts to predict.\n task_loss_weight: float, the loss weight for the DensePose task.\n upsample_to_input_res: Whether to upsample the DensePose feature maps to\n the input resolution before applying loss. Note that the prediction\n outputs are still at the standard CenterNet output stride.\n upsample_method: Method for upsampling DensePose feature maps. Options are\n either \'bilinear\' or \'nearest\'). This takes no effect when\n `upsample_to_input_res` is False.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the part prediction head. If set to None, the\n bias is initialized with zeros.\n\n Returns:\n An initialized DensePoseParams namedtuple.\n '
return super(DensePoseParams, cls).__new__(cls, class_id, classification_loss, localization_loss, part_loss_weight, coordinate_loss_weight, num_parts, task_loss_weight, upsample_to_input_res, upsample_method, heatmap_bias_init)
| 5,725,670,139,513,713,000 |
Constructor with default values for DensePoseParams.
Args:
class_id: the ID of the class that contains the DensePose groundtruth.
This should typically correspond to the "person" class. Note that the ID
is 0-based, meaning that class 0 corresponds to the first non-background
object class.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the body part predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the surface coordinate regression in CenterNet.
part_loss_weight: The loss weight to apply to part prediction.
coordinate_loss_weight: The loss weight to apply to surface coordinate
prediction.
num_parts: The number of DensePose parts to predict.
task_loss_weight: float, the loss weight for the DensePose task.
upsample_to_input_res: Whether to upsample the DensePose feature maps to
the input resolution before applying loss. Note that the prediction
outputs are still at the standard CenterNet output stride.
upsample_method: Method for upsampling DensePose feature maps. Options are
either 'bilinear' or 'nearest'. This has no effect when
`upsample_to_input_res` is False.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the part prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized DensePoseParams namedtuple.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
__new__
|
AvikantSrivastava/models
|
python
|
def __new__(cls, class_id, classification_loss, localization_loss, part_loss_weight=1.0, coordinate_loss_weight=1.0, num_parts=24, task_loss_weight=1.0, upsample_to_input_res=True, upsample_method='bilinear', heatmap_bias_init=(- 2.19)):
'Constructor with default values for DensePoseParams.\n\n Args:\n class_id: the ID of the class that contains the DensePose groundtruth.\n This should typically correspond to the "person" class. Note that the ID\n is 0-based, meaning that class 0 corresponds to the first non-background\n object class.\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the body part predictions in CenterNet.\n localization_loss: an object_detection.core.losses.Loss object to compute\n the loss for the surface coordinate regression in CenterNet.\n part_loss_weight: The loss weight to apply to part prediction.\n coordinate_loss_weight: The loss weight to apply to surface coordinate\n prediction.\n num_parts: The number of DensePose parts to predict.\n task_loss_weight: float, the loss weight for the DensePose task.\n upsample_to_input_res: Whether to upsample the DensePose feature maps to\n the input resolution before applying loss. Note that the prediction\n outputs are still at the standard CenterNet output stride.\n upsample_method: Method for upsampling DensePose feature maps. Options are\n either \'bilinear\' or \'nearest\'). This takes no effect when\n `upsample_to_input_res` is False.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the part prediction head. If set to None, the\n bias is initialized with zeros.\n\n Returns:\n An initialized DensePoseParams namedtuple.\n '
return super(DensePoseParams, cls).__new__(cls, class_id, classification_loss, localization_loss, part_loss_weight, coordinate_loss_weight, num_parts, task_loss_weight, upsample_to_input_res, upsample_method, heatmap_bias_init)
|
def __new__(cls, num_track_ids, reid_embed_size, num_fc_layers, classification_loss, task_loss_weight=1.0):
'Constructor with default values for TrackParams.\n\n Args:\n num_track_ids: int. The maximum track ID in the dataset. Used for ReID\n embedding classification task.\n reid_embed_size: int. The embedding size for ReID task.\n num_fc_layers: int. The number of (fully-connected, batch-norm, relu)\n layers for track ID classification head.\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the ReID embedding in CenterNet.\n task_loss_weight: float, the loss weight for the tracking task.\n\n Returns:\n An initialized TrackParams namedtuple.\n '
return super(TrackParams, cls).__new__(cls, num_track_ids, reid_embed_size, num_fc_layers, classification_loss, task_loss_weight)
| -5,018,960,529,862,917,000 |
Constructor with default values for TrackParams.
Args:
num_track_ids: int. The maximum track ID in the dataset. Used for ReID
embedding classification task.
reid_embed_size: int. The embedding size for ReID task.
num_fc_layers: int. The number of (fully-connected, batch-norm, relu)
layers for track ID classification head.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the ReID embedding in CenterNet.
task_loss_weight: float, the loss weight for the tracking task.
Returns:
An initialized TrackParams namedtuple.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
__new__
|
AvikantSrivastava/models
|
python
|
def __new__(cls, num_track_ids, reid_embed_size, num_fc_layers, classification_loss, task_loss_weight=1.0):
'Constructor with default values for TrackParams.\n\n Args:\n num_track_ids: int. The maximum track ID in the dataset. Used for ReID\n embedding classification task.\n reid_embed_size: int. The embedding size for ReID task.\n num_fc_layers: int. The number of (fully-connected, batch-norm, relu)\n layers for track ID classification head.\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the ReID embedding in CenterNet.\n task_loss_weight: float, the loss weight for the tracking task.\n\n Returns:\n An initialized TrackParams namedtuple.\n '
return super(TrackParams, cls).__new__(cls, num_track_ids, reid_embed_size, num_fc_layers, classification_loss, task_loss_weight)
|
def __new__(cls, localization_loss, task_loss_weight=1.0):
'Constructor with default values for TrackParams.\n\n Args:\n localization_loss: an object_detection.core.losses.Loss object to\n compute the loss for the temporal offset in CenterNet.\n task_loss_weight: float, the loss weight for the temporal offset\n task.\n\n Returns:\n An initialized TemporalOffsetParams namedtuple.\n '
return super(TemporalOffsetParams, cls).__new__(cls, localization_loss, task_loss_weight)
| 4,264,954,637,863,041,500 |
Constructor with default values for TemporalOffsetParams.
Args:
localization_loss: an object_detection.core.losses.Loss object to
compute the loss for the temporal offset in CenterNet.
task_loss_weight: float, the loss weight for the temporal offset
task.
Returns:
An initialized TemporalOffsetParams namedtuple.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
__new__
|
AvikantSrivastava/models
|
python
|
def __new__(cls, localization_loss, task_loss_weight=1.0):
'Constructor with default values for TrackParams.\n\n Args:\n localization_loss: an object_detection.core.losses.Loss object to\n compute the loss for the temporal offset in CenterNet.\n task_loss_weight: float, the loss weight for the temporal offset\n task.\n\n Returns:\n An initialized TemporalOffsetParams namedtuple.\n '
return super(TemporalOffsetParams, cls).__new__(cls, localization_loss, task_loss_weight)
|
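Putting the pieces together, a sketch (untested, with hypothetical helpers) of how these parameter namedtuples feed the CenterNetMetaArch constructor documented next; ToyFeatureExtractor is the made-up extractor sketched earlier, and the resizer simply follows the documented contract of returning a resized rank-3 image plus its true shape.
import tensorflow as tf
from object_detection.core import losses
from object_detection.meta_architectures import center_net_meta_arch

def fixed_shape_resizer(image):
    # Rank-3 image in, resized rank-3 image plus a [3] true-shape tensor out.
    resized = tf.image.resize(image, [512, 512])
    return resized, tf.shape(resized)

model = center_net_meta_arch.CenterNetMetaArch(
    is_training=True,
    add_summaries=True,
    num_classes=90,
    feature_extractor=ToyFeatureExtractor(),  # hypothetical extractor from above
    image_resizer_fn=fixed_shape_resizer,
    object_center_params=center_net_meta_arch.ObjectCenterParams(
        classification_loss=losses.PenaltyReducedLogisticFocalLoss(alpha=2.0, beta=4.0),
        object_center_loss_weight=1.0),
    object_detection_params=center_net_meta_arch.ObjectDetectionParams(
        localization_loss=losses.L1LocalizationLoss(),
        scale_loss_weight=0.1,
        offset_loss_weight=1.0))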
def __init__(self, is_training, add_summaries, num_classes, feature_extractor, image_resizer_fn, object_center_params, object_detection_params=None, keypoint_params_dict=None, mask_params=None, densepose_params=None, track_params=None, temporal_offset_params=None):
'Initializes a CenterNet model.\n\n Args:\n is_training: Set to True if this model is being built for training.\n add_summaries: Whether to add tf summaries in the model.\n num_classes: int, The number of classes that the model should predict.\n feature_extractor: A CenterNetFeatureExtractor to use to extract features\n from an image.\n image_resizer_fn: a callable for image resizing. This callable always\n takes a rank-3 image tensor (corresponding to a single image) and\n returns a rank-3 image tensor, possibly with new spatial dimensions and\n a 1-D tensor of shape [3] indicating shape of true image within the\n resized image tensor as the resized image tensor could be padded. See\n builders/image_resizer_builder.py.\n object_center_params: An ObjectCenterParams namedtuple. This object holds\n the hyper-parameters for object center prediction. This is required by\n either object detection or keypoint estimation tasks.\n object_detection_params: An ObjectDetectionParams namedtuple. This object\n holds the hyper-parameters necessary for object detection. Please see\n the class definition for more details.\n keypoint_params_dict: A dictionary that maps from task name to the\n corresponding KeypointEstimationParams namedtuple. This object holds the\n hyper-parameters necessary for multiple keypoint estimations. Please\n see the class definition for more details.\n mask_params: A MaskParams namedtuple. This object\n holds the hyper-parameters for segmentation. Please see the class\n definition for more details.\n densepose_params: A DensePoseParams namedtuple. This object holds the\n hyper-parameters for DensePose prediction. Please see the class\n definition for more details. Note that if this is provided, it is\n expected that `mask_params` is also provided.\n track_params: A TrackParams namedtuple. This object\n holds the hyper-parameters for tracking. Please see the class\n definition for more details.\n temporal_offset_params: A TemporalOffsetParams namedtuple. This object\n holds the hyper-parameters for offset prediction based tracking.\n '
assert (object_detection_params or keypoint_params_dict)
self._is_training = is_training
self._feature_extractor = feature_extractor
self._num_feature_outputs = feature_extractor.num_feature_outputs
self._stride = self._feature_extractor.out_stride
self._image_resizer_fn = image_resizer_fn
self._center_params = object_center_params
self._od_params = object_detection_params
self._kp_params_dict = keypoint_params_dict
self._mask_params = mask_params
if ((densepose_params is not None) and (mask_params is None)):
raise ValueError('To run DensePose prediction, `mask_params` must also be supplied.')
self._densepose_params = densepose_params
self._track_params = track_params
self._temporal_offset_params = temporal_offset_params
self._prediction_head_dict = self._construct_prediction_heads(num_classes, self._num_feature_outputs, class_prediction_bias_init=self._center_params.heatmap_bias_init)
self._target_assigner_dict = self._initialize_target_assigners(stride=self._stride, min_box_overlap_iou=self._center_params.min_box_overlap_iou)
self._batched_prediction_tensor_names = []
super(CenterNetMetaArch, self).__init__(num_classes)
| 5,664,729,417,145,753,000 |
Initializes a CenterNet model.
Args:
is_training: Set to True if this model is being built for training.
add_summaries: Whether to add tf summaries in the model.
num_classes: int, The number of classes that the model should predict.
feature_extractor: A CenterNetFeatureExtractor to use to extract features
from an image.
image_resizer_fn: a callable for image resizing. This callable always
takes a rank-3 image tensor (corresponding to a single image) and
returns a rank-3 image tensor, possibly with new spatial dimensions and
a 1-D tensor of shape [3] indicating shape of true image within the
resized image tensor as the resized image tensor could be padded. See
builders/image_resizer_builder.py.
object_center_params: An ObjectCenterParams namedtuple. This object holds
the hyper-parameters for object center prediction. This is required by
either object detection or keypoint estimation tasks.
object_detection_params: An ObjectDetectionParams namedtuple. This object
holds the hyper-parameters necessary for object detection. Please see
the class definition for more details.
keypoint_params_dict: A dictionary that maps from task name to the
corresponding KeypointEstimationParams namedtuple. This object holds the
hyper-parameters necessary for multiple keypoint estimations. Please
see the class definition for more details.
mask_params: A MaskParams namedtuple. This object
holds the hyper-parameters for segmentation. Please see the class
definition for more details.
densepose_params: A DensePoseParams namedtuple. This object holds the
hyper-parameters for DensePose prediction. Please see the class
definition for more details. Note that if this is provided, it is
expected that `mask_params` is also provided.
track_params: A TrackParams namedtuple. This object
holds the hyper-parameters for tracking. Please see the class
definition for more details.
temporal_offset_params: A TemporalOffsetParams namedtuple. This object
holds the hyper-parameters for offset prediction based tracking.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
__init__
|
AvikantSrivastava/models
|
python
|
def __init__(self, is_training, add_summaries, num_classes, feature_extractor, image_resizer_fn, object_center_params, object_detection_params=None, keypoint_params_dict=None, mask_params=None, densepose_params=None, track_params=None, temporal_offset_params=None):
'Initializes a CenterNet model.\n\n Args:\n is_training: Set to True if this model is being built for training.\n add_summaries: Whether to add tf summaries in the model.\n num_classes: int, The number of classes that the model should predict.\n feature_extractor: A CenterNetFeatureExtractor to use to extract features\n from an image.\n image_resizer_fn: a callable for image resizing. This callable always\n takes a rank-3 image tensor (corresponding to a single image) and\n returns a rank-3 image tensor, possibly with new spatial dimensions and\n a 1-D tensor of shape [3] indicating shape of true image within the\n resized image tensor as the resized image tensor could be padded. See\n builders/image_resizer_builder.py.\n object_center_params: An ObjectCenterParams namedtuple. This object holds\n the hyper-parameters for object center prediction. This is required by\n either object detection or keypoint estimation tasks.\n object_detection_params: An ObjectDetectionParams namedtuple. This object\n holds the hyper-parameters necessary for object detection. Please see\n the class definition for more details.\n keypoint_params_dict: A dictionary that maps from task name to the\n corresponding KeypointEstimationParams namedtuple. This object holds the\n hyper-parameters necessary for multiple keypoint estimations. Please\n see the class definition for more details.\n mask_params: A MaskParams namedtuple. This object\n holds the hyper-parameters for segmentation. Please see the class\n definition for more details.\n densepose_params: A DensePoseParams namedtuple. This object holds the\n hyper-parameters for DensePose prediction. Please see the class\n definition for more details. Note that if this is provided, it is\n expected that `mask_params` is also provided.\n track_params: A TrackParams namedtuple. This object\n holds the hyper-parameters for tracking. Please see the class\n definition for more details.\n temporal_offset_params: A TemporalOffsetParams namedtuple. This object\n holds the hyper-parameters for offset prediction based tracking.\n '
assert (object_detection_params or keypoint_params_dict)
self._is_training = is_training
self._feature_extractor = feature_extractor
self._num_feature_outputs = feature_extractor.num_feature_outputs
self._stride = self._feature_extractor.out_stride
self._image_resizer_fn = image_resizer_fn
self._center_params = object_center_params
self._od_params = object_detection_params
self._kp_params_dict = keypoint_params_dict
self._mask_params = mask_params
if ((densepose_params is not None) and (mask_params is None)):
raise ValueError('To run DensePose prediction, `mask_params` must also be supplied.')
self._densepose_params = densepose_params
self._track_params = track_params
self._temporal_offset_params = temporal_offset_params
self._prediction_head_dict = self._construct_prediction_heads(num_classes, self._num_feature_outputs, class_prediction_bias_init=self._center_params.heatmap_bias_init)
self._target_assigner_dict = self._initialize_target_assigners(stride=self._stride, min_box_overlap_iou=self._center_params.min_box_overlap_iou)
self._batched_prediction_tensor_names = []
super(CenterNetMetaArch, self).__init__(num_classes)
|
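The __init__ docstring above describes the image_resizer_fn contract: a callable that takes a rank-3 image and returns a resized rank-3 image together with a shape-[3] tensor giving the true image shape inside the (possibly padded) output. The hedged sketch below shows a callable with that kind of interface; it is not the builder from builders/image_resizer_builder.py, and the fixed 512x512 output size is an assumption for illustration.

# Toy resizer matching the documented interface shape (illustrative only).
import tensorflow as tf

def fixed_shape_resizer_sketch(image, height=512, width=512):
  # image: rank-3 tensor [h, w, channels].
  resized = tf.image.resize(image, [height, width])
  # No padding in this toy resizer, so the "true" shape equals the output shape.
  true_shape = tf.stack([height, width, tf.shape(image)[2]])
  return resized, true_shape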
def _construct_prediction_heads(self, num_classes, num_feature_outputs, class_prediction_bias_init):
'Constructs the prediction heads based on the specific parameters.\n\n Args:\n num_classes: An integer indicating how many classes in total to predict.\n num_feature_outputs: An integer indicating how many feature outputs to use\n for calculating the loss. The Objects as Points paper attaches loss\n functions to multiple (`num_feature_outputs`) feature maps in the\n backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2.\n class_prediction_bias_init: float, the initial value of bias in the\n convolutional kernel of the class prediction head. If set to None, the\n bias is initialized with zeros.\n\n Returns:\n A dictionary of keras modules generated by calling make_prediction_net\n function. It will also create and set a private member of the class when\n learning the tracking task.\n '
prediction_heads = {}
prediction_heads[OBJECT_CENTER] = [make_prediction_net(num_classes, bias_fill=class_prediction_bias_init) for _ in range(num_feature_outputs)]
if (self._od_params is not None):
prediction_heads[BOX_SCALE] = [make_prediction_net(NUM_SIZE_CHANNELS) for _ in range(num_feature_outputs)]
prediction_heads[BOX_OFFSET] = [make_prediction_net(NUM_OFFSET_CHANNELS) for _ in range(num_feature_outputs)]
if (self._kp_params_dict is not None):
for (task_name, kp_params) in self._kp_params_dict.items():
num_keypoints = len(kp_params.keypoint_indices)
prediction_heads[get_keypoint_name(task_name, KEYPOINT_HEATMAP)] = [make_prediction_net(num_keypoints, bias_fill=kp_params.heatmap_bias_init) for _ in range(num_feature_outputs)]
prediction_heads[get_keypoint_name(task_name, KEYPOINT_REGRESSION)] = [make_prediction_net((NUM_OFFSET_CHANNELS * num_keypoints)) for _ in range(num_feature_outputs)]
if kp_params.per_keypoint_offset:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [make_prediction_net((NUM_OFFSET_CHANNELS * num_keypoints)) for _ in range(num_feature_outputs)]
else:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [make_prediction_net(NUM_OFFSET_CHANNELS) for _ in range(num_feature_outputs)]
if (self._mask_params is not None):
prediction_heads[SEGMENTATION_HEATMAP] = [make_prediction_net(num_classes, bias_fill=self._mask_params.heatmap_bias_init) for _ in range(num_feature_outputs)]
if (self._densepose_params is not None):
prediction_heads[DENSEPOSE_HEATMAP] = [make_prediction_net(self._densepose_params.num_parts, bias_fill=self._densepose_params.heatmap_bias_init) for _ in range(num_feature_outputs)]
prediction_heads[DENSEPOSE_REGRESSION] = [make_prediction_net((2 * self._densepose_params.num_parts)) for _ in range(num_feature_outputs)]
if (self._track_params is not None):
prediction_heads[TRACK_REID] = [make_prediction_net(self._track_params.reid_embed_size) for _ in range(num_feature_outputs)]
self.track_reid_classification_net = tf.keras.Sequential()
for _ in range((self._track_params.num_fc_layers - 1)):
self.track_reid_classification_net.add(tf.keras.layers.Dense(self._track_params.reid_embed_size, input_shape=(self._track_params.reid_embed_size,)))
self.track_reid_classification_net.add(tf.keras.layers.BatchNormalization())
self.track_reid_classification_net.add(tf.keras.layers.ReLU())
self.track_reid_classification_net.add(tf.keras.layers.Dense(self._track_params.num_track_ids, input_shape=(self._track_params.reid_embed_size,)))
if (self._temporal_offset_params is not None):
prediction_heads[TEMPORAL_OFFSET] = [make_prediction_net(NUM_OFFSET_CHANNELS) for _ in range(num_feature_outputs)]
return prediction_heads
| -6,639,868,782,437,514,000 |
Constructs the prediction heads based on the specific parameters.
Args:
num_classes: An integer indicating how many classes in total to predict.
num_feature_outputs: An integer indicating how many feature outputs to use
for calculating the loss. The Objects as Points paper attaches loss
functions to multiple (`num_feature_outputs`) feature maps in the
backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2.
class_prediction_bias_init: float, the initial value of bias in the
convolutional kernel of the class prediction head. If set to None, the
bias is initialized with zeros.
Returns:
A dictionary of keras modules generated by calling make_prediction_net
function. It will also create and set a private member of the class when
learning the tracking task.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_construct_prediction_heads
|
AvikantSrivastava/models
|
python
|
def _construct_prediction_heads(self, num_classes, num_feature_outputs, class_prediction_bias_init):
'Constructs the prediction heads based on the specific parameters.\n\n Args:\n num_classes: An integer indicating how many classes in total to predict.\n num_feature_outputs: An integer indicating how many feature outputs to use\n for calculating the loss. The Objects as Points paper attaches loss\n functions to multiple (`num_feature_outputs`) feature maps in the\n backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2.\n class_prediction_bias_init: float, the initial value of bias in the\n convolutional kernel of the class prediction head. If set to None, the\n bias is initialized with zeros.\n\n Returns:\n A dictionary of keras modules generated by calling make_prediction_net\n function. It will also create and set a private member of the class when\n learning the tracking task.\n '
prediction_heads = {}
prediction_heads[OBJECT_CENTER] = [make_prediction_net(num_classes, bias_fill=class_prediction_bias_init) for _ in range(num_feature_outputs)]
if (self._od_params is not None):
prediction_heads[BOX_SCALE] = [make_prediction_net(NUM_SIZE_CHANNELS) for _ in range(num_feature_outputs)]
prediction_heads[BOX_OFFSET] = [make_prediction_net(NUM_OFFSET_CHANNELS) for _ in range(num_feature_outputs)]
if (self._kp_params_dict is not None):
for (task_name, kp_params) in self._kp_params_dict.items():
num_keypoints = len(kp_params.keypoint_indices)
prediction_heads[get_keypoint_name(task_name, KEYPOINT_HEATMAP)] = [make_prediction_net(num_keypoints, bias_fill=kp_params.heatmap_bias_init) for _ in range(num_feature_outputs)]
prediction_heads[get_keypoint_name(task_name, KEYPOINT_REGRESSION)] = [make_prediction_net((NUM_OFFSET_CHANNELS * num_keypoints)) for _ in range(num_feature_outputs)]
if kp_params.per_keypoint_offset:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [make_prediction_net((NUM_OFFSET_CHANNELS * num_keypoints)) for _ in range(num_feature_outputs)]
else:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [make_prediction_net(NUM_OFFSET_CHANNELS) for _ in range(num_feature_outputs)]
if (self._mask_params is not None):
prediction_heads[SEGMENTATION_HEATMAP] = [make_prediction_net(num_classes, bias_fill=self._mask_params.heatmap_bias_init) for _ in range(num_feature_outputs)]
if (self._densepose_params is not None):
prediction_heads[DENSEPOSE_HEATMAP] = [make_prediction_net(self._densepose_params.num_parts, bias_fill=self._densepose_params.heatmap_bias_init) for _ in range(num_feature_outputs)]
prediction_heads[DENSEPOSE_REGRESSION] = [make_prediction_net((2 * self._densepose_params.num_parts)) for _ in range(num_feature_outputs)]
if (self._track_params is not None):
prediction_heads[TRACK_REID] = [make_prediction_net(self._track_params.reid_embed_size) for _ in range(num_feature_outputs)]
self.track_reid_classification_net = tf.keras.Sequential()
for _ in range((self._track_params.num_fc_layers - 1)):
self.track_reid_classification_net.add(tf.keras.layers.Dense(self._track_params.reid_embed_size, input_shape=(self._track_params.reid_embed_size,)))
self.track_reid_classification_net.add(tf.keras.layers.BatchNormalization())
self.track_reid_classification_net.add(tf.keras.layers.ReLU())
self.track_reid_classification_net.add(tf.keras.layers.Dense(self._track_params.num_track_ids, input_shape=(self._track_params.reid_embed_size,)))
if (self._temporal_offset_params is not None):
prediction_heads[TEMPORAL_OFFSET] = [make_prediction_net(NUM_OFFSET_CHANNELS) for _ in range(num_feature_outputs)]
return prediction_heads
|
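When the tracking task is enabled, the head construction above also builds a small classifier over the ReID embedding: (num_fc_layers - 1) blocks of Dense -> BatchNormalization -> ReLU, followed by a final Dense projecting to num_track_ids logits. The standalone sketch below mirrors that wiring; the sizes are assumed values, not configuration defaults.

# Standalone sketch of the ReID classification head wiring shown above.
import tensorflow as tf

reid_embed_size = 128   # assumed
num_fc_layers = 3       # assumed
num_track_ids = 1000    # assumed

net = tf.keras.Sequential()
for _ in range(num_fc_layers - 1):
  net.add(tf.keras.layers.Dense(reid_embed_size, input_shape=(reid_embed_size,)))
  net.add(tf.keras.layers.BatchNormalization())
  net.add(tf.keras.layers.ReLU())
net.add(tf.keras.layers.Dense(num_track_ids, input_shape=(reid_embed_size,)))

logits = net(tf.zeros([4, reid_embed_size]))  # [batch, num_track_ids]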
def _initialize_target_assigners(self, stride, min_box_overlap_iou):
'Initializes the target assigners and puts them in a dictionary.\n\n Args:\n stride: An integer indicating the stride of the image.\n min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes\n need to have with groundtruth boxes to not be penalized. This is used for\n computing the class specific center heatmaps.\n\n Returns:\n A dictionary of initialized target assigners for each task.\n '
target_assigners = {}
target_assigners[OBJECT_CENTER] = cn_assigner.CenterNetCenterHeatmapTargetAssigner(stride, min_box_overlap_iou)
if (self._od_params is not None):
target_assigners[DETECTION_TASK] = cn_assigner.CenterNetBoxTargetAssigner(stride)
if (self._kp_params_dict is not None):
for (task_name, kp_params) in self._kp_params_dict.items():
target_assigners[task_name] = cn_assigner.CenterNetKeypointTargetAssigner(stride=stride, class_id=kp_params.class_id, keypoint_indices=kp_params.keypoint_indices, keypoint_std_dev=kp_params.keypoint_std_dev, peak_radius=kp_params.offset_peak_radius, per_keypoint_offset=kp_params.per_keypoint_offset)
if (self._mask_params is not None):
target_assigners[SEGMENTATION_TASK] = cn_assigner.CenterNetMaskTargetAssigner(stride)
if (self._densepose_params is not None):
dp_stride = (1 if self._densepose_params.upsample_to_input_res else stride)
target_assigners[DENSEPOSE_TASK] = cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride)
if (self._track_params is not None):
target_assigners[TRACK_TASK] = cn_assigner.CenterNetTrackTargetAssigner(stride, self._track_params.num_track_ids)
if (self._temporal_offset_params is not None):
target_assigners[TEMPORALOFFSET_TASK] = cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride)
return target_assigners
| -3,979,121,992,371,626,000 |
Initializes the target assigners and puts them in a dictionary.
Args:
stride: An integer indicating the stride of the image.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
need to have with groundtruth boxes to not be penalized. This is used for
computing the class specific center heatmaps.
Returns:
A dictionary of initialized target assigners for each task.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_initialize_target_assigners
|
AvikantSrivastava/models
|
python
|
def _initialize_target_assigners(self, stride, min_box_overlap_iou):
'Initializes the target assigners and puts them in a dictionary.\n\n Args:\n stride: An integer indicating the stride of the image.\n min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes\n need to have with groundtruth boxes to not be penalized. This is used for\n computing the class specific center heatmaps.\n\n Returns:\n A dictionary of initialized target assigners for each task.\n '
target_assigners = {}
target_assigners[OBJECT_CENTER] = cn_assigner.CenterNetCenterHeatmapTargetAssigner(stride, min_box_overlap_iou)
if (self._od_params is not None):
target_assigners[DETECTION_TASK] = cn_assigner.CenterNetBoxTargetAssigner(stride)
if (self._kp_params_dict is not None):
for (task_name, kp_params) in self._kp_params_dict.items():
target_assigners[task_name] = cn_assigner.CenterNetKeypointTargetAssigner(stride=stride, class_id=kp_params.class_id, keypoint_indices=kp_params.keypoint_indices, keypoint_std_dev=kp_params.keypoint_std_dev, peak_radius=kp_params.offset_peak_radius, per_keypoint_offset=kp_params.per_keypoint_offset)
if (self._mask_params is not None):
target_assigners[SEGMENTATION_TASK] = cn_assigner.CenterNetMaskTargetAssigner(stride)
if (self._densepose_params is not None):
dp_stride = (1 if self._densepose_params.upsample_to_input_res else stride)
target_assigners[DENSEPOSE_TASK] = cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride)
if (self._track_params is not None):
target_assigners[TRACK_TASK] = cn_assigner.CenterNetTrackTargetAssigner(stride, self._track_params.num_track_ids)
if (self._temporal_offset_params is not None):
target_assigners[TEMPORALOFFSET_TASK] = cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride)
return target_assigners
|
def _compute_object_center_loss(self, input_height, input_width, object_center_predictions, per_pixel_weights):
'Computes the object center loss.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n object_center_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, num_classes] representing the object center\n feature maps.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A float scalar tensor representing the object center loss per instance.\n '
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
if self._center_params.use_only_known_classes:
gt_labeled_classes_list = self.groundtruth_lists(fields.InputDataFields.groundtruth_labeled_classes)
batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0)
batch_labeled_classes_shape = tf.shape(batch_labeled_classes)
batch_labeled_classes = tf.reshape(batch_labeled_classes, [batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[(- 1)]])
per_pixel_weights = (per_pixel_weights * batch_labeled_classes)
assigner = self._target_assigner_dict[OBJECT_CENTER]
heatmap_targets = assigner.assign_center_targets_from_boxes(height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_classes_list=gt_classes_list, gt_weights_list=gt_weights_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
loss = 0.0
object_center_loss = self._center_params.classification_loss
for pred in object_center_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += object_center_loss(pred, flattened_heatmap_targets, weights=per_pixel_weights)
loss_per_instance = (tf.reduce_sum(loss) / (float(len(object_center_predictions)) * num_boxes))
return loss_per_instance
| -3,619,118,231,556,900,400 |
Computes the object center loss.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_center_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_classes] representing the object center
feature maps.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the object center loss per instance.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_object_center_loss
|
AvikantSrivastava/models
|
python
|
def _compute_object_center_loss(self, input_height, input_width, object_center_predictions, per_pixel_weights):
'Computes the object center loss.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n object_center_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, num_classes] representing the object center\n feature maps.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A float scalar tensor representing the object center loss per instance.\n '
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
if self._center_params.use_only_known_classes:
gt_labeled_classes_list = self.groundtruth_lists(fields.InputDataFields.groundtruth_labeled_classes)
batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0)
batch_labeled_classes_shape = tf.shape(batch_labeled_classes)
batch_labeled_classes = tf.reshape(batch_labeled_classes, [batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[(- 1)]])
per_pixel_weights = (per_pixel_weights * batch_labeled_classes)
assigner = self._target_assigner_dict[OBJECT_CENTER]
heatmap_targets = assigner.assign_center_targets_from_boxes(height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_classes_list=gt_classes_list, gt_weights_list=gt_weights_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
loss = 0.0
object_center_loss = self._center_params.classification_loss
for pred in object_center_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += object_center_loss(pred, flattened_heatmap_targets, weights=per_pixel_weights)
loss_per_instance = (tf.reduce_sum(loss) / (float(len(object_center_predictions)) * num_boxes))
return loss_per_instance
|
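The loss above is reported per instance: the per-pixel losses from every prediction head are summed and divided by the number of heads times the number of groundtruth boxes. The small sketch below isolates that normalization with toy shapes; it is not the actual loss function from the library.

# Toy sketch of the per-instance normalization used in the center loss above.
import tensorflow as tf

def per_instance_center_loss(per_head_losses, num_boxes):
  # per_head_losses: list of [batch, height*width, num_classes] loss tensors,
  # one per prediction head / feature output.
  total = tf.add_n([tf.reduce_sum(l) for l in per_head_losses])
  return total / (float(len(per_head_losses)) * num_boxes)

losses = [tf.ones([2, 16, 3]) * 0.01, tf.ones([2, 16, 3]) * 0.02]
print(per_instance_center_loss(losses, num_boxes=5.0))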
def _compute_object_detection_losses(self, input_height, input_width, prediction_dict, per_pixel_weights):
'Computes the weighted object detection losses.\n\n This wrapper function calls the function which computes the losses for\n object detection task and applies corresponding weights to the losses.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: A dictionary holding predicted tensors output by\n "predict" function. See "predict" function for more detailed\n description.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A dictionary of scalar float tensors representing the weighted losses for\n object detection task:\n BOX_SCALE: the weighted scale (height/width) loss.\n BOX_OFFSET: the weighted object offset loss.\n '
(od_scale_loss, od_offset_loss) = self._compute_box_scale_and_offset_loss(scale_predictions=prediction_dict[BOX_SCALE], offset_predictions=prediction_dict[BOX_OFFSET], input_height=input_height, input_width=input_width)
loss_dict = {}
loss_dict[BOX_SCALE] = (self._od_params.scale_loss_weight * od_scale_loss)
loss_dict[BOX_OFFSET] = (self._od_params.offset_loss_weight * od_offset_loss)
return loss_dict
| 806,099,432,362,530,600 |
Computes the weighted object detection losses.
This wrapper function calls the function which computes the losses for
object detection task and applies corresponding weights to the losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by
"predict" function. See "predict" function for more detailed
description.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
object detection task:
BOX_SCALE: the weighted scale (height/width) loss.
BOX_OFFSET: the weighted object offset loss.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_object_detection_losses
|
AvikantSrivastava/models
|
python
|
def _compute_object_detection_losses(self, input_height, input_width, prediction_dict, per_pixel_weights):
'Computes the weighted object detection losses.\n\n This wrapper function calls the function which computes the losses for\n object detection task and applies corresponding weights to the losses.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: A dictionary holding predicted tensors output by\n "predict" function. See "predict" function for more detailed\n description.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A dictionary of scalar float tensors representing the weighted losses for\n object detection task:\n BOX_SCALE: the weighted scale (height/width) loss.\n BOX_OFFSET: the weighted object offset loss.\n '
(od_scale_loss, od_offset_loss) = self._compute_box_scale_and_offset_loss(scale_predictions=prediction_dict[BOX_SCALE], offset_predictions=prediction_dict[BOX_OFFSET], input_height=input_height, input_width=input_width)
loss_dict = {}
loss_dict[BOX_SCALE] = (self._od_params.scale_loss_weight * od_scale_loss)
loss_dict[BOX_OFFSET] = (self._od_params.offset_loss_weight * od_offset_loss)
return loss_dict
|
def _compute_box_scale_and_offset_loss(self, input_height, input_width, scale_predictions, offset_predictions):
'Computes the scale and offset losses of the object detection task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n scale_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2] representing the prediction heads of the model\n for object scale (i.e. height and width).\n offset_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2] representing the prediction heads of the model\n for object offset.\n\n Returns:\n A tuple of two losses:\n scale_loss: A float scalar tensor representing the object height/width\n loss normalized by total number of boxes.\n offset_loss: A float scalar tensor representing the object offset loss\n normalized by total number of boxes.\n '
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
num_predictions = float(len(scale_predictions))
assigner = self._target_assigner_dict[DETECTION_TASK]
(batch_indices, batch_height_width_targets, batch_offset_targets, batch_weights) = assigner.assign_size_and_offset_targets(height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, (- 1))
scale_loss = 0
offset_loss = 0
localization_loss_fn = self._od_params.localization_loss
for (scale_pred, offset_pred) in zip(scale_predictions, offset_predictions):
scale_pred = cn_assigner.get_batch_predictions_from_indices(scale_pred, batch_indices)
scale_loss += localization_loss_fn(scale_pred, batch_height_width_targets, weights=batch_weights)
offset_pred = cn_assigner.get_batch_predictions_from_indices(offset_pred, batch_indices)
offset_loss += localization_loss_fn(offset_pred, batch_offset_targets, weights=batch_weights)
scale_loss = (tf.reduce_sum(scale_loss) / (num_predictions * num_boxes))
offset_loss = (tf.reduce_sum(offset_loss) / (num_predictions * num_boxes))
return (scale_loss, offset_loss)
| -696,938,473,885,633,900 |
Computes the scale and offset losses of the object detection task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
scale_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object scale (i.e. height and width).
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object offset.
Returns:
A tuple of two losses:
scale_loss: A float scalar tensor representing the object height/width
loss normalized by total number of boxes.
offset_loss: A float scalar tensor representing the object offset loss
normalized by total number of boxes.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_box_scale_and_offset_loss
|
AvikantSrivastava/models
|
python
|
def _compute_box_scale_and_offset_loss(self, input_height, input_width, scale_predictions, offset_predictions):
'Computes the scale and offset losses of the object detection task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n scale_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2] representing the prediction heads of the model\n for object scale (i.e. height and width).\n offset_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2] representing the prediction heads of the model\n for object offset.\n\n Returns:\n A tuple of two losses:\n scale_loss: A float scalar tensor representing the object height/width\n loss normalized by total number of boxes.\n offset_loss: A float scalar tensor representing the object offset loss\n normalized by total number of boxes.\n '
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
num_predictions = float(len(scale_predictions))
assigner = self._target_assigner_dict[DETECTION_TASK]
(batch_indices, batch_height_width_targets, batch_offset_targets, batch_weights) = assigner.assign_size_and_offset_targets(height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, (- 1))
scale_loss = 0
offset_loss = 0
localization_loss_fn = self._od_params.localization_loss
for (scale_pred, offset_pred) in zip(scale_predictions, offset_predictions):
scale_pred = cn_assigner.get_batch_predictions_from_indices(scale_pred, batch_indices)
scale_loss += localization_loss_fn(scale_pred, batch_height_width_targets, weights=batch_weights)
offset_pred = cn_assigner.get_batch_predictions_from_indices(offset_pred, batch_indices)
offset_loss += localization_loss_fn(offset_pred, batch_offset_targets, weights=batch_weights)
scale_loss = (tf.reduce_sum(scale_loss) / (num_predictions * num_boxes))
offset_loss = (tf.reduce_sum(offset_loss) / (num_predictions * num_boxes))
return (scale_loss, offset_loss)
|
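The loss above only evaluates predictions at the pixel locations selected by the target assigner; get_batch_predictions_from_indices is assumed here to behave like a tf.gather_nd over (batch, y, x) indices. The toy sketch below shows that gather-then-penalize pattern with an L1-style penalty and made-up shapes; it is an illustration, not the library's implementation.

# Toy sketch: gather per-pixel predictions at assigned indices, then penalize.
import tensorflow as tf

pred = tf.random.normal([2, 8, 8, 2])          # [batch, out_h, out_w, 2] scale map
indices = tf.constant([[0, 3, 4], [1, 2, 7]])  # one (batch, y, x) per instance
gathered = tf.gather_nd(pred, indices)         # [num_instances, 2]

targets = tf.constant([[12.0, 20.0], [6.0, 9.0]])
l1 = tf.reduce_sum(tf.abs(gathered - targets), axis=1)  # simple L1 per instance
print(l1)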
def _compute_keypoint_estimation_losses(self, task_name, input_height, input_width, prediction_dict, per_pixel_weights):
'Computes the weighted keypoint losses.'
kp_params = self._kp_params_dict[task_name]
heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)
offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET)
regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)
heatmap_loss = self._compute_kp_heatmap_loss(input_height=input_height, input_width=input_width, task_name=task_name, heatmap_predictions=prediction_dict[heatmap_key], classification_loss_fn=kp_params.classification_loss, per_pixel_weights=per_pixel_weights)
offset_loss = self._compute_kp_offset_loss(input_height=input_height, input_width=input_width, task_name=task_name, offset_predictions=prediction_dict[offset_key], localization_loss_fn=kp_params.localization_loss)
reg_loss = self._compute_kp_regression_loss(input_height=input_height, input_width=input_width, task_name=task_name, regression_predictions=prediction_dict[regression_key], localization_loss_fn=kp_params.localization_loss)
loss_dict = {}
loss_dict[heatmap_key] = (kp_params.keypoint_heatmap_loss_weight * heatmap_loss)
loss_dict[offset_key] = (kp_params.keypoint_offset_loss_weight * offset_loss)
loss_dict[regression_key] = (kp_params.keypoint_regression_loss_weight * reg_loss)
return loss_dict
| 389,698,818,048,579,700 |
Computes the weighted keypoint losses.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_keypoint_estimation_losses
|
AvikantSrivastava/models
|
python
|
def _compute_keypoint_estimation_losses(self, task_name, input_height, input_width, prediction_dict, per_pixel_weights):
kp_params = self._kp_params_dict[task_name]
heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)
offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET)
regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)
heatmap_loss = self._compute_kp_heatmap_loss(input_height=input_height, input_width=input_width, task_name=task_name, heatmap_predictions=prediction_dict[heatmap_key], classification_loss_fn=kp_params.classification_loss, per_pixel_weights=per_pixel_weights)
offset_loss = self._compute_kp_offset_loss(input_height=input_height, input_width=input_width, task_name=task_name, offset_predictions=prediction_dict[offset_key], localization_loss_fn=kp_params.localization_loss)
reg_loss = self._compute_kp_regression_loss(input_height=input_height, input_width=input_width, task_name=task_name, regression_predictions=prediction_dict[regression_key], localization_loss_fn=kp_params.localization_loss)
loss_dict = {}
loss_dict[heatmap_key] = (kp_params.keypoint_heatmap_loss_weight * heatmap_loss)
loss_dict[offset_key] = (kp_params.keypoint_offset_loss_weight * offset_loss)
loss_dict[regression_key] = (kp_params.keypoint_regression_loss_weight * reg_loss)
return loss_dict
|
def _compute_kp_heatmap_loss(self, input_height, input_width, task_name, heatmap_predictions, classification_loss_fn, per_pixel_weights):
'Computes the heatmap loss of the keypoint estimation task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n task_name: A string representing the name of the keypoint task.\n heatmap_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, num_keypoints] representing the prediction heads\n of the model for keypoint heatmap.\n classification_loss_fn: An object_detection.core.losses.Loss object to\n compute the loss for the class predictions in CenterNet.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n loss: A float scalar tensor representing the object keypoint heatmap loss\n normalized by number of instances.\n '
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
assigner = self._target_assigner_dict[task_name]
(keypoint_heatmap, num_instances_per_kp_type, valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_classes_list=gt_classes_list, gt_boxes_list=gt_boxes_list)
flattened_valid_mask = _flatten_spatial_dimensions(tf.expand_dims(valid_mask_batch, axis=(- 1)))
flattened_heatmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)
num_instances = tf.maximum(tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32), 1.0)
loss = 0.0
for pred in heatmap_predictions:
pred = _flatten_spatial_dimensions(pred)
unweighted_loss = classification_loss_fn(pred, flattened_heatmap_targets, weights=tf.ones_like(per_pixel_weights))
loss += ((unweighted_loss * per_pixel_weights) * flattened_valid_mask)
loss = (tf.reduce_sum(loss) / (float(len(heatmap_predictions)) * num_instances))
return loss
| 4,771,942,718,064,745,000 |
Computes the heatmap loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
heatmap_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_keypoints] representing the prediction heads
of the model for keypoint heatmap.
classification_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
loss: A float scalar tensor representing the object keypoint heatmap loss
normalized by number of instances.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_kp_heatmap_loss
|
AvikantSrivastava/models
|
python
|
def _compute_kp_heatmap_loss(self, input_height, input_width, task_name, heatmap_predictions, classification_loss_fn, per_pixel_weights):
'Computes the heatmap loss of the keypoint estimation task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n task_name: A string representing the name of the keypoint task.\n heatmap_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, num_keypoints] representing the prediction heads\n of the model for keypoint heatmap.\n classification_loss_fn: An object_detection.core.losses.Loss object to\n compute the loss for the class predictions in CenterNet.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n loss: A float scalar tensor representing the object keypoint heatmap loss\n normalized by number of instances.\n '
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
assigner = self._target_assigner_dict[task_name]
(keypoint_heatmap, num_instances_per_kp_type, valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_classes_list=gt_classes_list, gt_boxes_list=gt_boxes_list)
flattened_valid_mask = _flatten_spatial_dimensions(tf.expand_dims(valid_mask_batch, axis=(- 1)))
flattened_heatmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)
num_instances = tf.maximum(tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32), 1.0)
loss = 0.0
for pred in heatmap_predictions:
pred = _flatten_spatial_dimensions(pred)
unweighted_loss = classification_loss_fn(pred, flattened_heatmap_targets, weights=tf.ones_like(per_pixel_weights))
loss += ((unweighted_loss * per_pixel_weights) * flattened_valid_mask)
loss = (tf.reduce_sum(loss) / (float(len(heatmap_predictions)) * num_instances))
return loss
|
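As in the keypoint heatmap loss above, the raw per-pixel loss is gated by two masks before normalization: per_pixel_weights (pixels inside the true image) and the assigner's per-pixel valid mask, and the result is divided by the clamped instance count. The sketch below only illustrates that broadcasting and normalization with dummy tensors and a single prediction head.

# Dummy-tensor sketch of the masking and normalization pattern above.
import tensorflow as tf

unweighted = tf.random.uniform([2, 64, 17])   # [batch, h*w, num_keypoints]
per_pixel_weights = tf.ones([2, 64, 1])       # 1s inside the true image
valid_mask = tf.ones([2, 64, 1])              # 1s where the assigner marks valid
num_instances = tf.maximum(tf.constant(12.0), 1.0)

masked = unweighted * per_pixel_weights * valid_mask
loss = tf.reduce_sum(masked) / (1.0 * num_instances)  # single head in this sketch
print(loss)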
def _compute_kp_offset_loss(self, input_height, input_width, task_name, offset_predictions, localization_loss_fn):
'Computes the offset loss of the keypoint estimation task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n task_name: A string representing the name of the keypoint task.\n offset_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2] representing the prediction heads of the model\n for keypoint offset.\n localization_loss_fn: An object_detection.core.losses.Loss object to\n compute the loss for the keypoint offset predictions in CenterNet.\n\n Returns:\n loss: A float scalar tensor representing the keypoint offset loss\n normalized by number of total keypoints.\n '
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_offsets, batch_weights) = assigner.assign_keypoints_offset_targets(height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_classes_list=gt_classes_list)
loss = 0.0
for prediction in offset_predictions:
(batch_size, out_height, out_width, channels) = _get_shape(prediction, 4)
if (channels > 2):
prediction = tf.reshape(prediction, shape=[batch_size, out_height, out_width, (- 1), 2])
prediction = cn_assigner.get_batch_predictions_from_indices(prediction, batch_indices)
unweighted_loss = localization_loss_fn(prediction, batch_offsets, weights=tf.expand_dims(tf.ones_like(batch_weights), (- 1)))
loss += (batch_weights * tf.reduce_sum(unweighted_loss, axis=1))
loss = (tf.reduce_sum(loss) / (float(len(offset_predictions)) * tf.maximum(tf.reduce_sum(batch_weights), 1.0)))
return loss
| 3,327,474,415,315,478,000 |
Computes the offset loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for keypoint offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint offset predictions in CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint offset loss
normalized by number of total keypoints.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_kp_offset_loss
|
AvikantSrivastava/models
|
python
|
def _compute_kp_offset_loss(self, input_height, input_width, task_name, offset_predictions, localization_loss_fn):
'Computes the offset loss of the keypoint estimation task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n task_name: A string representing the name of the keypoint task.\n offset_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2] representing the prediction heads of the model\n for keypoint offset.\n localization_loss_fn: An object_detection.core.losses.Loss object to\n compute the loss for the keypoint offset predictions in CenterNet.\n\n Returns:\n loss: A float scalar tensor representing the keypoint offset loss\n normalized by number of total keypoints.\n '
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_offsets, batch_weights) = assigner.assign_keypoints_offset_targets(height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_classes_list=gt_classes_list)
loss = 0.0
for prediction in offset_predictions:
(batch_size, out_height, out_width, channels) = _get_shape(prediction, 4)
if (channels > 2):
prediction = tf.reshape(prediction, shape=[batch_size, out_height, out_width, (- 1), 2])
prediction = cn_assigner.get_batch_predictions_from_indices(prediction, batch_indices)
unweighted_loss = localization_loss_fn(prediction, batch_offsets, weights=tf.expand_dims(tf.ones_like(batch_weights), (- 1)))
loss += (batch_weights * tf.reduce_sum(unweighted_loss, axis=1))
loss = (tf.reduce_sum(loss) / (float(len(offset_predictions)) * tf.maximum(tf.reduce_sum(batch_weights), 1.0)))
return loss
|
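When the offset head predicts a separate offset per keypoint type (channels > 2), the code above reshapes the map to [batch, height, width, num_keypoints, 2] before gathering. The sketch below shows that reshape-and-gather step; the four-component index layout (batch, y, x, keypoint) and all shapes are assumptions for illustration.

# Toy sketch of the per-keypoint offset reshape and gather.
import tensorflow as tf

num_keypoints = 17
pred = tf.random.normal([2, 8, 8, 2 * num_keypoints])
pred = tf.reshape(pred, [2, 8, 8, num_keypoints, 2])

# Assumed index layout: (batch, y, x, keypoint) per target keypoint.
indices = tf.constant([[0, 3, 4, 0], [1, 2, 7, 5]])
offsets = tf.gather_nd(pred, indices)  # [num_targets, 2]
print(offsets.shape)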
def _compute_kp_regression_loss(self, input_height, input_width, task_name, regression_predictions, localization_loss_fn):
'Computes the keypoint regression loss of the keypoint estimation task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n task_name: A string representing the name of the keypoint task.\n regression_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2 * num_keypoints] representing the prediction\n heads of the model for keypoint regression offset.\n localization_loss_fn: An object_detection.core.losses.Loss object to\n compute the loss for the keypoint regression offset predictions in\n CenterNet.\n\n Returns:\n loss: A float scalar tensor representing the keypoint regression offset\n loss normalized by number of total keypoints.\n '
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_regression_offsets, batch_weights) = assigner.assign_joint_regression_targets(height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_classes_list=gt_classes_list, gt_weights_list=gt_weights_list, gt_boxes_list=gt_boxes_list)
loss = 0.0
for prediction in regression_predictions:
(batch_size, out_height, out_width, _) = _get_shape(prediction, 4)
reshaped_prediction = tf.reshape(prediction, shape=[batch_size, out_height, out_width, (- 1), 2])
reg_prediction = cn_assigner.get_batch_predictions_from_indices(reshaped_prediction, batch_indices)
unweighted_loss = localization_loss_fn(reg_prediction, batch_regression_offsets, weights=tf.expand_dims(tf.ones_like(batch_weights), (- 1)))
loss += (batch_weights * tf.reduce_sum(unweighted_loss, axis=1))
loss = (tf.reduce_sum(loss) / (float(len(regression_predictions)) * tf.maximum(tf.reduce_sum(batch_weights), 1.0)))
return loss
| 3,207,325,843,742,887,000 |
Computes the keypoint regression loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
regression_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_keypoints] representing the prediction
heads of the model for keypoint regression offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint regression offset predictions in
CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint regression offset
loss normalized by number of total keypoints.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_kp_regression_loss
|
AvikantSrivastava/models
|
python
|
def _compute_kp_regression_loss(self, input_height, input_width, task_name, regression_predictions, localization_loss_fn):
'Computes the keypoint regression loss of the keypoint estimation task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n task_name: A string representing the name of the keypoint task.\n regression_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2 * num_keypoints] representing the prediction\n heads of the model for keypoint regression offset.\n localization_loss_fn: An object_detection.core.losses.Loss object to\n compute the loss for the keypoint regression offset predictions in\n CenterNet.\n\n Returns:\n loss: A float scalar tensor representing the keypoint regression offset\n loss normalized by number of total keypoints.\n '
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_regression_offsets, batch_weights) = assigner.assign_joint_regression_targets(height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_classes_list=gt_classes_list, gt_weights_list=gt_weights_list, gt_boxes_list=gt_boxes_list)
loss = 0.0
for prediction in regression_predictions:
(batch_size, out_height, out_width, _) = _get_shape(prediction, 4)
reshaped_prediction = tf.reshape(prediction, shape=[batch_size, out_height, out_width, (- 1), 2])
reg_prediction = cn_assigner.get_batch_predictions_from_indices(reshaped_prediction, batch_indices)
unweighted_loss = localization_loss_fn(reg_prediction, batch_regression_offsets, weights=tf.expand_dims(tf.ones_like(batch_weights), (- 1)))
loss += (batch_weights * tf.reduce_sum(unweighted_loss, axis=1))
loss = (tf.reduce_sum(loss) / (float(len(regression_predictions)) * tf.maximum(tf.reduce_sum(batch_weights), 1.0)))
return loss
|
def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights):
'Computes all the losses associated with segmentation.\n\n Args:\n prediction_dict: The dictionary returned from the predict() method.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A dictionary with segmentation losses.\n '
segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP]
mask_loss = self._compute_mask_loss(segmentation_heatmap, per_pixel_weights)
losses = {SEGMENTATION_HEATMAP: mask_loss}
return losses
| -7,498,127,224,504,642,000 |
Computes all the losses associated with segmentation.
Args:
prediction_dict: The dictionary returned from the predict() method.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary with segmentation losses.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_segmentation_losses
|
AvikantSrivastava/models
|
python
|
def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights):
'Computes all the losses associated with segmentation.\n\n Args:\n prediction_dict: The dictionary returned from the predict() method.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A dictionary with segmentation losses.\n '
segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP]
mask_loss = self._compute_mask_loss(segmentation_heatmap, per_pixel_weights)
losses = {SEGMENTATION_HEATMAP: mask_loss}
return losses
|
def _compute_mask_loss(self, segmentation_predictions, per_pixel_weights):
'Computes the mask loss.\n\n Args:\n segmentation_predictions: A list of float32 tensors of shape [batch_size,\n out_height, out_width, num_classes].\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A float scalar tensor representing the mask loss.\n '
gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
assigner = self._target_assigner_dict[SEGMENTATION_TASK]
heatmap_targets = assigner.assign_segmentation_targets(gt_masks_list=gt_masks_list, gt_classes_list=gt_classes_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
loss = 0.0
mask_loss_fn = self._mask_params.classification_loss
total_pixels_in_loss = tf.reduce_sum(per_pixel_weights)
for pred in segmentation_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += mask_loss_fn(pred, flattened_heatmap_targets, weights=per_pixel_weights)
total_loss = (tf.reduce_sum(loss) / (float(len(segmentation_predictions)) * total_pixels_in_loss))
return total_loss
| -7,094,407,873,046,366,000 |
Computes the mask loss.
Args:
segmentation_predictions: A list of float32 tensors of shape [batch_size,
out_height, out_width, num_classes].
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the mask loss.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_mask_loss
|
AvikantSrivastava/models
|
python
|
def _compute_mask_loss(self, segmentation_predictions, per_pixel_weights):
'Computes the mask loss.\n\n Args:\n segmentation_predictions: A list of float32 tensors of shape [batch_size,\n out_height, out_width, num_classes].\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A float scalar tensor representing the mask loss.\n '
gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
assigner = self._target_assigner_dict[SEGMENTATION_TASK]
heatmap_targets = assigner.assign_segmentation_targets(gt_masks_list=gt_masks_list, gt_classes_list=gt_classes_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
loss = 0.0
mask_loss_fn = self._mask_params.classification_loss
total_pixels_in_loss = tf.reduce_sum(per_pixel_weights)
for pred in segmentation_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += mask_loss_fn(pred, flattened_heatmap_targets, weights=per_pixel_weights)
total_loss = (tf.reduce_sum(loss) / (float(len(segmentation_predictions)) * total_pixels_in_loss))
return total_loss
|
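A minimal sketch of the normalization pattern used in _compute_mask_loss above: the per-pixel loss is summed across all feature outputs and divided by the number of outputs times the number of valid pixels. Plain sigmoid cross-entropy stands in for the configurable classification loss, so this illustrates the bookkeeping rather than the exact loss.
import tensorflow as tf

def mask_loss_sketch(prediction_logits_list, heatmap_targets, per_pixel_weights):
  # prediction_logits_list: list of [batch, out_h * out_w, num_classes] tensors.
  # heatmap_targets:        [batch, out_h * out_w, num_classes] values in [0, 1].
  # per_pixel_weights:      [batch, out_h * out_w, 1].
  loss = 0.0
  total_pixels_in_loss = tf.reduce_sum(per_pixel_weights)
  for pred in prediction_logits_list:
    per_pixel = tf.nn.sigmoid_cross_entropy_with_logits(labels=heatmap_targets, logits=pred)
    loss += tf.reduce_sum(per_pixel * per_pixel_weights)
  return loss / (float(len(prediction_logits_list)) * total_pixels_in_loss)

targets = tf.random.uniform([2, 64 * 64, 3])
preds = [tf.random.normal([2, 64 * 64, 3]) for _ in range(2)]
pixel_weights = tf.ones([2, 64 * 64, 1])
print(mask_loss_sketch(preds, targets, pixel_weights))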
def _compute_densepose_losses(self, input_height, input_width, prediction_dict):
'Computes the weighted DensePose losses.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: A dictionary holding predicted tensors output by the\n "predict" function. See the "predict" function for more detailed\n description.\n\n Returns:\n A dictionary of scalar float tensors representing the weighted losses for\n the DensePose task:\n DENSEPOSE_HEATMAP: the weighted part segmentation loss.\n DENSEPOSE_REGRESSION: the weighted part surface coordinate loss.\n '
(dp_heatmap_loss, dp_regression_loss) = self._compute_densepose_part_and_coordinate_losses(input_height=input_height, input_width=input_width, part_predictions=prediction_dict[DENSEPOSE_HEATMAP], surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION])
loss_dict = {}
loss_dict[DENSEPOSE_HEATMAP] = (self._densepose_params.part_loss_weight * dp_heatmap_loss)
loss_dict[DENSEPOSE_REGRESSION] = (self._densepose_params.coordinate_loss_weight * dp_regression_loss)
return loss_dict
| -3,313,712,703,948,206,600 |
Computes the weighted DensePose losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by the
"predict" function. See the "predict" function for more detailed
description.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
the DensePose task:
DENSEPOSE_HEATMAP: the weighted part segmentation loss.
DENSEPOSE_REGRESSION: the weighted part surface coordinate loss.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_densepose_losses
|
AvikantSrivastava/models
|
python
|
def _compute_densepose_losses(self, input_height, input_width, prediction_dict):
'Computes the weighted DensePose losses.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: A dictionary holding predicted tensors output by the\n "predict" function. See the "predict" function for more detailed\n description.\n\n Returns:\n A dictionary of scalar float tensors representing the weighted losses for\n the DensePose task:\n DENSEPOSE_HEATMAP: the weighted part segmentation loss.\n DENSEPOSE_REGRESSION: the weighted part surface coordinate loss.\n '
(dp_heatmap_loss, dp_regression_loss) = self._compute_densepose_part_and_coordinate_losses(input_height=input_height, input_width=input_width, part_predictions=prediction_dict[DENSEPOSE_HEATMAP], surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION])
loss_dict = {}
loss_dict[DENSEPOSE_HEATMAP] = (self._densepose_params.part_loss_weight * dp_heatmap_loss)
loss_dict[DENSEPOSE_REGRESSION] = (self._densepose_params.coordinate_loss_weight * dp_regression_loss)
return loss_dict
|
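A minimal sketch of the weighted combination this record describes, with made-up weight values standing in for the DensePose parameters; the two component losses are placeholders for the outputs of _compute_densepose_part_and_coordinate_losses.
part_loss_weight = 1.0          # assumed value of densepose_params.part_loss_weight
coordinate_loss_weight = 2.0    # assumed value of densepose_params.coordinate_loss_weight
dp_heatmap_loss = 0.8           # unweighted part segmentation loss (placeholder)
dp_regression_loss = 0.25       # unweighted surface coordinate loss (placeholder)
loss_dict = {
    'densepose/heatmap': part_loss_weight * dp_heatmap_loss,
    'densepose/regression': coordinate_loss_weight * dp_regression_loss,
}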
def _compute_densepose_part_and_coordinate_losses(self, input_height, input_width, part_predictions, surface_coord_predictions):
'Computes the individual losses for the DensePose task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n part_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, num_parts].\n surface_coord_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2 * num_parts].\n\n Returns:\n A tuple with two scalar loss tensors: part_prediction_loss and\n surface_coord_loss.\n '
gt_dp_num_points_list = self.groundtruth_lists(fields.BoxListFields.densepose_num_points)
gt_dp_part_ids_list = self.groundtruth_lists(fields.BoxListFields.densepose_part_ids)
gt_dp_surface_coords_list = self.groundtruth_lists(fields.BoxListFields.densepose_surface_coords)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[DENSEPOSE_TASK]
(batch_indices, batch_part_ids, batch_surface_coords, batch_weights) = assigner.assign_part_and_coordinate_targets(height=input_height, width=input_width, gt_dp_num_points_list=gt_dp_num_points_list, gt_dp_part_ids_list=gt_dp_part_ids_list, gt_dp_surface_coords_list=gt_dp_surface_coords_list, gt_weights_list=gt_weights_list)
part_prediction_loss = 0
surface_coord_loss = 0
classification_loss_fn = self._densepose_params.classification_loss
localization_loss_fn = self._densepose_params.localization_loss
num_predictions = float(len(part_predictions))
num_valid_points = tf.math.count_nonzero(batch_weights)
num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32)
for (part_pred, surface_coord_pred) in zip(part_predictions, surface_coord_predictions):
if self._densepose_params.upsample_to_input_res:
part_pred = tf.keras.layers.UpSampling2D(self._stride, interpolation=self._densepose_params.upsample_method)(part_pred)
surface_coord_pred = tf.keras.layers.UpSampling2D(self._stride, interpolation=self._densepose_params.upsample_method)(surface_coord_pred)
part_pred = cn_assigner.get_batch_predictions_from_indices(part_pred, batch_indices[:, 0:3])
part_prediction_loss += classification_loss_fn(part_pred[:, tf.newaxis, :], batch_part_ids[:, tf.newaxis, :], weights=batch_weights[:, tf.newaxis, tf.newaxis])
(batch_size, out_height, out_width, _) = _get_shape(surface_coord_pred, 4)
surface_coord_pred = tf.reshape(surface_coord_pred, [batch_size, out_height, out_width, (- 1), 2])
surface_coord_pred = cn_assigner.get_batch_predictions_from_indices(surface_coord_pred, batch_indices)
surface_coord_loss += localization_loss_fn(surface_coord_pred, batch_surface_coords, weights=batch_weights[:, tf.newaxis])
part_prediction_loss = (tf.reduce_sum(part_prediction_loss) / (num_predictions * num_valid_points))
surface_coord_loss = (tf.reduce_sum(surface_coord_loss) / (num_predictions * num_valid_points))
return (part_prediction_loss, surface_coord_loss)
| 1,636,117,419,904,766,700 |
Computes the individual losses for the DensePose task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
part_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_parts].
surface_coord_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_parts].
Returns:
A tuple with two scalar loss tensors: part_prediction_loss and
surface_coord_loss.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_densepose_part_and_coordinate_losses
|
AvikantSrivastava/models
|
python
|
def _compute_densepose_part_and_coordinate_losses(self, input_height, input_width, part_predictions, surface_coord_predictions):
'Computes the individual losses for the DensePose task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n part_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, num_parts].\n surface_coord_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2 * num_parts].\n\n Returns:\n A tuple with two scalar loss tensors: part_prediction_loss and\n surface_coord_loss.\n '
gt_dp_num_points_list = self.groundtruth_lists(fields.BoxListFields.densepose_num_points)
gt_dp_part_ids_list = self.groundtruth_lists(fields.BoxListFields.densepose_part_ids)
gt_dp_surface_coords_list = self.groundtruth_lists(fields.BoxListFields.densepose_surface_coords)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[DENSEPOSE_TASK]
(batch_indices, batch_part_ids, batch_surface_coords, batch_weights) = assigner.assign_part_and_coordinate_targets(height=input_height, width=input_width, gt_dp_num_points_list=gt_dp_num_points_list, gt_dp_part_ids_list=gt_dp_part_ids_list, gt_dp_surface_coords_list=gt_dp_surface_coords_list, gt_weights_list=gt_weights_list)
part_prediction_loss = 0
surface_coord_loss = 0
classification_loss_fn = self._densepose_params.classification_loss
localization_loss_fn = self._densepose_params.localization_loss
num_predictions = float(len(part_predictions))
num_valid_points = tf.math.count_nonzero(batch_weights)
num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32)
for (part_pred, surface_coord_pred) in zip(part_predictions, surface_coord_predictions):
if self._densepose_params.upsample_to_input_res:
part_pred = tf.keras.layers.UpSampling2D(self._stride, interpolation=self._densepose_params.upsample_method)(part_pred)
surface_coord_pred = tf.keras.layers.UpSampling2D(self._stride, interpolation=self._densepose_params.upsample_method)(surface_coord_pred)
part_pred = cn_assigner.get_batch_predictions_from_indices(part_pred, batch_indices[:, 0:3])
part_prediction_loss += classification_loss_fn(part_pred[:, tf.newaxis, :], batch_part_ids[:, tf.newaxis, :], weights=batch_weights[:, tf.newaxis, tf.newaxis])
(batch_size, out_height, out_width, _) = _get_shape(surface_coord_pred, 4)
surface_coord_pred = tf.reshape(surface_coord_pred, [batch_size, out_height, out_width, (- 1), 2])
surface_coord_pred = cn_assigner.get_batch_predictions_from_indices(surface_coord_pred, batch_indices)
surface_coord_loss += localization_loss_fn(surface_coord_pred, batch_surface_coords, weights=batch_weights[:, tf.newaxis])
part_prediction_loss = (tf.reduce_sum(part_prediction_loss) / (num_predictions * num_valid_points))
surface_coord_loss = (tf.reduce_sum(surface_coord_loss) / (num_predictions * num_valid_points))
return (part_prediction_loss, surface_coord_loss)
|
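A minimal sketch of two pieces of bookkeeping used above: gathering predictions at assigned (batch, y, x) pixel indices, with tf.gather_nd standing in for get_batch_predictions_from_indices, and normalizing by the number of non-zero-weight points, clamped to at least one. All tensor contents and sizes are illustrative.
import tensorflow as tf

part_logits = tf.random.normal([2, 64, 64, 24])              # [batch, h, w, num_parts]
batch_indices = tf.constant([[0, 10, 12], [1, 30, 5]])       # [num_points, 3] = (batch, y, x)
batch_weights = tf.constant([1.0, 0.0])                      # one valid point, one ignored point

part_pred = tf.gather_nd(part_logits, batch_indices)         # [num_points, num_parts]
num_valid_points = tf.cast(tf.maximum(tf.math.count_nonzero(batch_weights), 1), tf.float32)
num_predictions = 1.0                                        # single feature output in this sketch
raw_loss = tf.constant(3.0)                                  # placeholder for the summed part loss
part_prediction_loss = raw_loss / (num_predictions * num_valid_points)   # -> 3.0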
def _compute_track_losses(self, input_height, input_width, prediction_dict):
'Computes all the losses associated with tracking.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: The dictionary returned from the predict() method.\n\n Returns:\n A dictionary with tracking losses.\n '
object_reid_predictions = prediction_dict[TRACK_REID]
embedding_loss = self._compute_track_embedding_loss(input_height=input_height, input_width=input_width, object_reid_predictions=object_reid_predictions)
losses = {TRACK_REID: embedding_loss}
return losses
| 3,826,571,471,669,439,500 |
Computes all the losses associated with tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with tracking losses.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_track_losses
|
AvikantSrivastava/models
|
python
|
def _compute_track_losses(self, input_height, input_width, prediction_dict):
'Computes all the losses associated with tracking.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: The dictionary returned from the predict() method.\n\n Returns:\n A dictionary with tracking losses.\n '
object_reid_predictions = prediction_dict[TRACK_REID]
embedding_loss = self._compute_track_embedding_loss(input_height=input_height, input_width=input_width, object_reid_predictions=object_reid_predictions)
losses = {TRACK_REID: embedding_loss}
return losses
|
def _compute_track_embedding_loss(self, input_height, input_width, object_reid_predictions):
'Computes the object ReID loss.\n\n The embedding is trained as a classification task where the target is the\n ID of each track among all tracks in the whole dataset.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n object_reid_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, reid_embed_size] representing the object\n embedding feature maps.\n\n Returns:\n A float scalar tensor representing the object ReID loss per instance.\n '
gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
assigner = self._target_assigner_dict[TRACK_TASK]
(batch_indices, batch_weights, track_targets) = assigner.assign_track_targets(height=input_height, width=input_width, gt_track_ids_list=gt_track_ids_list, gt_boxes_list=gt_boxes_list, gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, (- 1))
loss = 0.0
object_reid_loss = self._track_params.classification_loss
for pred in object_reid_predictions:
embedding_pred = cn_assigner.get_batch_predictions_from_indices(pred, batch_indices)
reid_classification = self.track_reid_classification_net(embedding_pred)
loss += object_reid_loss(reid_classification, track_targets, weights=batch_weights)
loss_per_instance = (tf.reduce_sum(loss) / (float(len(object_reid_predictions)) * num_boxes))
return loss_per_instance
| -975,937,577,233,702,400 |
Computes the object ReID loss.
The embedding is trained as a classification task where the target is the
ID of each track among all tracks in the whole dataset.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_reid_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, reid_embed_size] representing the object
embedding feature maps.
Returns:
A float scalar tensor representing the object ReID loss per instance.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_track_embedding_loss
|
AvikantSrivastava/models
|
python
|
def _compute_track_embedding_loss(self, input_height, input_width, object_reid_predictions):
'Computes the object ReID loss.\n\n The embedding is trained as a classification task where the target is the\n ID of each track among all tracks in the whole dataset.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n object_reid_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, reid_embed_size] representing the object\n embedding feature maps.\n\n Returns:\n A float scalar tensor representing the object ReID loss per instance.\n '
gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
assigner = self._target_assigner_dict[TRACK_TASK]
(batch_indices, batch_weights, track_targets) = assigner.assign_track_targets(height=input_height, width=input_width, gt_track_ids_list=gt_track_ids_list, gt_boxes_list=gt_boxes_list, gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, (- 1))
loss = 0.0
object_reid_loss = self._track_params.classification_loss
for pred in object_reid_predictions:
embedding_pred = cn_assigner.get_batch_predictions_from_indices(pred, batch_indices)
reid_classification = self.track_reid_classification_net(embedding_pred)
loss += object_reid_loss(reid_classification, track_targets, weights=batch_weights)
loss_per_instance = (tf.reduce_sum(loss) / (float(len(object_reid_predictions)) * num_boxes))
return loss_per_instance
|
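A minimal sketch of the ReID training idea described above: embeddings gathered at object centers are pushed through a classifier over all track IDs, penalized with softmax cross-entropy, and normalized by the number of boxes. The Dense classifier, sizes, and random tensors are illustrative stand-ins for track_reid_classification_net and its inputs.
import tensorflow as tf

num_tracks = 1000
reid_embed_size = 128
num_boxes = 8.0

embedding_pred = tf.random.normal([8, reid_embed_size])                  # embeddings at 8 object centers
track_ids = tf.random.uniform([8], maxval=num_tracks, dtype=tf.int32)    # groundtruth track IDs
track_targets = tf.one_hot(track_ids, num_tracks)

classifier = tf.keras.layers.Dense(num_tracks)                           # stand-in for the ReID head
logits = classifier(embedding_pred)
per_box_loss = tf.nn.softmax_cross_entropy_with_logits(labels=track_targets, logits=logits)
loss_per_instance = tf.reduce_sum(per_box_loss) / num_boxes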
def _compute_temporal_offset_loss(self, input_height, input_width, prediction_dict):
'Computes the temporal offset loss for tracking.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: The dictionary returned from the predict() method.\n\n Returns:\n A dictionary with track/temporal_offset losses.\n '
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_offsets_list = self.groundtruth_lists(fields.BoxListFields.temporal_offsets)
gt_match_list = self.groundtruth_lists(fields.BoxListFields.track_match_flags)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = tf.cast(get_num_instances_from_weights(gt_weights_list), tf.float32)
offset_predictions = prediction_dict[TEMPORAL_OFFSET]
num_predictions = float(len(offset_predictions))
assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK]
(batch_indices, batch_offset_targets, batch_weights) = assigner.assign_temporal_offset_targets(height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_offsets_list=gt_offsets_list, gt_match_list=gt_match_list, gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, (- 1))
offset_loss_fn = self._temporal_offset_params.localization_loss
loss_dict = {}
offset_loss = 0
for offset_pred in offset_predictions:
offset_pred = cn_assigner.get_batch_predictions_from_indices(offset_pred, batch_indices)
offset_loss += offset_loss_fn(offset_pred[:, None], batch_offset_targets[:, None], weights=batch_weights)
offset_loss = (tf.reduce_sum(offset_loss) / (num_predictions * num_boxes))
loss_dict[TEMPORAL_OFFSET] = offset_loss
return loss_dict
| -6,298,571,109,006,518,000 |
Computes the temporal offset loss for tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with track/temporal_offset losses.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_compute_temporal_offset_loss
|
AvikantSrivastava/models
|
python
|
def _compute_temporal_offset_loss(self, input_height, input_width, prediction_dict):
'Computes the temporal offset loss for tracking.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: The dictionary returned from the predict() method.\n\n Returns:\n A dictionary with track/temporal_offset losses.\n '
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_offsets_list = self.groundtruth_lists(fields.BoxListFields.temporal_offsets)
gt_match_list = self.groundtruth_lists(fields.BoxListFields.track_match_flags)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = tf.cast(get_num_instances_from_weights(gt_weights_list), tf.float32)
offset_predictions = prediction_dict[TEMPORAL_OFFSET]
num_predictions = float(len(offset_predictions))
assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK]
(batch_indices, batch_offset_targets, batch_weights) = assigner.assign_temporal_offset_targets(height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_offsets_list=gt_offsets_list, gt_match_list=gt_match_list, gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, (- 1))
offset_loss_fn = self._temporal_offset_params.localization_loss
loss_dict = {}
offset_loss = 0
for offset_pred in offset_predictions:
offset_pred = cn_assigner.get_batch_predictions_from_indices(offset_pred, batch_indices)
offset_loss += offset_loss_fn(offset_pred[:, None], batch_offset_targets[:, None], weights=batch_weights)
offset_loss = (tf.reduce_sum(offset_loss) / (num_predictions * num_boxes))
loss_dict[TEMPORAL_OFFSET] = offset_loss
return loss_dict
|
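A minimal sketch of the offset regression above: predicted (dy, dx) temporal offsets are gathered at assigned center pixels, compared with groundtruth offsets under an L1 penalty, masked by per-instance weights, and normalized by the number of boxes. The index layout, shapes, and the use of plain L1 in place of the configurable localization loss are assumptions.
import tensorflow as tf

offset_map = tf.random.normal([2, 64, 64, 2])               # [batch, h, w, 2] predicted (dy, dx)
batch_indices = tf.constant([[0, 10, 12], [1, 30, 5]])      # (batch, y, x) per matched instance
batch_offset_targets = tf.constant([[1.5, -0.5], [0.0, 2.0]])
batch_weights = tf.constant([[1.0], [1.0]])                 # matched instances get weight 1
num_boxes = 2.0

offset_pred = tf.gather_nd(offset_map, batch_indices)       # [num_instances, 2]
l1 = tf.abs(offset_pred - batch_offset_targets) * batch_weights
offset_loss = tf.reduce_sum(l1) / (1.0 * num_boxes)         # one feature output in this sketch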
def predict(self, preprocessed_inputs, _):
"Predicts CenterNet prediction tensors given an input batch.\n\n Feature extractors are free to produce predictions from multiple feature\n maps and therefore we return a dictionary mapping strings to lists.\n E.g. the hourglass backbone produces two feature maps.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float32 tensor\n representing a batch of images.\n\n Returns:\n prediction_dict: a dictionary holding predicted tensors with\n 'preprocessed_inputs' - The input image after being resized and\n preprocessed by the feature extractor.\n 'object_center' - A list of size num_feature_outputs containing\n float tensors of size [batch_size, output_height, output_width,\n num_classes] representing the predicted object center heatmap logits.\n 'box/scale' - [optional] A list of size num_feature_outputs holding\n float tensors of size [batch_size, output_height, output_width, 2]\n representing the predicted box height and width at each output\n location. This field exists only when object detection task is\n specified.\n 'box/offset' - [optional] A list of size num_feature_outputs holding\n float tensors of size [batch_size, output_height, output_width, 2]\n representing the predicted y and x offsets at each output location.\n '$TASK_NAME/keypoint_heatmap' - [optional] A list of size\n num_feature_outputs holding float tensors of size [batch_size,\n output_height, output_width, num_keypoints] representing the predicted\n keypoint heatmap logits.\n '$TASK_NAME/keypoint_offset' - [optional] A list of size\n num_feature_outputs holding float tensors of size [batch_size,\n output_height, output_width, 2] representing the predicted keypoint\n offsets at each output location.\n '$TASK_NAME/keypoint_regression' - [optional] A list of size\n num_feature_outputs holding float tensors of size [batch_size,\n output_height, output_width, 2 * num_keypoints] representing the\n predicted keypoint regression at each output location.\n 'segmentation/heatmap' - [optional] A list of size num_feature_outputs\n holding float tensors of size [batch_size, output_height,\n output_width, num_classes] representing the mask logits.\n 'densepose/heatmap' - [optional] A list of size num_feature_outputs\n holding float tensors of size [batch_size, output_height,\n output_width, num_parts] representing the mask logits for each part.\n 'densepose/regression' - [optional] A list of size num_feature_outputs\n holding float tensors of size [batch_size, output_height,\n output_width, 2 * num_parts] representing the DensePose surface\n coordinate predictions.\n Note the $TASK_NAME is provided by the KeypointEstimation namedtuple\n used to differentiate between different keypoint tasks.\n "
features_list = self._feature_extractor(preprocessed_inputs)
predictions = {}
for (head_name, heads) in self._prediction_head_dict.items():
predictions[head_name] = [head(feature) for (feature, head) in zip(features_list, heads)]
predictions['preprocessed_inputs'] = preprocessed_inputs
self._batched_prediction_tensor_names = predictions.keys()
return predictions
| -738,261,801,241,657,200 |
Predicts CenterNet prediction tensors given an input batch.
Feature extractors are free to produce predictions from multiple feature
maps and therefore we return a dictionary mapping strings to lists.
E.g. the hourglass backbone produces two feature maps.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
Returns:
prediction_dict: a dictionary holding predicted tensors with
'preprocessed_inputs' - The input image after being resized and
preprocessed by the feature extractor.
'object_center' - A list of size num_feature_outputs containing
float tensors of size [batch_size, output_height, output_width,
num_classes] representing the predicted object center heatmap logits.
'box/scale' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted box height and width at each output
location. This field exists only when object detection task is
specified.
'box/offset' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted y and x offsets at each output location.
'$TASK_NAME/keypoint_heatmap' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, num_keypoints] representing the predicted
keypoint heatmap logits.
'$TASK_NAME/keypoint_offset' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2] representing the predicted keypoint
offsets at each output location.
'$TASK_NAME/keypoint_regression' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2 * num_keypoints] representing the
predicted keypoint regression at each output location.
'segmentation/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_classes] representing the mask logits.
'densepose/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_parts] representing the mask logits for each part.
'densepose/regression' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, 2 * num_parts] representing the DensePose surface
coordinate predictions.
Note the $TASK_NAME is provided by the KeypointEstimation namedtuple
used to differentiate between different keypoint tasks.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
predict
|
AvikantSrivastava/models
|
python
|
def predict(self, preprocessed_inputs, _):
"Predicts CenterNet prediction tensors given an input batch.\n\n Feature extractors are free to produce predictions from multiple feature\n maps and therefore we return a dictionary mapping strings to lists.\n E.g. the hourglass backbone produces two feature maps.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float32 tensor\n representing a batch of images.\n\n Returns:\n prediction_dict: a dictionary holding predicted tensors with\n 'preprocessed_inputs' - The input image after being resized and\n preprocessed by the feature extractor.\n 'object_center' - A list of size num_feature_outputs containing\n float tensors of size [batch_size, output_height, output_width,\n num_classes] representing the predicted object center heatmap logits.\n 'box/scale' - [optional] A list of size num_feature_outputs holding\n float tensors of size [batch_size, output_height, output_width, 2]\n representing the predicted box height and width at each output\n location. This field exists only when object detection task is\n specified.\n 'box/offset' - [optional] A list of size num_feature_outputs holding\n float tensors of size [batch_size, output_height, output_width, 2]\n representing the predicted y and x offsets at each output location.\n '$TASK_NAME/keypoint_heatmap' - [optional] A list of size\n num_feature_outputs holding float tensors of size [batch_size,\n output_height, output_width, num_keypoints] representing the predicted\n keypoint heatmap logits.\n '$TASK_NAME/keypoint_offset' - [optional] A list of size\n num_feature_outputs holding float tensors of size [batch_size,\n output_height, output_width, 2] representing the predicted keypoint\n offsets at each output location.\n '$TASK_NAME/keypoint_regression' - [optional] A list of size\n num_feature_outputs holding float tensors of size [batch_size,\n output_height, output_width, 2 * num_keypoints] representing the\n predicted keypoint regression at each output location.\n 'segmentation/heatmap' - [optional] A list of size num_feature_outputs\n holding float tensors of size [batch_size, output_height,\n output_width, num_classes] representing the mask logits.\n 'densepose/heatmap' - [optional] A list of size num_feature_outputs\n holding float tensors of size [batch_size, output_height,\n output_width, num_parts] representing the mask logits for each part.\n 'densepose/regression' - [optional] A list of size num_feature_outputs\n holding float tensors of size [batch_size, output_height,\n output_width, 2 * num_parts] representing the DensePose surface\n coordinate predictions.\n Note the $TASK_NAME is provided by the KeypointEstimation namedtuple\n used to differentiate between different keypoint tasks.\n "
features_list = self._feature_extractor(preprocessed_inputs)
predictions = {}
for (head_name, heads) in self._prediction_head_dict.items():
predictions[head_name] = [head(feature) for (feature, head) in zip(features_list, heads)]
predictions['preprocessed_inputs'] = preprocessed_inputs
self._batched_prediction_tensor_names = predictions.keys()
return predictions
|
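A minimal sketch of the fan-out in predict() above: each named head is applied to every backbone feature map, so every entry of the prediction dictionary is a list with one tensor per feature output. The plain Conv2D heads and random feature maps are illustrative stand-ins for the real feature extractor and prediction heads.
import tensorflow as tf

features_list = [tf.random.normal([2, 128, 128, 64]) for _ in range(2)]   # e.g. two hourglass outputs
prediction_head_dict = {
    'object_center': [tf.keras.layers.Conv2D(90, 1) for _ in range(2)],
    'box/scale': [tf.keras.layers.Conv2D(2, 1) for _ in range(2)],
    'box/offset': [tf.keras.layers.Conv2D(2, 1) for _ in range(2)],
}
predictions = {
    head_name: [head(feature) for feature, head in zip(features_list, heads)]
    for head_name, heads in prediction_head_dict.items()
}
# predictions['object_center'] is a list of two [2, 128, 128, 90] tensors.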
def loss(self, prediction_dict, true_image_shapes, scope=None):
'Computes scalar loss tensors with respect to provided groundtruth.\n\n This function implements the various CenterNet losses.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors returned by\n "predict" function.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is of\n the form [height, width, channels] indicating the shapes of true images\n in the resized images, as resized images can be padded with zeros.\n scope: Optional scope name.\n\n Returns:\n A dictionary mapping the keys [\n \'Loss/object_center\',\n \'Loss/box/scale\', (optional)\n \'Loss/box/offset\', (optional)\n \'Loss/$TASK_NAME/keypoint/heatmap\', (optional)\n \'Loss/$TASK_NAME/keypoint/offset\', (optional)\n \'Loss/$TASK_NAME/keypoint/regression\', (optional)\n \'Loss/segmentation/heatmap\', (optional)\n \'Loss/densepose/heatmap\', (optional)\n \'Loss/densepose/regression\', (optional)\n \'Loss/track/reid\'] (optional)\n \'Loss/track/offset\'] (optional)\n scalar tensors corresponding to the losses for different tasks. Note the\n $TASK_NAME is provided by the KeypointEstimation namedtuple used to\n differentiate between different keypoint tasks.\n '
(_, input_height, input_width, _) = _get_shape(prediction_dict['preprocessed_inputs'], 4)
(output_height, output_width) = ((input_height // self._stride), (input_width // self._stride))
output_true_image_shapes = tf.ceil((tf.to_float(true_image_shapes) / self._stride))
valid_anchor_weights = get_valid_anchor_weights_in_flattened_image(output_true_image_shapes, output_height, output_width)
valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2)
object_center_loss = self._compute_object_center_loss(object_center_predictions=prediction_dict[OBJECT_CENTER], input_height=input_height, input_width=input_width, per_pixel_weights=valid_anchor_weights)
losses = {OBJECT_CENTER: (self._center_params.object_center_loss_weight * object_center_loss)}
if (self._od_params is not None):
od_losses = self._compute_object_detection_losses(input_height=input_height, input_width=input_width, prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights)
for key in od_losses:
od_losses[key] = (od_losses[key] * self._od_params.task_loss_weight)
losses.update(od_losses)
if (self._kp_params_dict is not None):
for (task_name, params) in self._kp_params_dict.items():
kp_losses = self._compute_keypoint_estimation_losses(task_name=task_name, input_height=input_height, input_width=input_width, prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights)
for key in kp_losses:
kp_losses[key] = (kp_losses[key] * params.task_loss_weight)
losses.update(kp_losses)
if (self._mask_params is not None):
seg_losses = self._compute_segmentation_losses(prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights)
for key in seg_losses:
seg_losses[key] = (seg_losses[key] * self._mask_params.task_loss_weight)
losses.update(seg_losses)
if (self._densepose_params is not None):
densepose_losses = self._compute_densepose_losses(input_height=input_height, input_width=input_width, prediction_dict=prediction_dict)
for key in densepose_losses:
densepose_losses[key] = (densepose_losses[key] * self._densepose_params.task_loss_weight)
losses.update(densepose_losses)
if (self._track_params is not None):
track_losses = self._compute_track_losses(input_height=input_height, input_width=input_width, prediction_dict=prediction_dict)
for key in track_losses:
track_losses[key] = (track_losses[key] * self._track_params.task_loss_weight)
losses.update(track_losses)
if (self._temporal_offset_params is not None):
offset_losses = self._compute_temporal_offset_loss(input_height=input_height, input_width=input_width, prediction_dict=prediction_dict)
for key in offset_losses:
offset_losses[key] = (offset_losses[key] * self._temporal_offset_params.task_loss_weight)
losses.update(offset_losses)
return dict([(('%s/%s' % (LOSS_KEY_PREFIX, key)), val) for (key, val) in losses.items()])
| 4,125,836,591,047,334,000 |
Computes scalar loss tensors with respect to provided groundtruth.
This function implements the various CenterNet losses.
Args:
prediction_dict: a dictionary holding predicted tensors returned by
"predict" function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
scope: Optional scope name.
Returns:
A dictionary mapping the keys [
'Loss/object_center',
'Loss/box/scale', (optional)
'Loss/box/offset', (optional)
'Loss/$TASK_NAME/keypoint/heatmap', (optional)
'Loss/$TASK_NAME/keypoint/offset', (optional)
'Loss/$TASK_NAME/keypoint/regression', (optional)
'Loss/segmentation/heatmap', (optional)
'Loss/densepose/heatmap', (optional)
'Loss/densepose/regression', (optional)
'Loss/track/reid', (optional)
'Loss/track/offset'] (optional)
scalar tensors corresponding to the losses for different tasks. Note the
$TASK_NAME is provided by the KeypointEstimation namedtuple used to
differentiate between different keypoint tasks.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
loss
|
AvikantSrivastava/models
|
python
|
def loss(self, prediction_dict, true_image_shapes, scope=None):
'Computes scalar loss tensors with respect to provided groundtruth.\n\n This function implements the various CenterNet losses.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors returned by\n "predict" function.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is of\n the form [height, width, channels] indicating the shapes of true images\n in the resized images, as resized images can be padded with zeros.\n scope: Optional scope name.\n\n Returns:\n A dictionary mapping the keys [\n \'Loss/object_center\',\n \'Loss/box/scale\', (optional)\n \'Loss/box/offset\', (optional)\n \'Loss/$TASK_NAME/keypoint/heatmap\', (optional)\n \'Loss/$TASK_NAME/keypoint/offset\', (optional)\n \'Loss/$TASK_NAME/keypoint/regression\', (optional)\n \'Loss/segmentation/heatmap\', (optional)\n \'Loss/densepose/heatmap\', (optional)\n \'Loss/densepose/regression\', (optional)\n \'Loss/track/reid\'] (optional)\n \'Loss/track/offset\'] (optional)\n scalar tensors corresponding to the losses for different tasks. Note the\n $TASK_NAME is provided by the KeypointEstimation namedtuple used to\n differentiate between different keypoint tasks.\n '
(_, input_height, input_width, _) = _get_shape(prediction_dict['preprocessed_inputs'], 4)
(output_height, output_width) = ((input_height // self._stride), (input_width // self._stride))
output_true_image_shapes = tf.ceil((tf.to_float(true_image_shapes) / self._stride))
valid_anchor_weights = get_valid_anchor_weights_in_flattened_image(output_true_image_shapes, output_height, output_width)
valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2)
object_center_loss = self._compute_object_center_loss(object_center_predictions=prediction_dict[OBJECT_CENTER], input_height=input_height, input_width=input_width, per_pixel_weights=valid_anchor_weights)
losses = {OBJECT_CENTER: (self._center_params.object_center_loss_weight * object_center_loss)}
if (self._od_params is not None):
od_losses = self._compute_object_detection_losses(input_height=input_height, input_width=input_width, prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights)
for key in od_losses:
od_losses[key] = (od_losses[key] * self._od_params.task_loss_weight)
losses.update(od_losses)
if (self._kp_params_dict is not None):
for (task_name, params) in self._kp_params_dict.items():
kp_losses = self._compute_keypoint_estimation_losses(task_name=task_name, input_height=input_height, input_width=input_width, prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights)
for key in kp_losses:
kp_losses[key] = (kp_losses[key] * params.task_loss_weight)
losses.update(kp_losses)
if (self._mask_params is not None):
seg_losses = self._compute_segmentation_losses(prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights)
for key in seg_losses:
seg_losses[key] = (seg_losses[key] * self._mask_params.task_loss_weight)
losses.update(seg_losses)
if (self._densepose_params is not None):
densepose_losses = self._compute_densepose_losses(input_height=input_height, input_width=input_width, prediction_dict=prediction_dict)
for key in densepose_losses:
densepose_losses[key] = (densepose_losses[key] * self._densepose_params.task_loss_weight)
losses.update(densepose_losses)
if (self._track_params is not None):
track_losses = self._compute_track_losses(input_height=input_height, input_width=input_width, prediction_dict=prediction_dict)
for key in track_losses:
track_losses[key] = (track_losses[key] * self._track_params.task_loss_weight)
losses.update(track_losses)
if (self._temporal_offset_params is not None):
offset_losses = self._compute_temporal_offset_loss(input_height=input_height, input_width=input_width, prediction_dict=prediction_dict)
for key in offset_losses:
offset_losses[key] = (offset_losses[key] * self._temporal_offset_params.task_loss_weight)
losses.update(offset_losses)
return dict([(('%s/%s' % (LOSS_KEY_PREFIX, key)), val) for (key, val) in losses.items()])
|
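A minimal sketch of the bookkeeping at the end of loss() above: per-task losses are scaled by their task weight and every key is prefixed before being returned. The weight value, loss numbers, and the 'Loss' prefix shown here are illustrative.
task_loss_weight = 0.5                          # assumed od_params.task_loss_weight
losses = {'object_center': 1.2, 'box/scale': 0.8, 'box/offset': 0.4}
for key in ('box/scale', 'box/offset'):
    losses[key] *= task_loss_weight
loss_dict = {'%s/%s' % ('Loss', key): val for key, val in losses.items()}
# {'Loss/object_center': 1.2, 'Loss/box/scale': 0.4, 'Loss/box/offset': 0.2}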
def postprocess(self, prediction_dict, true_image_shapes, **params):
'Produces boxes given a prediction dict returned by predict().\n\n Although predict returns a list of tensors, only the last tensor in\n each list is used for making box predictions.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors from "predict"\n function.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is of\n the form [height, width, channels] indicating the shapes of true images\n in the resized images, as resized images can be padded with zeros.\n **params: Currently ignored.\n\n Returns:\n detections: a dictionary containing the following fields\n detection_boxes - A tensor of shape [batch, max_detections, 4]\n holding the predicted boxes.\n detection_boxes_strided: A tensor of shape [batch_size, num_detections,\n 4] holding the predicted boxes in absolute coordinates of the\n feature extractor\'s final layer output.\n detection_scores: A tensor of shape [batch, max_detections] holding\n the predicted score for each box.\n detection_classes: An integer tensor of shape [batch, max_detections]\n containing the detected class for each box.\n num_detections: An integer tensor of shape [batch] containing the\n number of detected boxes for each sample in the batch.\n detection_keypoints: (Optional) A float tensor of shape [batch,\n max_detections, num_keypoints, 2] with normalized keypoints. Any\n invalid keypoints have their coordinates and scores set to 0.0.\n detection_keypoint_scores: (Optional) A float tensor of shape [batch,\n max_detection, num_keypoints] with scores for each keypoint.\n detection_masks: (Optional) A uint8 tensor of shape [batch,\n max_detections, mask_height, mask_width] with masks for each\n detection. Background is specified with 0, and foreground is specified\n with positive integers (1 for standard instance segmentation mask, and\n 1-indexed parts for DensePose task).\n detection_surface_coords: (Optional) A float32 tensor of shape [batch,\n max_detection, mask_height, mask_width, 2] with DensePose surface\n coordinates, in (v, u) format.\n detection_embeddings: (Optional) A float tensor of shape [batch,\n max_detections, reid_embed_size] containing object embeddings.\n '
object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][(- 1)])
(detection_scores, y_indices, x_indices, channel_indices) = top_k_feature_map_locations(object_center_prob, max_pool_kernel_size=3, k=self._center_params.max_box_predictions)
(boxes_strided, classes, scores, num_detections) = prediction_tensors_to_boxes(detection_scores, y_indices, x_indices, channel_indices, prediction_dict[BOX_SCALE][(- 1)], prediction_dict[BOX_OFFSET][(- 1)])
boxes = convert_strided_predictions_to_normalized_boxes(boxes_strided, self._stride, true_image_shapes)
postprocess_dict = {fields.DetectionResultFields.detection_boxes: boxes, fields.DetectionResultFields.detection_scores: scores, fields.DetectionResultFields.detection_classes: classes, fields.DetectionResultFields.num_detections: num_detections, 'detection_boxes_strided': boxes_strided}
if self._kp_params_dict:
(keypoints, keypoint_scores) = self._postprocess_keypoints(prediction_dict, classes, y_indices, x_indices, boxes_strided, num_detections)
(keypoints, keypoint_scores) = convert_strided_predictions_to_normalized_keypoints(keypoints, keypoint_scores, self._stride, true_image_shapes, clip_out_of_frame_keypoints=True)
postprocess_dict.update({fields.DetectionResultFields.detection_keypoints: keypoints, fields.DetectionResultFields.detection_keypoint_scores: keypoint_scores})
if self._mask_params:
masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][(- 1)])
(densepose_part_heatmap, densepose_surface_coords) = (None, None)
densepose_class_index = 0
if self._densepose_params:
densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][(- 1)]
densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][(- 1)]
densepose_class_index = self._densepose_params.class_id
(instance_masks, surface_coords) = convert_strided_predictions_to_instance_masks(boxes, classes, masks, true_image_shapes, densepose_part_heatmap, densepose_surface_coords, stride=self._stride, mask_height=self._mask_params.mask_height, mask_width=self._mask_params.mask_width, score_threshold=self._mask_params.score_threshold, densepose_class_index=densepose_class_index)
postprocess_dict[fields.DetectionResultFields.detection_masks] = instance_masks
if self._densepose_params:
postprocess_dict[fields.DetectionResultFields.detection_surface_coords] = surface_coords
if self._track_params:
embeddings = self._postprocess_embeddings(prediction_dict, y_indices, x_indices)
postprocess_dict.update({fields.DetectionResultFields.detection_embeddings: embeddings})
if self._temporal_offset_params:
offsets = prediction_tensors_to_temporal_offsets(y_indices, x_indices, prediction_dict[TEMPORAL_OFFSET][(- 1)])
postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets
return postprocess_dict
| -427,511,309,887,318,850 |
Produces boxes given a prediction dict returned by predict().
Although predict returns a list of tensors, only the last tensor in
each list is used for making box predictions.
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
**params: Currently ignored.
Returns:
detections: a dictionary containing the following fields
detection_boxes - A tensor of shape [batch, max_detections, 4]
holding the predicted boxes.
detection_boxes_strided: A tensor of shape [batch_size, num_detections,
4] holding the predicted boxes in absolute coordinates of the
feature extractor's final layer output.
detection_scores: A tensor of shape [batch, max_detections] holding
the predicted score for each box.
detection_classes: An integer tensor of shape [batch, max_detections]
containing the detected class for each box.
num_detections: An integer tensor of shape [batch] containing the
number of detected boxes for each sample in the batch.
detection_keypoints: (Optional) A float tensor of shape [batch,
max_detections, num_keypoints, 2] with normalized keypoints. Any
invalid keypoints have their coordinates and scores set to 0.0.
detection_keypoint_scores: (Optional) A float tensor of shape [batch,
max_detections, num_keypoints] with scores for each keypoint.
detection_masks: (Optional) A uint8 tensor of shape [batch,
max_detections, mask_height, mask_width] with masks for each
detection. Background is specified with 0, and foreground is specified
with positive integers (1 for standard instance segmentation mask, and
1-indexed parts for DensePose task).
detection_surface_coords: (Optional) A float32 tensor of shape [batch,
max_detections, mask_height, mask_width, 2] with DensePose surface
coordinates, in (v, u) format.
detection_embeddings: (Optional) A float tensor of shape [batch,
max_detections, reid_embed_size] containing object embeddings.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
postprocess
|
AvikantSrivastava/models
|
python
|
def postprocess(self, prediction_dict, true_image_shapes, **params):
'Produces boxes given a prediction dict returned by predict().\n\n Although predict returns a list of tensors, only the last tensor in\n each list is used for making box predictions.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors from "predict"\n function.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is of\n the form [height, width, channels] indicating the shapes of true images\n in the resized images, as resized images can be padded with zeros.\n **params: Currently ignored.\n\n Returns:\n detections: a dictionary containing the following fields\n detection_boxes - A tensor of shape [batch, max_detections, 4]\n holding the predicted boxes.\n detection_boxes_strided: A tensor of shape [batch_size, num_detections,\n 4] holding the predicted boxes in absolute coordinates of the\n feature extractor\'s final layer output.\n detection_scores: A tensor of shape [batch, max_detections] holding\n the predicted score for each box.\n detection_classes: An integer tensor of shape [batch, max_detections]\n containing the detected class for each box.\n num_detections: An integer tensor of shape [batch] containing the\n number of detected boxes for each sample in the batch.\n detection_keypoints: (Optional) A float tensor of shape [batch,\n max_detections, num_keypoints, 2] with normalized keypoints. Any\n invalid keypoints have their coordinates and scores set to 0.0.\n detection_keypoint_scores: (Optional) A float tensor of shape [batch,\n max_detection, num_keypoints] with scores for each keypoint.\n detection_masks: (Optional) A uint8 tensor of shape [batch,\n max_detections, mask_height, mask_width] with masks for each\n detection. Background is specified with 0, and foreground is specified\n with positive integers (1 for standard instance segmentation mask, and\n 1-indexed parts for DensePose task).\n detection_surface_coords: (Optional) A float32 tensor of shape [batch,\n max_detection, mask_height, mask_width, 2] with DensePose surface\n coordinates, in (v, u) format.\n detection_embeddings: (Optional) A float tensor of shape [batch,\n max_detections, reid_embed_size] containing object embeddings.\n '
object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][(- 1)])
(detection_scores, y_indices, x_indices, channel_indices) = top_k_feature_map_locations(object_center_prob, max_pool_kernel_size=3, k=self._center_params.max_box_predictions)
(boxes_strided, classes, scores, num_detections) = prediction_tensors_to_boxes(detection_scores, y_indices, x_indices, channel_indices, prediction_dict[BOX_SCALE][(- 1)], prediction_dict[BOX_OFFSET][(- 1)])
boxes = convert_strided_predictions_to_normalized_boxes(boxes_strided, self._stride, true_image_shapes)
postprocess_dict = {fields.DetectionResultFields.detection_boxes: boxes, fields.DetectionResultFields.detection_scores: scores, fields.DetectionResultFields.detection_classes: classes, fields.DetectionResultFields.num_detections: num_detections, 'detection_boxes_strided': boxes_strided}
if self._kp_params_dict:
(keypoints, keypoint_scores) = self._postprocess_keypoints(prediction_dict, classes, y_indices, x_indices, boxes_strided, num_detections)
(keypoints, keypoint_scores) = convert_strided_predictions_to_normalized_keypoints(keypoints, keypoint_scores, self._stride, true_image_shapes, clip_out_of_frame_keypoints=True)
postprocess_dict.update({fields.DetectionResultFields.detection_keypoints: keypoints, fields.DetectionResultFields.detection_keypoint_scores: keypoint_scores})
if self._mask_params:
masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][(- 1)])
(densepose_part_heatmap, densepose_surface_coords) = (None, None)
densepose_class_index = 0
if self._densepose_params:
densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][(- 1)]
densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][(- 1)]
densepose_class_index = self._densepose_params.class_id
(instance_masks, surface_coords) = convert_strided_predictions_to_instance_masks(boxes, classes, masks, true_image_shapes, densepose_part_heatmap, densepose_surface_coords, stride=self._stride, mask_height=self._mask_params.mask_height, mask_width=self._mask_params.mask_width, score_threshold=self._mask_params.score_threshold, densepose_class_index=densepose_class_index)
postprocess_dict[fields.DetectionResultFields.detection_masks] = instance_masks
if self._densepose_params:
postprocess_dict[fields.DetectionResultFields.detection_surface_coords] = surface_coords
if self._track_params:
embeddings = self._postprocess_embeddings(prediction_dict, y_indices, x_indices)
postprocess_dict.update({fields.DetectionResultFields.detection_embeddings: embeddings})
if self._temporal_offset_params:
offsets = prediction_tensors_to_temporal_offsets(y_indices, x_indices, prediction_dict[TEMPORAL_OFFSET][(- 1)])
postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets
return postprocess_dict
|
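A simplified stand-in for the peak-picking step that postprocess() delegates to top_k_feature_map_locations: local maxima of the center heatmap are kept via a 3x3 max-pool comparison and the k highest scores are unpacked into (y, x, class) indices. Shapes are assumed concrete for the sketch.
import tensorflow as tf

def top_k_locations_sketch(center_prob, k=100):
  # center_prob: [batch, h, w, num_classes] sigmoid probabilities.
  pooled = tf.nn.max_pool2d(center_prob, ksize=3, strides=1, padding='SAME')
  peaks = tf.where(tf.equal(center_prob, pooled), center_prob, tf.zeros_like(center_prob))
  batch, h, w, c = peaks.shape
  scores, flat_indices = tf.math.top_k(tf.reshape(peaks, [batch, -1]), k=k)
  channel_indices = flat_indices % c
  x_indices = (flat_indices // c) % w
  y_indices = flat_indices // (c * w)
  return scores, y_indices, x_indices, channel_indices

scores, ys, xs, classes = top_k_locations_sketch(tf.random.uniform([1, 128, 128, 90]), k=10)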
def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices):
'Performs postprocessing on embedding predictions.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors, returned from the\n predict() method. This dictionary should contain embedding prediction\n feature maps for tracking task.\n y_indices: A [batch_size, max_detections] int tensor with y indices for\n all object centers.\n x_indices: A [batch_size, max_detections] int tensor with x indices for\n all object centers.\n\n Returns:\n embeddings: A [batch_size, max_detection, reid_embed_size] float32\n tensor with L2 normalized embeddings extracted from detection box\n centers.\n '
embedding_predictions = prediction_dict[TRACK_REID][(- 1)]
embeddings = predicted_embeddings_at_object_centers(embedding_predictions, y_indices, x_indices)
(embeddings, _) = tf.linalg.normalize(embeddings, axis=(- 1))
return embeddings
| -3,924,071,412,387,423,000 |
Performs postprocessing on embedding predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain embedding prediction
feature maps for tracking task.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
Returns:
embeddings: A [batch_size, max_detections, reid_embed_size] float32
tensor with L2 normalized embeddings extracted from detection box
centers.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_postprocess_embeddings
|
AvikantSrivastava/models
|
python
|
def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices):
'Performs postprocessing on embedding predictions.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors, returned from the\n predict() method. This dictionary should contain embedding prediction\n feature maps for tracking task.\n y_indices: A [batch_size, max_detections] int tensor with y indices for\n all object centers.\n x_indices: A [batch_size, max_detections] int tensor with x indices for\n all object centers.\n\n Returns:\n embeddings: A [batch_size, max_detection, reid_embed_size] float32\n tensor with L2 normalized embeddings extracted from detection box\n centers.\n '
embedding_predictions = prediction_dict[TRACK_REID][(- 1)]
embeddings = predicted_embeddings_at_object_centers(embedding_predictions, y_indices, x_indices)
(embeddings, _) = tf.linalg.normalize(embeddings, axis=(- 1))
return embeddings
|
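A minimal sketch of _postprocess_embeddings above: the embedding feature map is sampled at each detection's center (here with tf.gather_nd standing in for predicted_embeddings_at_object_centers) and the result is L2-normalized along the embedding axis. Shapes and indices are illustrative.
import tensorflow as tf

embedding_map = tf.random.normal([1, 128, 128, 128])          # [batch, h, w, reid_embed_size]
y_indices = tf.constant([[12, 40, 77]])                       # [batch, max_detections]
x_indices = tf.constant([[30, 64, 9]])
batch_idx = tf.zeros_like(y_indices)
gather_indices = tf.stack([batch_idx, y_indices, x_indices], axis=-1)   # [batch, max_detections, 3]
embeddings = tf.gather_nd(embedding_map, gather_indices)                # [batch, max_detections, 128]
embeddings, _ = tf.linalg.normalize(embeddings, axis=-1)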
def _postprocess_keypoints(self, prediction_dict, classes, y_indices, x_indices, boxes, num_detections):
'Performs postprocessing on keypoint predictions.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors, returned from the\n predict() method. This dictionary should contain keypoint prediction\n feature maps for each keypoint task.\n classes: A [batch_size, max_detections] int tensor with class indices for\n all detected objects.\n y_indices: A [batch_size, max_detections] int tensor with y indices for\n all object centers.\n x_indices: A [batch_size, max_detections] int tensor with x indices for\n all object centers.\n boxes: A [batch_size, max_detections, 4] float32 tensor with bounding\n boxes in (un-normalized) output space.\n num_detections: A [batch_size] int tensor with the number of valid\n detections for each image.\n\n Returns:\n A tuple of\n keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32\n tensor with keypoints in the output (strided) coordinate frame.\n keypoint_scores: a [batch_size, max_detections, num_total_keypoints]\n float32 tensor with keypoint scores.\n '
total_num_keypoints = sum((len(kp_dict.keypoint_indices) for kp_dict in self._kp_params_dict.values()))
(batch_size, max_detections, _) = _get_shape(boxes, 3)
kpt_coords_for_example_list = []
kpt_scores_for_example_list = []
for ex_ind in range(batch_size):
kpt_coords_for_class_list = []
kpt_scores_for_class_list = []
instance_inds_for_class_list = []
for (task_name, kp_params) in self._kp_params_dict.items():
keypoint_heatmap = prediction_dict[get_keypoint_name(task_name, KEYPOINT_HEATMAP)][(- 1)]
keypoint_offsets = prediction_dict[get_keypoint_name(task_name, KEYPOINT_OFFSET)][(- 1)]
keypoint_regression = prediction_dict[get_keypoint_name(task_name, KEYPOINT_REGRESSION)][(- 1)]
instance_inds = self._get_instance_indices(classes, num_detections, ex_ind, kp_params.class_id)
def true_fn(keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params):
'Logics to execute when instance_inds is not an empty set.'
(kpt_coords_for_class, kpt_scores_for_class) = self._postprocess_keypoints_for_class_and_image(keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params)
(kpts_coords_for_class_padded, kpt_scores_for_class_padded) = _pad_to_full_keypoint_dim(kpt_coords_for_class, kpt_scores_for_class, kp_params.keypoint_indices, total_num_keypoints)
return (kpts_coords_for_class_padded, kpt_scores_for_class_padded)
def false_fn():
'Logics to execute when the instance_inds is an empty set.'
return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32), tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32))
true_fn = functools.partial(true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params)
results = tf.cond((tf.size(instance_inds) > 0), true_fn, false_fn)
kpt_coords_for_class_list.append(results[0])
kpt_scores_for_class_list.append(results[1])
instance_inds_for_class_list.append(instance_inds)
kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1)
kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1)
instance_inds_for_example = tf.concat(instance_inds_for_class_list, axis=0)
if (tf.size(instance_inds_for_example) > 0):
(kpt_coords_for_example_all_det, kpt_scores_for_example_all_det) = _pad_to_full_instance_dim(kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example, self._center_params.max_box_predictions)
else:
kpt_coords_for_example_all_det = tf.zeros([1, max_detections, total_num_keypoints, 2], dtype=tf.float32)
kpt_scores_for_example_all_det = tf.zeros([1, max_detections, total_num_keypoints], dtype=tf.float32)
kpt_coords_for_example_list.append(kpt_coords_for_example_all_det)
kpt_scores_for_example_list.append(kpt_scores_for_example_all_det)
keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
return (keypoints, keypoint_scores)
| -4,493,164,361,329,250,300 |
Performs postprocessing on keypoint predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain keypoint prediction
feature maps for each keypoint task.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with bounding
boxes in (un-normalized) output space.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
Returns:
A tuple of
keypoints: a [batch_size, max_detections, num_total_keypoints, 2] float32
tensor with keypoints in the output (strided) coordinate frame.
keypoint_scores: a [batch_size, max_detections, num_total_keypoints]
float32 tensor with keypoint scores.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_postprocess_keypoints
|
AvikantSrivastava/models
|
python
|
def _postprocess_keypoints(self, prediction_dict, classes, y_indices, x_indices, boxes, num_detections):
'Performs postprocessing on keypoint predictions.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors, returned from the\n predict() method. This dictionary should contain keypoint prediction\n feature maps for each keypoint task.\n classes: A [batch_size, max_detections] int tensor with class indices for\n all detected objects.\n y_indices: A [batch_size, max_detections] int tensor with y indices for\n all object centers.\n x_indices: A [batch_size, max_detections] int tensor with x indices for\n all object centers.\n boxes: A [batch_size, max_detections, 4] float32 tensor with bounding\n boxes in (un-normalized) output space.\n num_detections: A [batch_size] int tensor with the number of valid\n detections for each image.\n\n Returns:\n A tuple of\n keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32\n tensor with keypoints in the output (strided) coordinate frame.\n keypoint_scores: a [batch_size, max_detections, num_total_keypoints]\n float32 tensor with keypoint scores.\n '
total_num_keypoints = sum((len(kp_dict.keypoint_indices) for kp_dict in self._kp_params_dict.values()))
(batch_size, max_detections, _) = _get_shape(boxes, 3)
kpt_coords_for_example_list = []
kpt_scores_for_example_list = []
for ex_ind in range(batch_size):
kpt_coords_for_class_list = []
kpt_scores_for_class_list = []
instance_inds_for_class_list = []
for (task_name, kp_params) in self._kp_params_dict.items():
keypoint_heatmap = prediction_dict[get_keypoint_name(task_name, KEYPOINT_HEATMAP)][(- 1)]
keypoint_offsets = prediction_dict[get_keypoint_name(task_name, KEYPOINT_OFFSET)][(- 1)]
keypoint_regression = prediction_dict[get_keypoint_name(task_name, KEYPOINT_REGRESSION)][(- 1)]
instance_inds = self._get_instance_indices(classes, num_detections, ex_ind, kp_params.class_id)
def true_fn(keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params):
'Logics to execute when instance_inds is not an empty set.'
(kpt_coords_for_class, kpt_scores_for_class) = self._postprocess_keypoints_for_class_and_image(keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params)
(kpts_coords_for_class_padded, kpt_scores_for_class_padded) = _pad_to_full_keypoint_dim(kpt_coords_for_class, kpt_scores_for_class, kp_params.keypoint_indices, total_num_keypoints)
return (kpts_coords_for_class_padded, kpt_scores_for_class_padded)
def false_fn():
'Logics to execute when the instance_inds is an empty set.'
return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32), tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32))
true_fn = functools.partial(true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params)
results = tf.cond((tf.size(instance_inds) > 0), true_fn, false_fn)
kpt_coords_for_class_list.append(results[0])
kpt_scores_for_class_list.append(results[1])
instance_inds_for_class_list.append(instance_inds)
kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1)
kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1)
instance_inds_for_example = tf.concat(instance_inds_for_class_list, axis=0)
if (tf.size(instance_inds_for_example) > 0):
(kpt_coords_for_example_all_det, kpt_scores_for_example_all_det) = _pad_to_full_instance_dim(kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example, self._center_params.max_box_predictions)
else:
kpt_coords_for_example_all_det = tf.zeros([1, max_detections, total_num_keypoints, 2], dtype=tf.float32)
kpt_scores_for_example_all_det = tf.zeros([1, max_detections, total_num_keypoints], dtype=tf.float32)
kpt_coords_for_example_list.append(kpt_coords_for_example_all_det)
kpt_scores_for_example_list.append(kpt_scores_for_example_all_det)
keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
return (keypoints, keypoint_scores)
|
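The per-class loop in the record above wraps the refinement step in tf.cond so that an empty instance set short-circuits to zero-length tensors of matching rank. Below is a stripped-down sketch of that control-flow pattern only; the work inside true_fn is a placeholder, not the real keypoint refinement.

import functools
import tensorflow as tf

def process_instances(instance_inds, num_keypoints=17):
    def true_fn(inds):
        # Placeholder for the real per-instance keypoint refinement.
        n = tf.size(inds)
        coords = tf.zeros(tf.stack([1, n, num_keypoints, 2]))
        scores = tf.zeros(tf.stack([1, n, num_keypoints]))
        return coords, scores

    def false_fn():
        # Empty instance set: zero-length results with the same rank.
        return (tf.zeros([1, 0, num_keypoints, 2]),
                tf.zeros([1, 0, num_keypoints]))

    true_fn = functools.partial(true_fn, instance_inds)
    return tf.cond(tf.size(instance_inds) > 0, true_fn, false_fn)

coords, scores = process_instances(tf.constant([0, 2]))
print(coords.shape, scores.shape)  # (1, 2, 17, 2) (1, 2, 17)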
def _get_instance_indices(self, classes, num_detections, batch_index, class_id):
'Gets the instance indices that match the target class ID.\n\n Args:\n classes: A [batch_size, max_detections] int tensor with class indices for\n all detected objects.\n num_detections: A [batch_size] int tensor with the number of valid\n detections for each image.\n batch_index: An integer specifying the index for an example in the batch.\n class_id: Class id\n\n Returns:\n instance_inds: A [num_instances] int tensor where each element indicates\n the instance location within the `classes` tensor. This is useful to\n associate the refined keypoints with the original detections (i.e.\n boxes)\n '
classes = classes[batch_index:(batch_index + 1), ...]
(_, max_detections) = shape_utils.combined_static_and_dynamic_shape(classes)
valid_detections_with_kpt_class = tf.math.logical_and((tf.range(max_detections) < num_detections[batch_index]), (classes[0] == class_id))
instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]
return instance_inds
| -8,815,530,122,229,937,000 |
Gets the instance indices that match the target class ID.
Args:
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
batch_index: An integer specifying the index for an example in the batch.
class_id: The integer class ID to match detections against.
Returns:
instance_inds: A [num_instances] int tensor where each element indicates
the instance location within the `classes` tensor. This is useful to
associate the refined keypoints with the original detections (i.e.
boxes)
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_get_instance_indices
|
AvikantSrivastava/models
|
python
|
def _get_instance_indices(self, classes, num_detections, batch_index, class_id):
'Gets the instance indices that match the target class ID.\n\n Args:\n classes: A [batch_size, max_detections] int tensor with class indices for\n all detected objects.\n num_detections: A [batch_size] int tensor with the number of valid\n detections for each image.\n batch_index: An integer specifying the index for an example in the batch.\n class_id: Class id\n\n Returns:\n instance_inds: A [num_instances] int tensor where each element indicates\n the instance location within the `classes` tensor. This is useful to\n associate the refined keypoints with the original detections (i.e.\n boxes)\n '
classes = classes[batch_index:(batch_index + 1), ...]
(_, max_detections) = shape_utils.combined_static_and_dynamic_shape(classes)
valid_detections_with_kpt_class = tf.math.logical_and((tf.range(max_detections) < num_detections[batch_index]), (classes[0] == class_id))
instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]
return instance_inds
|
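The selection in the record above is just two boolean masks combined: "still within the valid detection count" and "class matches". A self-contained sketch with toy values (the class IDs and counts below are made up):

import tensorflow as tf

def instance_indices(classes, num_detections, batch_index, class_id):
    # classes: [batch, max_detections] int tensor; num_detections: [batch] int tensor.
    row = classes[batch_index]
    max_detections = tf.shape(row)[0]
    valid = tf.range(max_detections) < num_detections[batch_index]
    matches = tf.equal(row, class_id)
    return tf.where(tf.logical_and(valid, matches))[:, 0]

classes = tf.constant([[0, 2, 2, 1, 0]])
num_detections = tf.constant([4])  # the fifth entry is padding
print(instance_indices(classes, num_detections, 0, 2).numpy())  # [1 2]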
def _postprocess_keypoints_for_class_and_image(self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, indices_with_kpt_class, batch_index, kp_params):
'Postprocess keypoints for a single image and class.\n\n This function performs the following postprocessing operations on a single\n image and single keypoint class:\n - Converts keypoints scores to range [0, 1] with sigmoid.\n - Determines the detections that correspond to the specified keypoint class.\n - Gathers the regressed keypoints at the detection (i.e. box) centers.\n - Gathers keypoint candidates from the keypoint heatmaps.\n - Snaps regressed keypoints to nearby keypoint candidates.\n\n Args:\n keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32\n tensor with keypoint heatmaps.\n keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with\n local offsets to keypoint centers.\n keypoint_regression: A [batch_size, height, width, 2 * num_keypoints]\n float32 tensor with regressed offsets to all keypoints.\n classes: A [batch_size, max_detections] int tensor with class indices for\n all detected objects.\n y_indices: A [batch_size, max_detections] int tensor with y indices for\n all object centers.\n x_indices: A [batch_size, max_detections] int tensor with x indices for\n all object centers.\n boxes: A [batch_size, max_detections, 4] float32 tensor with detected\n boxes in the output (strided) frame.\n indices_with_kpt_class: A [num_instances] int tensor where each element\n indicates the instance location within the `classes` tensor. This is\n useful to associate the refined keypoints with the original detections\n (i.e. boxes)\n batch_index: An integer specifying the index for an example in the batch.\n kp_params: A `KeypointEstimationParams` object with parameters for a\n single keypoint class.\n\n Returns:\n A tuple of\n refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor\n with refined keypoints for a single class in a single image, expressed\n in the output (strided) coordinate frame. Note that `num_instances` is a\n dynamic dimension, and corresponds to the number of valid detections\n for the specific class.\n refined_scores: A [1, num_instances, num_keypoints] float32 tensor with\n keypoint scores.\n '
keypoint_indices = kp_params.keypoint_indices
num_keypoints = len(keypoint_indices)
keypoint_heatmap = tf.nn.sigmoid(keypoint_heatmap[batch_index:(batch_index + 1), ...])
keypoint_offsets = keypoint_offsets[batch_index:(batch_index + 1), ...]
keypoint_regression = keypoint_regression[batch_index:(batch_index + 1), ...]
y_indices = y_indices[batch_index:(batch_index + 1), ...]
x_indices = x_indices[batch_index:(batch_index + 1), ...]
y_indices_for_kpt_class = tf.gather(y_indices, indices_with_kpt_class, axis=1)
x_indices_for_kpt_class = tf.gather(x_indices, indices_with_kpt_class, axis=1)
boxes_for_kpt_class = tf.gather(boxes, indices_with_kpt_class, axis=1)
regressed_keypoints_for_objects = regressed_keypoints_at_object_centers(keypoint_regression, y_indices_for_kpt_class, x_indices_for_kpt_class)
regressed_keypoints_for_objects = tf.reshape(regressed_keypoints_for_objects, [1, (- 1), num_keypoints, 2])
(keypoint_candidates, keypoint_scores, num_keypoint_candidates) = prediction_tensors_to_keypoint_candidates(keypoint_heatmap, keypoint_offsets, keypoint_score_threshold=kp_params.keypoint_candidate_score_threshold, max_pool_kernel_size=kp_params.peak_max_pool_kernel_size, max_candidates=kp_params.num_candidates_per_keypoint)
(refined_keypoints, refined_scores) = refine_keypoints(regressed_keypoints_for_objects, keypoint_candidates, keypoint_scores, num_keypoint_candidates, bboxes=boxes_for_kpt_class, unmatched_keypoint_score=kp_params.unmatched_keypoint_score, box_scale=kp_params.box_scale, candidate_search_scale=kp_params.candidate_search_scale, candidate_ranking_mode=kp_params.candidate_ranking_mode)
return (refined_keypoints, refined_scores)
| 4,418,362,711,257,832,400 |
Postprocess keypoints for a single image and class.
This function performs the following postprocessing operations on a single
image and single keypoint class:
- Converts keypoint scores to the range [0, 1] with a sigmoid.
- Determines the detections that correspond to the specified keypoint class.
- Gathers the regressed keypoints at the detection (i.e. box) centers.
- Gathers keypoint candidates from the keypoint heatmaps.
- Snaps regressed keypoints to nearby keypoint candidates.
Args:
keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32
tensor with keypoint heatmaps.
keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with
local offsets to keypoint centers.
keypoint_regression: A [batch_size, height, width, 2 * num_keypoints]
float32 tensor with regressed offsets to all keypoints.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with detected
boxes in the output (strided) frame.
indices_with_kpt_class: A [num_instances] int tensor where each element
indicates the instance location within the `classes` tensor. This is
useful to associate the refined keypoints with the original detections
(i.e. boxes)
batch_index: An integer specifying the index for an example in the batch.
kp_params: A `KeypointEstimationParams` object with parameters for a
single keypoint class.
Returns:
A tuple of
refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor
with refined keypoints for a single class in a single image, expressed
in the output (strided) coordinate frame. Note that `num_instances` is a
dynamic dimension, and corresponds to the number of valid detections
for the specific class.
refined_scores: A [1, num_instances, num_keypoints] float32 tensor with
keypoint scores.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
_postprocess_keypoints_for_class_and_image
|
AvikantSrivastava/models
|
python
|
def _postprocess_keypoints_for_class_and_image(self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, indices_with_kpt_class, batch_index, kp_params):
'Postprocess keypoints for a single image and class.\n\n This function performs the following postprocessing operations on a single\n image and single keypoint class:\n - Converts keypoints scores to range [0, 1] with sigmoid.\n - Determines the detections that correspond to the specified keypoint class.\n - Gathers the regressed keypoints at the detection (i.e. box) centers.\n - Gathers keypoint candidates from the keypoint heatmaps.\n - Snaps regressed keypoints to nearby keypoint candidates.\n\n Args:\n keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32\n tensor with keypoint heatmaps.\n keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with\n local offsets to keypoint centers.\n keypoint_regression: A [batch_size, height, width, 2 * num_keypoints]\n float32 tensor with regressed offsets to all keypoints.\n classes: A [batch_size, max_detections] int tensor with class indices for\n all detected objects.\n y_indices: A [batch_size, max_detections] int tensor with y indices for\n all object centers.\n x_indices: A [batch_size, max_detections] int tensor with x indices for\n all object centers.\n boxes: A [batch_size, max_detections, 4] float32 tensor with detected\n boxes in the output (strided) frame.\n indices_with_kpt_class: A [num_instances] int tensor where each element\n indicates the instance location within the `classes` tensor. This is\n useful to associate the refined keypoints with the original detections\n (i.e. boxes)\n batch_index: An integer specifying the index for an example in the batch.\n kp_params: A `KeypointEstimationParams` object with parameters for a\n single keypoint class.\n\n Returns:\n A tuple of\n refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor\n with refined keypoints for a single class in a single image, expressed\n in the output (strided) coordinate frame. Note that `num_instances` is a\n dynamic dimension, and corresponds to the number of valid detections\n for the specific class.\n refined_scores: A [1, num_instances, num_keypoints] float32 tensor with\n keypoint scores.\n '
keypoint_indices = kp_params.keypoint_indices
num_keypoints = len(keypoint_indices)
keypoint_heatmap = tf.nn.sigmoid(keypoint_heatmap[batch_index:(batch_index + 1), ...])
keypoint_offsets = keypoint_offsets[batch_index:(batch_index + 1), ...]
keypoint_regression = keypoint_regression[batch_index:(batch_index + 1), ...]
y_indices = y_indices[batch_index:(batch_index + 1), ...]
x_indices = x_indices[batch_index:(batch_index + 1), ...]
y_indices_for_kpt_class = tf.gather(y_indices, indices_with_kpt_class, axis=1)
x_indices_for_kpt_class = tf.gather(x_indices, indices_with_kpt_class, axis=1)
boxes_for_kpt_class = tf.gather(boxes, indices_with_kpt_class, axis=1)
regressed_keypoints_for_objects = regressed_keypoints_at_object_centers(keypoint_regression, y_indices_for_kpt_class, x_indices_for_kpt_class)
regressed_keypoints_for_objects = tf.reshape(regressed_keypoints_for_objects, [1, (- 1), num_keypoints, 2])
(keypoint_candidates, keypoint_scores, num_keypoint_candidates) = prediction_tensors_to_keypoint_candidates(keypoint_heatmap, keypoint_offsets, keypoint_score_threshold=kp_params.keypoint_candidate_score_threshold, max_pool_kernel_size=kp_params.peak_max_pool_kernel_size, max_candidates=kp_params.num_candidates_per_keypoint)
(refined_keypoints, refined_scores) = refine_keypoints(regressed_keypoints_for_objects, keypoint_candidates, keypoint_scores, num_keypoint_candidates, bboxes=boxes_for_kpt_class, unmatched_keypoint_score=kp_params.unmatched_keypoint_score, box_scale=kp_params.box_scale, candidate_search_scale=kp_params.candidate_search_scale, candidate_ranking_mode=kp_params.candidate_ranking_mode)
return (refined_keypoints, refined_scores)
|
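The candidate-gathering call above (prediction_tensors_to_keypoint_candidates) is not reproduced here, but the usual way such heatmap peaks are picked is max-pool non-maximum suppression followed by a score threshold, which is what the peak_max_pool_kernel_size and keypoint_candidate_score_threshold parameters it receives suggest. A hedged sketch of that standard trick, not of the library function itself:

import tensorflow as tf

def heatmap_peaks(heatmap, kernel_size=3, score_threshold=0.1):
    # Keep only local maxima of a [batch, H, W, num_keypoints] heatmap.
    pooled = tf.nn.max_pool2d(heatmap, ksize=kernel_size, strides=1, padding='SAME')
    is_peak = tf.equal(heatmap, pooled)
    keep = tf.logical_and(is_peak, heatmap > score_threshold)
    return tf.where(keep, heatmap, tf.zeros_like(heatmap))

heatmap = tf.nn.sigmoid(tf.random.normal([1, 8, 8, 2]))
print(tf.math.count_nonzero(heatmap_peaks(heatmap)).numpy())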
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"Returns a map of Trackable objects to load from a foreign checkpoint.\n\n Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module\n or Checkpoint). This enables the model to initialize based on weights from\n another task. For example, the feature extractor variables from a\n classification model can be used to bootstrap training of an object\n detector. When loading from an object detection model, the checkpoint model\n should have the same parameters as this detection model with exception of\n the num_classes parameter.\n\n Note that this function is intended to be used to restore Keras-based\n models when running Tensorflow 2, whereas restore_map (not implemented\n in CenterNet) is intended to be used to restore Slim-based models when\n running Tensorflow 1.x.\n\n TODO(jonathanhuang): Make this function consistent with other\n meta-architectures.\n\n Args:\n fine_tune_checkpoint_type: whether to restore from a full detection\n checkpoint (with compatible variable names) or to restore from a\n classification checkpoint for initialization prior to training.\n Valid values: `detection`, `classification`, `fine_tune`.\n Default 'detection'.\n 'detection': used when loading models pre-trained on other detection\n tasks. With this checkpoint type the weights of the feature extractor\n are expected under the attribute 'feature_extractor'.\n 'classification': used when loading models pre-trained on an image\n classification task. Note that only the encoder section of the network\n is loaded and not the upsampling layers. With this checkpoint type,\n the weights of only the encoder section are expected under the\n attribute 'feature_extractor'.\n 'fine_tune': used when loading the entire CenterNet feature extractor\n pre-trained on other tasks. The checkpoints saved during CenterNet\n model training can be directly loaded using this type. With this\n checkpoint type, the weights of the feature extractor are expected\n under the attribute 'model._feature_extractor'.\n For more details, see the tensorflow section on Loading mechanics.\n https://www.tensorflow.org/guide/checkpoint#loading_mechanics\n\n Returns:\n A dict mapping keys to Trackable objects (tf.Module or Checkpoint).\n "
supported_types = self._feature_extractor.supported_sub_model_types
supported_types += ['fine_tune']
if (fine_tune_checkpoint_type not in supported_types):
message = 'Checkpoint type "{}" not supported for {}. Supported types are {}'
raise ValueError(message.format(fine_tune_checkpoint_type, self._feature_extractor.__class__.__name__, supported_types))
elif (fine_tune_checkpoint_type == 'fine_tune'):
feature_extractor_model = tf.train.Checkpoint(_feature_extractor=self._feature_extractor)
return {'model': feature_extractor_model}
else:
return {'feature_extractor': self._feature_extractor.get_sub_model(fine_tune_checkpoint_type)}
| 3,968,676,734,106,757,600 |
Returns a map of Trackable objects to load from a foreign checkpoint.
Returns a dictionary of TensorFlow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
should have the same parameters as this detection model, with the exception
of the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running TensorFlow 2, whereas restore_map (not implemented
in CenterNet) is intended to be used to restore Slim-based models when
running TensorFlow 1.x.
TODO(jonathanhuang): Make this function consistent with other
meta-architectures.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`, `fine_tune`.
Default 'detection'.
'detection': used when loading models pre-trained on other detection
tasks. With this checkpoint type the weights of the feature extractor
are expected under the attribute 'feature_extractor'.
'classification': used when loading models pre-trained on an image
classification task. Note that only the encoder section of the network
is loaded and not the upsampling layers. With this checkpoint type,
the weights of only the encoder section are expected under the
attribute 'feature_extractor'.
'fine_tune': used when loading the entire CenterNet feature extractor
pre-trained on other tasks. The checkpoints saved during CenterNet
model training can be directly loaded using this type. With this
checkpoint type, the weights of the feature extractor are expected
under the attribute 'model._feature_extractor'.
For more details, see the TensorFlow guide on loading mechanics.
https://www.tensorflow.org/guide/checkpoint#loading_mechanics
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
restore_from_objects
|
AvikantSrivastava/models
|
python
|
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"Returns a map of Trackable objects to load from a foreign checkpoint.\n\n Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module\n or Checkpoint). This enables the model to initialize based on weights from\n another task. For example, the feature extractor variables from a\n classification model can be used to bootstrap training of an object\n detector. When loading from an object detection model, the checkpoint model\n should have the same parameters as this detection model with exception of\n the num_classes parameter.\n\n Note that this function is intended to be used to restore Keras-based\n models when running Tensorflow 2, whereas restore_map (not implemented\n in CenterNet) is intended to be used to restore Slim-based models when\n running Tensorflow 1.x.\n\n TODO(jonathanhuang): Make this function consistent with other\n meta-architectures.\n\n Args:\n fine_tune_checkpoint_type: whether to restore from a full detection\n checkpoint (with compatible variable names) or to restore from a\n classification checkpoint for initialization prior to training.\n Valid values: `detection`, `classification`, `fine_tune`.\n Default 'detection'.\n 'detection': used when loading models pre-trained on other detection\n tasks. With this checkpoint type the weights of the feature extractor\n are expected under the attribute 'feature_extractor'.\n 'classification': used when loading models pre-trained on an image\n classification task. Note that only the encoder section of the network\n is loaded and not the upsampling layers. With this checkpoint type,\n the weights of only the encoder section are expected under the\n attribute 'feature_extractor'.\n 'fine_tune': used when loading the entire CenterNet feature extractor\n pre-trained on other tasks. The checkpoints saved during CenterNet\n model training can be directly loaded using this type. With this\n checkpoint type, the weights of the feature extractor are expected\n under the attribute 'model._feature_extractor'.\n For more details, see the tensorflow section on Loading mechanics.\n https://www.tensorflow.org/guide/checkpoint#loading_mechanics\n\n Returns:\n A dict mapping keys to Trackable objects (tf.Module or Checkpoint).\n "
supported_types = self._feature_extractor.supported_sub_model_types
supported_types += ['fine_tune']
if (fine_tune_checkpoint_type not in supported_types):
message = 'Checkpoint type "{}" not supported for {}. Supported types are {}'
raise ValueError(message.format(fine_tune_checkpoint_type, self._feature_extractor.__class__.__name__, supported_types))
elif (fine_tune_checkpoint_type == 'fine_tune'):
feature_extractor_model = tf.train.Checkpoint(_feature_extractor=self._feature_extractor)
return {'model': feature_extractor_model}
else:
return {'feature_extractor': self._feature_extractor.get_sub_model(fine_tune_checkpoint_type)}
|
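In typical use the returned map is handed to tf.train.Checkpoint and restored from a checkpoint file. A usage sketch under those assumptions; the model construction and checkpoint path are placeholders, not part of the record above:

import tensorflow as tf

def restore_feature_extractor(model, ckpt_path, checkpoint_type='detection'):
    # `model` is assumed to be a built CenterNet model exposing restore_from_objects;
    # `ckpt_path` is a hypothetical checkpoint with compatible variable names.
    restore_map = model.restore_from_objects(fine_tune_checkpoint_type=checkpoint_type)
    ckpt = tf.train.Checkpoint(**restore_map)
    # expect_partial() silences warnings about variables (e.g. prediction heads)
    # that are intentionally absent from the restore map.
    ckpt.restore(ckpt_path).expect_partial()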
def true_fn(keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params):
'Logics to execute when instance_inds is not an empty set.'
(kpt_coords_for_class, kpt_scores_for_class) = self._postprocess_keypoints_for_class_and_image(keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params)
(kpts_coords_for_class_padded, kpt_scores_for_class_padded) = _pad_to_full_keypoint_dim(kpt_coords_for_class, kpt_scores_for_class, kp_params.keypoint_indices, total_num_keypoints)
return (kpts_coords_for_class_padded, kpt_scores_for_class_padded)
| 9,145,846,410,273,019,000 |
Logic to execute when instance_inds is not empty.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
true_fn
|
AvikantSrivastava/models
|
python
|
def true_fn(keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params):
(kpt_coords_for_class, kpt_scores_for_class) = self._postprocess_keypoints_for_class_and_image(keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params)
(kpts_coords_for_class_padded, kpt_scores_for_class_padded) = _pad_to_full_keypoint_dim(kpt_coords_for_class, kpt_scores_for_class, kp_params.keypoint_indices, total_num_keypoints)
return (kpts_coords_for_class_padded, kpt_scores_for_class_padded)
|
def false_fn():
'Logics to execute when the instance_inds is an empty set.'
return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32), tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32))
| -7,194,681,745,874,285,000 |
Logic to execute when instance_inds is empty.
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
false_fn
|
AvikantSrivastava/models
|
python
|
def false_fn():
return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32), tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32))
|
def _single_replace(self, to_replace, method, inplace, limit):
'\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n '
if (self.ndim != 1):
raise TypeError('cannot replace {0} with method {1} on a {2}'.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = (self if inplace else self.copy())
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if ((values.dtype == orig_dtype) and inplace):
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
| 3,824,816,040,778,656,000 |
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
|
pandas/core/generic.py
|
_single_replace
|
kapilepatel/pandas
|
python
|
def _single_replace(self, to_replace, method, inplace, limit):
'\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n '
if (self.ndim != 1):
raise TypeError('cannot replace {0} with method {1} on a {2}'.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = (self if inplace else self.copy())
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if ((values.dtype == orig_dtype) and inplace):
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
|
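Behaviourally, the helper above amounts to masking the values to replace and filling the gaps from neighbouring entries. A rough sketch of the same effect using only public pandas API; this is an approximation of the idea, not the private code path:

import pandas as pd

s = pd.Series([1.0, -999.0, -999.0, 4.0, -999.0])
# Mask the sentinel values, then fill each gap from the previous valid entry,
# roughly what replace(..., method='ffill') does for a Series.
filled = s.mask(s == -999.0).ffill()
print(filled.tolist())  # [1.0, 1.0, 1.0, 4.0, 4.0]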
def _doc_parms(cls):
'Return a tuple of the doc parms.'
axis_descr = ('{%s}' % ', '.join(('{0} ({1})'.format(a, i) for (i, a) in enumerate(cls._AXIS_ORDERS))))
name = (cls._constructor_sliced.__name__ if (cls._AXIS_LEN > 1) else 'scalar')
name2 = cls.__name__
return (axis_descr, name, name2)
| -8,152,487,647,864,077,000 |
Return a tuple of the docstring substitution parameters.
|
pandas/core/generic.py
|
_doc_parms
|
kapilepatel/pandas
|
python
|
def _doc_parms(cls):
axis_descr = ('{%s}' % ', '.join(('{0} ({1})'.format(a, i) for (i, a) in enumerate(cls._AXIS_ORDERS))))
name = (cls._constructor_sliced.__name__ if (cls._AXIS_LEN > 1) else 'scalar')
name2 = cls.__name__
return (axis_descr, name, name2)
|
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
' passed a manager and a axes dict '
for (a, axe) in axes.items():
if (axe is not None):
mgr = mgr.reindex_axis(axe, axis=self._get_block_manager_axis(a), copy=False)
if copy:
mgr = mgr.copy()
if (dtype is not None):
if ((len(mgr.blocks) > 1) or (mgr.blocks[0].values.dtype != dtype)):
mgr = mgr.astype(dtype=dtype)
return mgr
| 5,469,990,824,350,398,000 |
Passed a manager and an axes dict.
|
pandas/core/generic.py
|
_init_mgr
|
kapilepatel/pandas
|
python
|
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
' '
for (a, axe) in axes.items():
if (axe is not None):
mgr = mgr.reindex_axis(axe, axis=self._get_block_manager_axis(a), copy=False)
if copy:
mgr = mgr.copy()
if (dtype is not None):
if ((len(mgr.blocks) > 1) or (mgr.blocks[0].values.dtype != dtype)):
mgr = mgr.astype(dtype=dtype)
return mgr
|
@property
def is_copy(self):
'\n Return the copy.\n '
warnings.warn("Attribute 'is_copy' is deprecated and will be removed in a future version.", FutureWarning, stacklevel=2)
return self._is_copy
| -5,138,356,737,473,574,000 |
Return the copy.
|
pandas/core/generic.py
|
is_copy
|
kapilepatel/pandas
|
python
|
@property
def is_copy(self):
'\n \n '
warnings.warn("Attribute 'is_copy' is deprecated and will be removed in a future version.", FutureWarning, stacklevel=2)
return self._is_copy
|
def _validate_dtype(self, dtype):
' validate the passed dtype '
if (dtype is not None):
dtype = pandas_dtype(dtype)
if (dtype.kind == 'V'):
raise NotImplementedError('compound dtypes are not implemented in the {0} constructor'.format(self.__class__.__name__))
return dtype
| -2,802,526,631,258,644,000 |
Validate the passed dtype.
|
pandas/core/generic.py
|
_validate_dtype
|
kapilepatel/pandas
|
python
|
def _validate_dtype(self, dtype):
' '
if (dtype is not None):
dtype = pandas_dtype(dtype)
if (dtype.kind == 'V'):
raise NotImplementedError('compound dtypes are not implemented in the {0} constructor'.format(self.__class__.__name__))
return dtype
|
@property
def _constructor(self):
'Used when a manipulation result has the same dimensions as the\n original.\n '
raise AbstractMethodError(self)
| 6,925,604,355,509,617,000 |
Used when a manipulation result has the same dimensions as the
original.
|
pandas/core/generic.py
|
_constructor
|
kapilepatel/pandas
|
python
|
@property
def _constructor(self):
'Used when a manipulation result has the same dimensions as the\n original.\n '
raise AbstractMethodError(self)
|
@property
def _constructor_sliced(self):
'Used when a manipulation result has one lower dimension(s) as the\n original, such as DataFrame single columns slicing.\n '
raise AbstractMethodError(self)
| -4,845,604,620,012,364,000 |
Used when a manipulation result has one dimension lower than the
original, such as slicing a single DataFrame column.
|
pandas/core/generic.py
|
_constructor_sliced
|
kapilepatel/pandas
|
python
|
@property
def _constructor_sliced(self):
'Used when a manipulation result has one lower dimension(s) as the\n original, such as DataFrame single columns slicing.\n '
raise AbstractMethodError(self)
|
@property
def _constructor_expanddim(self):
'Used when a manipulation result has one higher dimension as the\n original, such as Series.to_frame() and DataFrame.to_panel()\n '
raise NotImplementedError
| 5,577,836,939,290,729,000 |
Used when a manipulation result has one dimension higher than the
original, such as Series.to_frame() and DataFrame.to_panel().
|
pandas/core/generic.py
|
_constructor_expanddim
|
kapilepatel/pandas
|
python
|
@property
def _constructor_expanddim(self):
'Used when a manipulation result has one higher dimension as the\n original, such as Series.to_frame() and DataFrame.to_panel()\n '
raise NotImplementedError
|
@classmethod
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None, slicers=None, axes_are_reversed=False, build_axes=True, ns=None, docs=None):
'Provide axes setup for the major PandasObjects.\n\n Parameters\n ----------\n axes : the names of the axes in order (lowest to highest)\n info_axis_num : the axis of the selector dimension (int)\n stat_axis_num : the number of axis for the default stats (int)\n aliases : other names for a single axis (dict)\n slicers : how axes slice to others (dict)\n axes_are_reversed : boolean whether to treat passed axes as\n reversed (DataFrame)\n build_axes : setup the axis properties (default True)\n '
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for (i, a) in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = (aliases or dict())
cls._AXIS_IALIASES = {v: k for (k, v) in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = (slicers or None)
cls._AXIS_REVERSED = axes_are_reversed
setattr(cls, '_typ', cls.__name__.lower())
cls._ix = None
if (info_axis is not None):
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if (stat_axis is not None):
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = (cls._AXIS_LEN - 1)
for (i, a) in cls._AXIS_NAMES.items():
set_axis(a, (m - i))
else:
for (i, a) in cls._AXIS_NAMES.items():
set_axis(a, i)
assert (not isinstance(ns, dict))
| -2,622,064,521,481,804,000 |
Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis : the axis of the selector dimension (int)
stat_axis : the axis number for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean, whether to treat the passed axes as
reversed (DataFrame)
build_axes : set up the axis properties (default True)
|
pandas/core/generic.py
|
_setup_axes
|
kapilepatel/pandas
|
python
|
@classmethod
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None, slicers=None, axes_are_reversed=False, build_axes=True, ns=None, docs=None):
'Provide axes setup for the major PandasObjects.\n\n Parameters\n ----------\n axes : the names of the axes in order (lowest to highest)\n info_axis_num : the axis of the selector dimension (int)\n stat_axis_num : the number of axis for the default stats (int)\n aliases : other names for a single axis (dict)\n slicers : how axes slice to others (dict)\n axes_are_reversed : boolean whether to treat passed axes as\n reversed (DataFrame)\n build_axes : setup the axis properties (default True)\n '
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for (i, a) in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = (aliases or dict())
cls._AXIS_IALIASES = {v: k for (k, v) in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = (slicers or None)
cls._AXIS_REVERSED = axes_are_reversed
setattr(cls, '_typ', cls.__name__.lower())
cls._ix = None
if (info_axis is not None):
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if (stat_axis is not None):
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = (cls._AXIS_LEN - 1)
for (i, a) in cls._AXIS_NAMES.items():
set_axis(a, (m - i))
else:
for (i, a) in cls._AXIS_NAMES.items():
set_axis(a, i)
assert (not isinstance(ns, dict))
|
def _construct_axes_dict(self, axes=None, **kwargs):
'Return an axes dictionary for myself.'
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
| 9,032,129,010,733,145,000 |
Return an axes dictionary for myself.
|
pandas/core/generic.py
|
_construct_axes_dict
|
kapilepatel/pandas
|
python
|
def _construct_axes_dict(self, axes=None, **kwargs):
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
|
@staticmethod
def _construct_axes_dict_from(self, axes, **kwargs):
'Return an axes dictionary for the passed axes.'
d = {a: ax for (a, ax) in zip(self._AXIS_ORDERS, axes)}
d.update(kwargs)
return d
| -5,779,026,038,520,304,000 |
Return an axes dictionary for the passed axes.
|
pandas/core/generic.py
|
_construct_axes_dict_from
|
kapilepatel/pandas
|
python
|
@staticmethod
def _construct_axes_dict_from(self, axes, **kwargs):
d = {a: ax for (a, ax) in zip(self._AXIS_ORDERS, axes)}
d.update(kwargs)
return d
|
def _construct_axes_dict_for_slice(self, axes=None, **kwargs):
'Return an axes dictionary for myself.'
d = {self._AXIS_SLICEMAP[a]: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
| 3,606,054,628,442,910,700 |
Return an axes dictionary for myself.
|
pandas/core/generic.py
|
_construct_axes_dict_for_slice
|
kapilepatel/pandas
|
python
|
def _construct_axes_dict_for_slice(self, axes=None, **kwargs):
d = {self._AXIS_SLICEMAP[a]: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
|
def _construct_axes_from_arguments(self, args, kwargs, require_all=False, sentinel=None):
'Construct and returns axes if supplied in args/kwargs.\n\n If require_all, raise if all axis arguments are not supplied\n return a tuple of (axes, kwargs).\n\n sentinel specifies the default parameter when an axis is not\n supplied; useful to distinguish when a user explicitly passes None\n in scenarios where None has special meaning.\n '
args = list(args)
for a in self._AXIS_ORDERS:
alias = self._AXIS_IALIASES.get(a)
if (alias is not None):
if (a in kwargs):
if (alias in kwargs):
raise TypeError(('arguments are mutually exclusive for [%s,%s]' % (a, alias)))
continue
if (alias in kwargs):
kwargs[a] = kwargs.pop(alias)
continue
if (a not in kwargs):
try:
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
raise TypeError('not enough/duplicate arguments specified!')
axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}
return (axes, kwargs)
| 3,963,837,012,820,377,000 |
Construct and return axes if supplied in args/kwargs.
If require_all, raise if not all axis arguments are supplied;
returns a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
|
pandas/core/generic.py
|
_construct_axes_from_arguments
|
kapilepatel/pandas
|
python
|
def _construct_axes_from_arguments(self, args, kwargs, require_all=False, sentinel=None):
'Construct and returns axes if supplied in args/kwargs.\n\n If require_all, raise if all axis arguments are not supplied\n return a tuple of (axes, kwargs).\n\n sentinel specifies the default parameter when an axis is not\n supplied; useful to distinguish when a user explicitly passes None\n in scenarios where None has special meaning.\n '
args = list(args)
for a in self._AXIS_ORDERS:
alias = self._AXIS_IALIASES.get(a)
if (alias is not None):
if (a in kwargs):
if (alias in kwargs):
raise TypeError(('arguments are mutually exclusive for [%s,%s]' % (a, alias)))
continue
if (alias in kwargs):
kwargs[a] = kwargs.pop(alias)
continue
if (a not in kwargs):
try:
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
raise TypeError('not enough/duplicate arguments specified!')
axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}
return (axes, kwargs)
|
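The argument resolution above can be hard to follow in the abstract: positional arguments fill axes in _AXIS_ORDERS order, keyword aliases are folded back to canonical names, and conflicting duplicates raise a TypeError. A toy illustration of the resolution order (error handling omitted; the names below are invented for the sketch, not pandas API):

AXIS_ORDERS = ['index', 'columns']
AXIS_ALIASES = {'rows': 'index'}

def resolve_axes(args, kwargs):
    # Positional args fill axes in order; keyword aliases map back to canonical names.
    args = list(args)
    axes = {}
    for name in AXIS_ORDERS:
        for alias, target in AXIS_ALIASES.items():
            if target == name and alias in kwargs:
                kwargs[name] = kwargs.pop(alias)
        if name not in kwargs and args:
            kwargs[name] = args.pop(0)
        axes[name] = kwargs.pop(name, None)
    return axes, kwargs

print(resolve_axes(([0, 1, 2],), {'columns': ['a', 'b']}))
# ({'index': [0, 1, 2], 'columns': ['a', 'b']}, {})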
@classmethod
def _get_block_manager_axis(cls, axis):
'Map the axis to the block_manager axis.'
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = (cls._AXIS_LEN - 1)
return (m - axis)
return axis
| -5,141,080,245,831,676,000 |
Map the axis to the block_manager axis.
|
pandas/core/generic.py
|
_get_block_manager_axis
|
kapilepatel/pandas
|
python
|
@classmethod
def _get_block_manager_axis(cls, axis):
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = (cls._AXIS_LEN - 1)
return (m - axis)
return axis
|
@property
def shape(self):
'\n Return a tuple of axis dimensions\n '
return tuple((len(self._get_axis(a)) for a in self._AXIS_ORDERS))
| -2,033,092,480,248,761,000 |
Return a tuple of axis dimensions
|
pandas/core/generic.py
|
shape
|
kapilepatel/pandas
|
python
|
@property
def shape(self):
'\n \n '
return tuple((len(self._get_axis(a)) for a in self._AXIS_ORDERS))
|
@property
def axes(self):
'\n Return index label(s) of the internal NDFrame\n '
return [self._get_axis(a) for a in self._AXIS_ORDERS]
| -3,242,568,803,582,906,000 |
Return index label(s) of the internal NDFrame
|
pandas/core/generic.py
|
axes
|
kapilepatel/pandas
|
python
|
@property
def axes(self):
'\n \n '
return [self._get_axis(a) for a in self._AXIS_ORDERS]
|
@property
def ndim(self):
"\n Return an int representing the number of axes / array dimensions.\n\n Return 1 if Series. Otherwise return 2 if DataFrame.\n\n See Also\n --------\n ndarray.ndim : Number of array dimensions.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.ndim\n 1\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.ndim\n 2\n "
return self._data.ndim
| 4,350,127,406,230,049,000 |
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
|
pandas/core/generic.py
|
ndim
|
kapilepatel/pandas
|
python
|
@property
def ndim(self):
"\n Return an int representing the number of axes / array dimensions.\n\n Return 1 if Series. Otherwise return 2 if DataFrame.\n\n See Also\n --------\n ndarray.ndim : Number of array dimensions.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.ndim\n 1\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.ndim\n 2\n "
return self._data.ndim
|
@property
def size(self):
"\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. Otherwise return the number of\n rows times number of columns if DataFrame.\n\n See Also\n --------\n ndarray.size : Number of elements in the array.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.size\n 3\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.size\n 4\n "
return np.prod(self.shape)
| 4,814,416,826,443,750,000 |
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
|
pandas/core/generic.py
|
size
|
kapilepatel/pandas
|
python
|
@property
def size(self):
"\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. Otherwise return the number of\n rows times number of columns if DataFrame.\n\n See Also\n --------\n ndarray.size : Number of elements in the array.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.size\n 3\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.size\n 4\n "
return np.prod(self.shape)
|
@property
def _selected_obj(self):
' internal compat with SelectionMixin '
return self
| 223,117,681,623,033,600 |
internal compat with SelectionMixin
|
pandas/core/generic.py
|
_selected_obj
|
kapilepatel/pandas
|
python
|
@property
def _selected_obj(self):
' '
return self
|
@property
def _obj_with_exclusions(self):
' internal compat with SelectionMixin '
return self
| 3,547,958,630,176,529,000 |
internal compat with SelectionMixin
|
pandas/core/generic.py
|
_obj_with_exclusions
|
kapilepatel/pandas
|
python
|
@property
def _obj_with_exclusions(self):
' '
return self
|
def set_axis(self, labels, axis=0, inplace=None):
'\n Assign desired index to given axis.\n\n Indexes for column or row labels can be changed by assigning\n a list-like or Index.\n\n .. versionchanged:: 0.21.0\n\n The signature is now `labels` and `axis`, consistent with\n the rest of pandas API. Previously, the `axis` and `labels`\n arguments were respectively the first and second positional\n arguments.\n\n Parameters\n ----------\n labels : list-like, Index\n The values for the new index.\n\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n The axis to update. The value 0 identifies the rows, and 1\n identifies the columns.\n\n inplace : bool, default None\n Whether to return a new %(klass)s instance.\n\n .. warning::\n\n ``inplace=None`` currently falls back to to True, but in a\n future version, will default to False. Use inplace=True\n explicitly rather than relying on the default.\n\n Returns\n -------\n renamed : %(klass)s or None\n An object of same type as caller if inplace=False, None otherwise.\n\n See Also\n --------\n DataFrame.rename_axis : Alter the name of the index or columns.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.set_axis([\'a\', \'b\', \'c\'], axis=0, inplace=False)\n a 1\n b 2\n c 3\n dtype: int64\n\n The original object is not modified.\n\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n **DataFrame**\n\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n\n Change the row labels.\n\n >>> df.set_axis([\'a\', \'b\', \'c\'], axis=\'index\', inplace=False)\n A B\n a 1 4\n b 2 5\n c 3 6\n\n Change the column labels.\n\n >>> df.set_axis([\'I\', \'II\'], axis=\'columns\', inplace=False)\n I II\n 0 1 4\n 1 2 5\n 2 3 6\n\n Now, update the labels inplace.\n\n >>> df.set_axis([\'i\', \'ii\'], axis=\'columns\', inplace=True)\n >>> df\n i ii\n 0 1 4\n 1 2 5\n 2 3 6\n '
if is_scalar(labels):
warnings.warn('set_axis now takes "labels" as first argument, and "axis" as named parameter. The old form, with "axis" as first parameter and "labels" as second, is still supported but will be deprecated in a future version of pandas.', FutureWarning, stacklevel=2)
(labels, axis) = (axis, labels)
if (inplace is None):
warnings.warn('set_axis currently defaults to operating inplace.\nThis will change in a future version of pandas, use inplace=True to avoid this warning.', FutureWarning, stacklevel=2)
inplace = True
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
| 8,482,339,352,067,965,000 |
Assign desired index to given axis.
Indexes for column or row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to update. The value 0 identifies the rows, and 1
identifies the columns.
inplace : bool, default None
Whether to modify the object in place rather than returning a new %(klass)s instance.
.. warning::
``inplace=None`` currently falls back to True, but in a
future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
Returns
-------
renamed : %(klass)s or None
An object of same type as caller if inplace=False, None otherwise.
See Also
--------
DataFrame.rename_axis : Alter the name of the index or columns.
Examples
--------
**Series**
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.set_axis(['a', 'b', 'c'], axis=0, inplace=False)
a 1
b 2
c 3
dtype: int64
The original object is not modified.
>>> s
0 1
1 2
2 3
dtype: int64
**DataFrame**
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index', inplace=False)
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns', inplace=False)
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
|
pandas/core/generic.py
|
set_axis
|
kapilepatel/pandas
|
python
|
def set_axis(self, labels, axis=0, inplace=None):
'\n Assign desired index to given axis.\n\n Indexes for column or row labels can be changed by assigning\n a list-like or Index.\n\n .. versionchanged:: 0.21.0\n\n The signature is now `labels` and `axis`, consistent with\n the rest of pandas API. Previously, the `axis` and `labels`\n arguments were respectively the first and second positional\n arguments.\n\n Parameters\n ----------\n labels : list-like, Index\n The values for the new index.\n\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n The axis to update. The value 0 identifies the rows, and 1\n identifies the columns.\n\n inplace : bool, default None\n Whether to return a new %(klass)s instance.\n\n .. warning::\n\n ``inplace=None`` currently falls back to to True, but in a\n future version, will default to False. Use inplace=True\n explicitly rather than relying on the default.\n\n Returns\n -------\n renamed : %(klass)s or None\n An object of same type as caller if inplace=False, None otherwise.\n\n See Also\n --------\n DataFrame.rename_axis : Alter the name of the index or columns.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.set_axis([\'a\', \'b\', \'c\'], axis=0, inplace=False)\n a 1\n b 2\n c 3\n dtype: int64\n\n The original object is not modified.\n\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n **DataFrame**\n\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n\n Change the row labels.\n\n >>> df.set_axis([\'a\', \'b\', \'c\'], axis=\'index\', inplace=False)\n A B\n a 1 4\n b 2 5\n c 3 6\n\n Change the column labels.\n\n >>> df.set_axis([\'I\', \'II\'], axis=\'columns\', inplace=False)\n I II\n 0 1 4\n 1 2 5\n 2 3 6\n\n Now, update the labels inplace.\n\n >>> df.set_axis([\'i\', \'ii\'], axis=\'columns\', inplace=True)\n >>> df\n i ii\n 0 1 4\n 1 2 5\n 2 3 6\n '
if is_scalar(labels):
warnings.warn('set_axis now takes "labels" as first argument, and "axis" as named parameter. The old form, with "axis" as first parameter and "labels" as second, is still supported but will be deprecated in a future version of pandas.', FutureWarning, stacklevel=2)
(labels, axis) = (axis, labels)
if (inplace is None):
warnings.warn('set_axis currently defaults to operating inplace.\nThis will change in a future version of pandas, use inplace=True to avoid this warning.', FutureWarning, stacklevel=2)
inplace = True
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
|
def transpose(self, *args, **kwargs):
'\n Permute the dimensions of the %(klass)s\n\n Parameters\n ----------\n args : %(args_transpose)s\n copy : boolean, default False\n Make a copy of the underlying data. Mixed-dtype data will\n always result in a copy\n **kwargs\n Additional keyword arguments will be passed to the function.\n\n Returns\n -------\n y : same as input\n\n Examples\n --------\n >>> p.transpose(2, 0, 1)\n >>> p.transpose(2, 0, 1, copy=True)\n '
(axes, kwargs) = self._construct_axes_from_arguments(args, kwargs, require_all=True)
axes_names = tuple((self._get_axis_name(axes[a]) for a in self._AXIS_ORDERS))
axes_numbers = tuple((self._get_axis_number(axes[a]) for a in self._AXIS_ORDERS))
if (len(axes) != len(set(axes))):
raise ValueError(('Must specify %s unique axes' % self._AXIS_LEN))
new_axes = self._construct_axes_dict_from(self, [self._get_axis(x) for x in axes_names])
new_values = self.values.transpose(axes_numbers)
if (kwargs.pop('copy', None) or (len(args) and args[(- 1)])):
new_values = new_values.copy()
nv.validate_transpose_for_generic(self, kwargs)
return self._constructor(new_values, **new_axes).__finalize__(self)
| 5,950,421,671,865,310,000 |
Permute the dimensions of the %(klass)s
Parameters
----------
args : %(args_transpose)s
copy : boolean, default False
Make a copy of the underlying data. Mixed-dtype data will
always result in a copy
**kwargs
Additional keyword arguments will be passed to the function.
Returns
-------
y : same as input
Examples
--------
>>> p.transpose(2, 0, 1)
>>> p.transpose(2, 0, 1, copy=True)
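The doctest above assumes a pre-existing Panel-style object ``p``. As a minimal, self-contained sketch, the lines below show the same generic transpose on a hypothetical DataFrame; its labels and values are assumptions for illustration, not part of the original example.

import pandas as pd

# Hypothetical 2x3 frame, used only to illustrate the generic transpose;
# it is not the object the original doctest refers to.
df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"])

# For a two-dimensional object the only non-trivial permutation swaps the
# two axes, so transpose() with no arguments is equivalent to df.T.
transposed = df.transpose()
assert transposed.equals(df.T)

# copy=True forces a copy of the underlying data (mixed-dtype data would
# be copied in any case).
independent = df.transpose(copy=True)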
|
pandas/core/generic.py
|
transpose
|
kapilepatel/pandas
|
python
|
def transpose(self, *args, **kwargs):
'\n Permute the dimensions of the %(klass)s\n\n Parameters\n ----------\n args : %(args_transpose)s\n copy : boolean, default False\n Make a copy of the underlying data. Mixed-dtype data will\n always result in a copy\n **kwargs\n Additional keyword arguments will be passed to the function.\n\n Returns\n -------\n y : same as input\n\n Examples\n --------\n >>> p.transpose(2, 0, 1)\n >>> p.transpose(2, 0, 1, copy=True)\n '
(axes, kwargs) = self._construct_axes_from_arguments(args, kwargs, require_all=True)
axes_names = tuple((self._get_axis_name(axes[a]) for a in self._AXIS_ORDERS))
axes_numbers = tuple((self._get_axis_number(axes[a]) for a in self._AXIS_ORDERS))
if (len(axes) != len(set(axes))):
raise ValueError(('Must specify %s unique axes' % self._AXIS_LEN))
new_axes = self._construct_axes_dict_from(self, [self._get_axis(x) for x in axes_names])
new_values = self.values.transpose(axes_numbers)
if (kwargs.pop('copy', None) or (len(args) and args[(- 1)])):
new_values = new_values.copy()
nv.validate_transpose_for_generic(self, kwargs)
return self._constructor(new_values, **new_axes).__finalize__(self)
|
def swapaxes(self, axis1, axis2, copy=True):
'\n Interchange axes and swap values axes appropriately.\n\n Returns\n -------\n y : same as input\n '
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if (i == j):
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
| 1,932,957,769,574,267,100 |
Interchange axes and swap the values along those axes appropriately.
Returns
-------
y : same as input
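The docstring above has no example, so here is a short, hedged sketch of the behaviour it describes; the frame and its labels are made up for illustration.

import pandas as pd

# Illustrative frame; the labels are assumptions, not from the docstring.
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])

# Swapping axis 0 (index) with axis 1 (columns) on a DataFrame is the same
# operation as a transpose.
swapped = df.swapaxes(0, 1)
assert swapped.equals(df.T)

# If axis1 == axis2 the values are untouched; copy=True (the default)
# still returns a new object rather than `self`.
same = df.swapaxes("index", "index")
assert same.equals(df) and same is not df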
|
pandas/core/generic.py
|
swapaxes
|
kapilepatel/pandas
|
python
|
def swapaxes(self, axis1, axis2, copy=True):
'\n Interchange axes and swap values axes appropriately.\n\n Returns\n -------\n y : same as input\n '
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if (i == j):
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
|
def droplevel(self, level, axis=0):
"\n Return DataFrame with requested index / column level(s) removed.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n level : int, str, or list-like\n If a string is given, must be the name of a level\n If list-like, elements must be names or positional indexes\n of levels.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n Returns\n -------\n DataFrame.droplevel()\n\n Examples\n --------\n >>> df = pd.DataFrame([\n ... [1, 2, 3, 4],\n ... [5, 6, 7, 8],\n ... [9, 10, 11, 12]\n ... ]).set_index([0, 1]).rename_axis(['a', 'b'])\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... ], names=['level_1', 'level_2'])\n\n >>> df\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a')\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level2', axis=1)\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n "
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
| 8,172,893,012,025,901,000 |
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
DataFrame
DataFrame with the requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
|
pandas/core/generic.py
|
droplevel
|
kapilepatel/pandas
|
python
|
def droplevel(self, level, axis=0):
"\n Return DataFrame with requested index / column level(s) removed.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n level : int, str, or list-like\n If a string is given, must be the name of a level\n If list-like, elements must be names or positional indexes\n of levels.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n Returns\n -------\n DataFrame.droplevel()\n\n Examples\n --------\n >>> df = pd.DataFrame([\n ... [1, 2, 3, 4],\n ... [5, 6, 7, 8],\n ... [9, 10, 11, 12]\n ... ]).set_index([0, 1]).rename_axis(['a', 'b'])\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... ], names=['level_1', 'level_2'])\n\n >>> df\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a')\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level2', axis=1)\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n "
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
|
def pop(self, item):
"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n "
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
| -819,123,279,384,925,300 |
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
|
pandas/core/generic.py
|
pop
|
kapilepatel/pandas
|
python
|
def pop(self, item):
"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey','mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n "
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
|
def squeeze(self, axis=None):
"\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. By default, all length-1 axes are\n squeezed.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = pd.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_0a = df.loc[df.index < 1, ['a']]\n >>> df_0a\n a\n 0 1\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_0a.squeeze('rows')\n a 1\n Name: 0, dtype: int64\n\n Squeezing all axes wil project directly into a scalar:\n\n >>> df_0a.squeeze()\n 1\n "
axis = (self._AXIS_NAMES if (axis is None) else (self._get_axis_number(axis),))
try:
return self.iloc[tuple(((0 if ((i in axis) and (len(a) == 1)) else slice(None)) for (i, a) in enumerate(self.axes)))]
except Exception:
return self
| 9,216,137,054,533,155,000 |
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
.. versionadded:: 0.20.0
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
|
pandas/core/generic.py
|
squeeze
|
kapilepatel/pandas
|
python
|
def squeeze(self, axis=None):
"\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. By default, all length-1 axes are\n squeezed.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = pd.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_0a = df.loc[df.index < 1, ['a']]\n >>> df_0a\n a\n 0 1\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_0a.squeeze('rows')\n a 1\n Name: 0, dtype: int64\n\n Squeezing all axes wil project directly into a scalar:\n\n >>> df_0a.squeeze()\n 1\n "
axis = (self._AXIS_NAMES if (axis is None) else (self._get_axis_number(axis),))
try:
return self.iloc[tuple(((0 if ((i in axis) and (len(a) == 1)) else slice(None)) for (i, a) in enumerate(self.axes)))]
except Exception:
return self
|
def swaplevel(self, i=(- 2), j=(- 1), axis=0):
'\n Swap levels i and j in a MultiIndex on a particular axis\n\n Parameters\n ----------\n i, j : int, str (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n swapped : same type as caller (new object)\n\n .. versionchanged:: 0.18.1\n\n The indexes ``i`` and ``j`` are now optional, and default to\n the two innermost levels of the index.\n\n '
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
| -4,580,033,766,606,492,700 |
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
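The docstring above does not include an example, so the following is a minimal sketch of swapping MultiIndex levels; the level names 'outer'/'inner' and the data are assumptions made for illustration.

import pandas as pd

# Hypothetical two-level index; names and values are illustrative only.
idx = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)],
                                names=["outer", "inner"])
s = pd.Series([10, 20, 30], index=idx)

# With no arguments the two innermost levels are swapped (i=-2, j=-1),
# so the level order becomes ('inner', 'outer').
swapped = s.swaplevel()
assert list(swapped.index.names) == ["inner", "outer"]

# Levels can also be addressed by name; the data values are unchanged.
assert s.swaplevel("outer", "inner").tolist() == [10, 20, 30]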
|
pandas/core/generic.py
|
swaplevel
|
kapilepatel/pandas
|
python
|
def swaplevel(self, i=(- 2), j=(- 1), axis=0):
'\n Swap levels i and j in a MultiIndex on a particular axis\n\n Parameters\n ----------\n i, j : int, str (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n swapped : same type as caller (new object)\n\n .. versionchanged:: 0.18.1\n\n The indexes ``i`` and ``j`` are now optional, and default to\n the two innermost levels of the index.\n\n '
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
|
def rename(self, *args, **kwargs):
'\n Alter axes input function or functions. Function / dict values must be\n unique (1-to-1). Labels not contained in a dict / Series will be left\n as-is. Extra labels listed don\'t throw an error. Alternatively, change\n ``Series.name`` with a scalar value (Series only).\n\n Parameters\n ----------\n %(axes)s : scalar, list-like, dict-like or function, optional\n Scalar or list-like will alter the ``Series.name`` attribute,\n and raise on DataFrame or Panel.\n dict-like or functions are transformations to apply to\n that axis\' values\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new %(klass)s. If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {\'ignore\', \'raise\'}, default \'ignore\'\n If \'raise\', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If \'ignore\', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n renamed : %(klass)s (new object)\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n "errors=\'raise\'".\n\n See Also\n --------\n NDFrame.rename_axis\n\n Examples\n --------\n\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename("my_name") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n\n Since ``DataFrame`` doesn\'t have a ``.name`` attribute,\n only mapping-type arguments are allowed.\n\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n >>> df.rename(2)\n Traceback (most recent call last):\n ...\n TypeError: \'int\' object is not callable\n\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={\'index\', \'columns\'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n >>> df.rename(index=str, columns={"A": "a", "B": "c"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename(index=str, columns={"A": "a", "C": "c"})\n a B\n 0 1 4\n 1 2 5\n 2 3 6\n\n Using axis-style parameters\n\n >>> df.rename(str.lower, axis=\'columns\')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis=\'index\')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n\n See the :ref:`user guide <basics.rename>` for more.\n '
(axes, kwargs) = self._construct_axes_from_arguments(args, kwargs)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
level = kwargs.pop('level', None)
axis = kwargs.pop('axis', None)
errors = kwargs.pop('errors', 'ignore')
if (axis is not None):
self._get_axis_number(axis)
if kwargs:
raise TypeError('rename() got an unexpected keyword argument "{0}"'.format(list(kwargs.keys())[0]))
if (com.count_not_none(*axes.values()) == 0):
raise TypeError('must pass an index to rename')
self._consolidate_inplace()
result = (self if inplace else self.copy(deep=copy))
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if (v is None):
continue
f = com._get_rename_function(v)
baxis = self._get_block_manager_axis(axis)
if (level is not None):
level = self.axes[axis]._get_level_number(level)
if (not callable(v)):
indexer = self.axes[axis].get_indexer_for(v)
if ((errors == 'raise') and len(indexer[(indexer == (- 1))])):
missing_labels = [label for (index, label) in enumerate(v) if (indexer[index] == (- 1))]
raise KeyError('{} not found in axis'.format(missing_labels))
result._data = result._data.rename_axis(f, axis=baxis, copy=copy, level=level)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
else:
return result.__finalize__(self)
| 5,026,194,608,817,388,000 |
Alter axes using an input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame or Panel.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
|
pandas/core/generic.py
|
rename
|
kapilepatel/pandas
|
python
|
def rename(self, *args, **kwargs):
'\n Alter axes input function or functions. Function / dict values must be\n unique (1-to-1). Labels not contained in a dict / Series will be left\n as-is. Extra labels listed don\'t throw an error. Alternatively, change\n ``Series.name`` with a scalar value (Series only).\n\n Parameters\n ----------\n %(axes)s : scalar, list-like, dict-like or function, optional\n Scalar or list-like will alter the ``Series.name`` attribute,\n and raise on DataFrame or Panel.\n dict-like or functions are transformations to apply to\n that axis\' values\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new %(klass)s. If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {\'ignore\', \'raise\'}, default \'ignore\'\n If \'raise\', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If \'ignore\', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n renamed : %(klass)s (new object)\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n "errors=\'raise\'".\n\n See Also\n --------\n NDFrame.rename_axis\n\n Examples\n --------\n\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename("my_name") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n\n Since ``DataFrame`` doesn\'t have a ``.name`` attribute,\n only mapping-type arguments are allowed.\n\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n >>> df.rename(2)\n Traceback (most recent call last):\n ...\n TypeError: \'int\' object is not callable\n\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={\'index\', \'columns\'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n >>> df.rename(index=str, columns={"A": "a", "B": "c"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename(index=str, columns={"A": "a", "C": "c"})\n a B\n 0 1 4\n 1 2 5\n 2 3 6\n\n Using axis-style parameters\n\n >>> df.rename(str.lower, axis=\'columns\')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis=\'index\')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n\n See the :ref:`user guide <basics.rename>` for more.\n '
(axes, kwargs) = self._construct_axes_from_arguments(args, kwargs)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
level = kwargs.pop('level', None)
axis = kwargs.pop('axis', None)
errors = kwargs.pop('errors', 'ignore')
if (axis is not None):
self._get_axis_number(axis)
if kwargs:
raise TypeError('rename() got an unexpected keyword argument "{0}"'.format(list(kwargs.keys())[0]))
if (com.count_not_none(*axes.values()) == 0):
raise TypeError('must pass an index to rename')
self._consolidate_inplace()
result = (self if inplace else self.copy(deep=copy))
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if (v is None):
continue
f = com._get_rename_function(v)
baxis = self._get_block_manager_axis(axis)
if (level is not None):
level = self.axes[axis]._get_level_number(level)
if (not callable(v)):
indexer = self.axes[axis].get_indexer_for(v)
if ((errors == 'raise') and len(indexer[(indexer == (- 1))])):
missing_labels = [label for (index, label) in enumerate(v) if (indexer[index] == (- 1))]
raise KeyError('{} not found in axis'.format(missing_labels))
result._data = result._data.rename_axis(f, axis=baxis, copy=copy, level=level)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
else:
return result.__finalize__(self)
|
@rewrite_axis_style_signature('mapper', [('copy', True), ('inplace', False)])
def rename_axis(self, mapper=sentinel, **kwargs):
'\n Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n Value to set the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis\' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n\n .. versionchanged:: 0.24.0\n\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n The axis to rename.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Series\n or DataFrame.\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n Prior to version 0.21.0, ``rename_axis`` could also be used to change\n the axis *labels* by passing a mapping or scalar. This behavior is\n deprecated and will be removed in a future version. Use ``rename``\n instead.\n\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={\'index\', \'columns\'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n In this case, the parameter ``copy`` is ignored.\n\n The second calling convention will modify the names of the\n the corresponding index if mapper is a list or a scalar.\n However, if mapper is dict-like or a function, it will use the\n deprecated behavior of modifying the axis *labels*.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series(["dog", "cat", "monkey"])\n >>> s\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n >>> s.rename_axis("animal")\n animal\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n\n **DataFrame**\n\n >>> df = pd.DataFrame({"num_legs": [4, 4, 2],\n ... "num_arms": [0, 0, 2]},\n ... ["dog", "cat", "monkey"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis("animal")\n >>> df\n num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis("limbs", axis="columns")\n >>> df\n limbs num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> df.index = pd.MultiIndex.from_product([[\'mammal\'],\n ... [\'dog\', \'cat\', \'monkey\']],\n ... names=[\'type\', \'name\'])\n >>> df\n limbs num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={\'type\': \'class\'})\n limbs num_legs num_arms\n class name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(columns=str.upper)\n LIMBS num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n '
(axes, kwargs) = self._construct_axes_from_arguments((), kwargs, sentinel=sentinel)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
axis = kwargs.pop('axis', 0)
if (axis is not None):
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError('rename_axis() got an unexpected keyword argument "{0}"'.format(list(kwargs.keys())[0]))
inplace = validate_bool_kwarg(inplace, 'inplace')
if (mapper is not sentinel):
non_mapper = (is_scalar(mapper) or (is_list_like(mapper) and (not is_dict_like(mapper))))
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
msg = "Using 'rename_axis' to alter labels is deprecated. Use '.rename' instead"
warnings.warn(msg, FutureWarning, stacklevel=3)
axis = self._get_axis_name(axis)
d = {'copy': copy, 'inplace': inplace}
d[axis] = mapper
return self.rename(**d)
else:
result = (self if inplace else self.copy(deep=copy))
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if (v is sentinel):
continue
non_mapper = (is_scalar(v) or (is_list_like(v) and (not is_dict_like(v))))
if non_mapper:
newnames = v
else:
f = com._get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if (not inplace):
return result
| 4,914,523,485,724,767,000 |
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
Prior to version 0.21.0, ``rename_axis`` could also be used to change
the axis *labels* by passing a mapping or scalar. This behavior is
deprecated and will be removed in a future version. Use ``rename``
instead.
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
|
pandas/core/generic.py
|
rename_axis
|
kapilepatel/pandas
|
python
|
@rewrite_axis_style_signature('mapper', [('copy', True), ('inplace', False)])
def rename_axis(self, mapper=sentinel, **kwargs):
'\n Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n Value to set the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis\' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n\n .. versionchanged:: 0.24.0\n\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n The axis to rename.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Series\n or DataFrame.\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n Prior to version 0.21.0, ``rename_axis`` could also be used to change\n the axis *labels* by passing a mapping or scalar. This behavior is\n deprecated and will be removed in a future version. Use ``rename``\n instead.\n\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={\'index\', \'columns\'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n In this case, the parameter ``copy`` is ignored.\n\n The second calling convention will modify the names of the\n the corresponding index if mapper is a list or a scalar.\n However, if mapper is dict-like or a function, it will use the\n deprecated behavior of modifying the axis *labels*.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series(["dog", "cat", "monkey"])\n >>> s\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n >>> s.rename_axis("animal")\n animal\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n\n **DataFrame**\n\n >>> df = pd.DataFrame({"num_legs": [4, 4, 2],\n ... "num_arms": [0, 0, 2]},\n ... ["dog", "cat", "monkey"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis("animal")\n >>> df\n num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis("limbs", axis="columns")\n >>> df\n limbs num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> df.index = pd.MultiIndex.from_product([[\'mammal\'],\n ... [\'dog\', \'cat\', \'monkey\']],\n ... names=[\'type\', \'name\'])\n >>> df\n limbs num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={\'type\': \'class\'})\n limbs num_legs num_arms\n class name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(columns=str.upper)\n LIMBS num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n '
(axes, kwargs) = self._construct_axes_from_arguments((), kwargs, sentinel=sentinel)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
axis = kwargs.pop('axis', 0)
if (axis is not None):
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError('rename_axis() got an unexpected keyword argument "{0}"'.format(list(kwargs.keys())[0]))
inplace = validate_bool_kwarg(inplace, 'inplace')
if (mapper is not sentinel):
non_mapper = (is_scalar(mapper) or (is_list_like(mapper) and (not is_dict_like(mapper))))
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
msg = "Using 'rename_axis' to alter labels is deprecated. Use '.rename' instead"
warnings.warn(msg, FutureWarning, stacklevel=3)
axis = self._get_axis_name(axis)
d = {'copy': copy, 'inplace': inplace}
d[axis] = mapper
return self.rename(**d)
else:
result = (self if inplace else self.copy(deep=copy))
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if (v is sentinel):
continue
non_mapper = (is_scalar(v) or (is_list_like(v) and (not is_dict_like(v))))
if non_mapper:
newnames = v
else:
f = com._get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if (not inplace):
return result
|
def _set_axis_name(self, name, axis=0, inplace=False):
'\n Set the name(s) of the axis.\n\n Parameters\n ----------\n name : str or list of str\n Name(s) to set.\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n The axis to set the label. The value 0 or \'index\' specifies index,\n and the value 1 or \'columns\' specifies columns.\n inplace : bool, default False\n If `True`, do operation inplace and return None.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or `None` if `inplace` is `True`.\n\n See Also\n --------\n DataFrame.rename : Alter the axis labels of :class:`DataFrame`.\n Series.rename : Alter the index labels or set the index name\n of :class:`Series`.\n Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.\n\n Examples\n --------\n >>> df = pd.DataFrame({"num_legs": [4, 4, 2]},\n ... ["dog", "cat", "monkey"])\n >>> df\n num_legs\n dog 4\n cat 4\n monkey 2\n >>> df._set_axis_name("animal")\n num_legs\n animal\n dog 4\n cat 4\n monkey 2\n >>> df.index = pd.MultiIndex.from_product(\n ... [["mammal"], [\'dog\', \'cat\', \'monkey\']])\n >>> df._set_axis_name(["type", "name"])\n legs\n type name\n mammal dog 4\n cat 4\n monkey 2\n '
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, 'inplace')
renamed = (self if inplace else self.copy())
renamed.set_axis(idx, axis=axis, inplace=True)
if (not inplace):
return renamed
| -3,503,825,494,571,687,400 |
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
.. versionadded:: 0.21.0
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
|
pandas/core/generic.py
|
_set_axis_name
|
kapilepatel/pandas
|
python
|
def _set_axis_name(self, name, axis=0, inplace=False):
'\n Set the name(s) of the axis.\n\n Parameters\n ----------\n name : str or list of str\n Name(s) to set.\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n The axis to set the label. The value 0 or \'index\' specifies index,\n and the value 1 or \'columns\' specifies columns.\n inplace : bool, default False\n If `True`, do operation inplace and return None.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or `None` if `inplace` is `True`.\n\n See Also\n --------\n DataFrame.rename : Alter the axis labels of :class:`DataFrame`.\n Series.rename : Alter the index labels or set the index name\n of :class:`Series`.\n Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.\n\n Examples\n --------\n >>> df = pd.DataFrame({"num_legs": [4, 4, 2]},\n ... ["dog", "cat", "monkey"])\n >>> df\n num_legs\n dog 4\n cat 4\n monkey 2\n >>> df._set_axis_name("animal")\n num_legs\n animal\n dog 4\n cat 4\n monkey 2\n >>> df.index = pd.MultiIndex.from_product(\n ... [["mammal"], [\'dog\', \'cat\', \'monkey\']])\n >>> df._set_axis_name(["type", "name"])\n legs\n type name\n mammal dog 4\n cat 4\n monkey 2\n '
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, 'inplace')
renamed = (self if inplace else self.copy())
renamed.set_axis(idx, axis=axis, inplace=True)
if (not inplace):
return renamed
|
def equals(self, other):
'\n Test whether two objects contain the same elements.\n\n This function allows two Series or DataFrames to be compared against\n each other to see if they have the same shape and elements. NaNs in\n the same location are considered equal. The column headers do not\n need to have the same type, but the elements within the columns must\n be the same dtype.\n\n Parameters\n ----------\n other : Series or DataFrame\n The other Series or DataFrame to be compared with the first.\n\n Returns\n -------\n bool\n True if all elements are the same in both objects, False\n otherwise.\n\n See Also\n --------\n Series.eq : Compare two Series objects of the same length\n and return a Series where each element is True if the element\n in each Series is equal, False otherwise.\n DataFrame.eq : Compare two DataFrame objects of the same shape and\n return a DataFrame where each element is True if the respective\n element in each DataFrame is equal, False otherwise.\n assert_series_equal : Return True if left and right Series are equal,\n False otherwise.\n assert_frame_equal : Return True if left and right DataFrames are\n equal, False otherwise.\n numpy.array_equal : Return True if two arrays have the same shape\n and elements, False otherwise.\n\n Notes\n -----\n This function requires that the elements have the same dtype as their\n respective elements in the other Series or DataFrame. However, the\n column labels do not need to have the same type, as long as they are\n still considered equal.\n\n Examples\n --------\n >>> df = pd.DataFrame({1: [10], 2: [20]})\n >>> df\n 1 2\n 0 10 20\n\n DataFrames df and exactly_equal have the same types and values for\n their elements and column labels, which will return True.\n\n >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})\n >>> exactly_equal\n 1 2\n 0 10 20\n >>> df.equals(exactly_equal)\n True\n\n DataFrames df and different_column_type have the same element\n types and values, but have different types for the column labels,\n which will still return True.\n\n >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})\n >>> different_column_type\n 1.0 2.0\n 0 10 20\n >>> df.equals(different_column_type)\n True\n\n DataFrames df and different_data_type have different types for the\n same values for their elements, and will return False even though\n their column labels are the same values and types.\n\n >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})\n >>> different_data_type\n 1 2\n 0 10.0 20.0\n >>> df.equals(different_data_type)\n False\n '
if (not isinstance(other, self._constructor)):
return False
return self._data.equals(other._data)
| -7,646,363,434,194,578,000 |
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
assert_series_equal : Return True if left and right Series are equal,
False otherwise.
assert_frame_equal : Return True if left and right DataFrames are
equal, False otherwise.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
|
pandas/core/generic.py
|
equals
|
kapilepatel/pandas
|
python
|
def equals(self, other):
'\n Test whether two objects contain the same elements.\n\n This function allows two Series or DataFrames to be compared against\n each other to see if they have the same shape and elements. NaNs in\n the same location are considered equal. The column headers do not\n need to have the same type, but the elements within the columns must\n be the same dtype.\n\n Parameters\n ----------\n other : Series or DataFrame\n The other Series or DataFrame to be compared with the first.\n\n Returns\n -------\n bool\n True if all elements are the same in both objects, False\n otherwise.\n\n See Also\n --------\n Series.eq : Compare two Series objects of the same length\n and return a Series where each element is True if the element\n in each Series is equal, False otherwise.\n DataFrame.eq : Compare two DataFrame objects of the same shape and\n return a DataFrame where each element is True if the respective\n element in each DataFrame is equal, False otherwise.\n assert_series_equal : Return True if left and right Series are equal,\n False otherwise.\n assert_frame_equal : Return True if left and right DataFrames are\n equal, False otherwise.\n numpy.array_equal : Return True if two arrays have the same shape\n and elements, False otherwise.\n\n Notes\n -----\n This function requires that the elements have the same dtype as their\n respective elements in the other Series or DataFrame. However, the\n column labels do not need to have the same type, as long as they are\n still considered equal.\n\n Examples\n --------\n >>> df = pd.DataFrame({1: [10], 2: [20]})\n >>> df\n 1 2\n 0 10 20\n\n DataFrames df and exactly_equal have the same types and values for\n their elements and column labels, which will return True.\n\n >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})\n >>> exactly_equal\n 1 2\n 0 10 20\n >>> df.equals(exactly_equal)\n True\n\n DataFrames df and different_column_type have the same element\n types and values, but have different types for the column labels,\n which will still return True.\n\n >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})\n >>> different_column_type\n 1.0 2.0\n 0 10 20\n >>> df.equals(different_column_type)\n True\n\n DataFrames df and different_data_type have different types for the\n same values for their elements, and will return False even though\n their column labels are the same values and types.\n\n >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})\n >>> different_data_type\n 1 2\n 0 10.0 20.0\n >>> df.equals(different_data_type)\n False\n '
if (not isinstance(other, self._constructor)):
return False
return self._data.equals(other._data)
|
def bool(self):
'\n Return the bool of a single element PandasObject.\n\n This must be a boolean scalar value, either True or False. Raise a\n ValueError if the PandasObject does not have exactly 1 element, or that\n element is not boolean\n '
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError('bool cannot act on a non-boolean single element {0}'.format(self.__class__.__name__))
self.__nonzero__()
| -5,600,088,005,043,880,000 |
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or if that
element is not boolean.
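A short sketch of the contract described above, valid for the pandas version documented here (the method was deprecated and removed in later releases); the objects are made up for illustration.

import pandas as pd

# A single-element boolean Series or DataFrame collapses to a Python bool.
assert pd.Series([True]).bool() is True
assert pd.DataFrame({"flag": [False]}).bool() is False

# Anything that is not a single boolean element raises ValueError instead.
try:
    pd.Series([1]).bool()
except ValueError as err:
    print(err)  # bool cannot act on a non-boolean single element Series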
|
pandas/core/generic.py
|
bool
|
kapilepatel/pandas
|
python
|
def bool(self):
'\n Return the bool of a single element PandasObject.\n\n This must be a boolean scalar value, either True or False. Raise a\n ValueError if the PandasObject does not have exactly 1 element, or that\n element is not boolean\n '
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError('bool cannot act on a non-boolean single element {0}'.format(self.__class__.__name__))
self.__nonzero__()
|
def _is_level_reference(self, key, axis=0):
'\n Test whether a key is a level reference for a given axis.\n\n To be considered a level reference, `key` must be a string that:\n - (axis=0): Matches the name of an index level and does NOT match\n a column label.\n - (axis=1): Matches the name of a column level and does NOT match\n an index label.\n\n Parameters\n ----------\n key : str\n Potential level name for the given axis\n axis : int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_level : bool\n '
axis = self._get_axis_number(axis)
if (self.ndim > 2):
raise NotImplementedError('_is_level_reference is not implemented for {type}'.format(type=type(self)))
return ((key is not None) and is_hashable(key) and (key in self.axes[axis].names) and (not self._is_label_reference(key, axis=axis)))
| 5,860,233,145,217,881,000 |
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
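Since the docstring carries no example, the following hedged sketch illustrates the rule; ``_is_level_reference`` is a private helper, and the frame below (index level named 'key', column 'value') is an assumption made purely for illustration.

import pandas as pd

# Made-up frame: index level named 'key', single column 'value'.
df = pd.DataFrame({"value": [1, 2]},
                  index=pd.Index(["a", "b"], name="key"))

# 'key' names an index level and does not clash with a column label,
# so it is a level reference on axis 0.
assert df._is_level_reference("key", axis=0)

# 'value' is a column label, not an index level name.
assert not df._is_level_reference("value", axis=0)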
|
pandas/core/generic.py
|
_is_level_reference
|
kapilepatel/pandas
|
python
|
def _is_level_reference(self, key, axis=0):
'\n Test whether a key is a level reference for a given axis.\n\n To be considered a level reference, `key` must be a string that:\n - (axis=0): Matches the name of an index level and does NOT match\n a column label.\n - (axis=1): Matches the name of a column level and does NOT match\n an index label.\n\n Parameters\n ----------\n key : str\n Potential level name for the given axis\n axis : int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_level : bool\n '
axis = self._get_axis_number(axis)
if (self.ndim > 2):
raise NotImplementedError('_is_level_reference is not implemented for {type}'.format(type=type(self)))
return ((key is not None) and is_hashable(key) and (key in self.axes[axis].names) and (not self._is_label_reference(key, axis=axis)))
|
def _is_label_reference(self, key, axis=0):
'\n Test whether a key is a label reference for a given axis.\n\n To be considered a label reference, `key` must be a string that:\n - (axis=0): Matches a column label\n - (axis=1): Matches an index label\n\n Parameters\n ----------\n key: str\n Potential label name\n axis: int, default 0\n Axis perpendicular to the axis that labels are associated with\n (0 means search for column labels, 1 means search for index labels)\n\n Returns\n -------\n is_label: bool\n '
if (self.ndim > 2):
raise NotImplementedError('_is_label_reference is not implemented for {type}'.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if (ax != axis))
return ((key is not None) and is_hashable(key) and any(((key in self.axes[ax]) for ax in other_axes)))
| 7,299,412,412,023,767,000 |
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
|
pandas/core/generic.py
|
_is_label_reference
|
kapilepatel/pandas
|
python
|
def _is_label_reference(self, key, axis=0):
'\n Test whether a key is a label reference for a given axis.\n\n To be considered a label reference, `key` must be a string that:\n - (axis=0): Matches a column label\n - (axis=1): Matches an index label\n\n Parameters\n ----------\n key: str\n Potential label name\n axis: int, default 0\n Axis perpendicular to the axis that labels are associated with\n (0 means search for column labels, 1 means search for index labels)\n\n Returns\n -------\n is_label: bool\n '
if (self.ndim > 2):
raise NotImplementedError('_is_label_reference is not implemented for {type}'.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if (ax != axis))
return ((key is not None) and is_hashable(key) and any(((key in self.axes[ax]) for ax in other_axes)))
|
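A hedged sketch (hypothetical data, same assumptions as above) illustrating that this helper looks at the *other* axis, so with axis=0 it searches the column labels and with axis=1 it searches the index labels:

    # Illustrative sketch only.
    import pandas as pd

    df = pd.DataFrame({'b': [1, 2]},
                      index=pd.Index(['x', 'y'], name='a'))

    df._is_label_reference('b')          # True: 'b' is a column label
    df._is_label_reference('a')          # False: 'a' is only an index level name
    df._is_label_reference('x', axis=1)  # True: 'x' is an index label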
def _is_label_or_level_reference(self, key, axis=0):
'\n Test whether a key is a label or level reference for a given axis.\n\n To be considered either a label or a level reference, `key` must be a\n string that:\n - (axis=0): Matches a column label or an index level\n - (axis=1): Matches an index label or a column level\n\n Parameters\n ----------\n key: str\n Potential label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_label_or_level: bool\n '
if (self.ndim > 2):
raise NotImplementedError('_is_label_or_level_reference is not implemented for {type}'.format(type=type(self)))
return (self._is_level_reference(key, axis=axis) or self._is_label_reference(key, axis=axis))
| 1,784,305,677,259,383,000 |
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
|
pandas/core/generic.py
|
_is_label_or_level_reference
|
kapilepatel/pandas
|
python
|
def _is_label_or_level_reference(self, key, axis=0):
'\n Test whether a key is a label or level reference for a given axis.\n\n To be considered either a label or a level reference, `key` must be a\n string that:\n - (axis=0): Matches a column label or an index level\n - (axis=1): Matches an index label or a column level\n\n Parameters\n ----------\n key: str\n Potential label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_label_or_level: bool\n '
if (self.ndim > 2):
raise NotImplementedError('_is_label_or_level_reference is not implemented for {type}'.format(type=type(self)))
return (self._is_level_reference(key, axis=axis) or self._is_label_reference(key, axis=axis))
|
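A short sketch (hypothetical data) showing that the combined predicate is simply the OR of the two helpers above, so both the index level name and the column label qualify:

    # Illustrative sketch only.
    import pandas as pd

    df = pd.DataFrame({'b': [1, 2]},
                      index=pd.Index(['x', 'y'], name='a'))

    df._is_label_or_level_reference('a')  # True  (index level)
    df._is_label_or_level_reference('b')  # True  (column label)
    df._is_label_or_level_reference('c')  # False (matches nothing)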
def _check_label_or_level_ambiguity(self, key, axis=0):
'\n Check whether `key` is ambiguous.\n\n By ambiguous, we mean that it matches both a level of the input\n `axis` and a label of the other axis.\n\n Parameters\n ----------\n key: str or object\n label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Raises\n ------\n ValueError: `key` is ambiguous\n '
if (self.ndim > 2):
raise NotImplementedError('_check_label_or_level_ambiguity is not implemented for {type}'.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if (ax != axis))
if ((key is not None) and is_hashable(key) and (key in self.axes[axis].names) and any(((key in self.axes[ax]) for ax in other_axes))):
(level_article, level_type) = (('an', 'index') if (axis == 0) else ('a', 'column'))
(label_article, label_type) = (('a', 'column') if (axis == 0) else ('an', 'index'))
msg = "'{key}' is both {level_article} {level_type} level and {label_article} {label_type} label, which is ambiguous.".format(key=key, level_article=level_article, level_type=level_type, label_article=label_article, label_type=label_type)
raise ValueError(msg)
| -731,676,864,510,409,500 |
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Raises
------
ValueError: `key` is ambiguous
|
pandas/core/generic.py
|
_check_label_or_level_ambiguity
|
kapilepatel/pandas
|
python
|
def _check_label_or_level_ambiguity(self, key, axis=0):
'\n Check whether `key` is ambiguous.\n\n By ambiguous, we mean that it matches both a level of the input\n `axis` and a label of the other axis.\n\n Parameters\n ----------\n key: str or object\n label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Raises\n ------\n ValueError: `key` is ambiguous\n '
if (self.ndim > 2):
raise NotImplementedError('_check_label_or_level_ambiguity is not implemented for {type}'.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if (ax != axis))
if ((key is not None) and is_hashable(key) and (key in self.axes[axis].names) and any(((key in self.axes[ax]) for ax in other_axes))):
(level_article, level_type) = (('an', 'index') if (axis == 0) else ('a', 'column'))
(label_article, label_type) = (('a', 'column') if (axis == 0) else ('an', 'index'))
msg = "'{key}' is both {level_article} {level_type} level and {label_article} {label_type} label, which is ambiguous.".format(key=key, level_article=level_article, level_type=level_type, label_article=label_article, label_type=label_type)
raise ValueError(msg)
|
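A sketch of the ambiguity check (hypothetical data): a key that names both an index level and a column of the other axis triggers the ValueError described above.

    # Illustrative sketch only.
    import pandas as pd

    df = pd.DataFrame({'a': [1, 2]},
                      index=pd.Index(['x', 'y'], name='a'))

    try:
        df._check_label_or_level_ambiguity('a')
    except ValueError as err:
        print(err)  # "'a' is both an index level and a column label, which is ambiguous."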
def _get_label_or_level_values(self, key, axis=0):
"\n Return a 1-D array of values associated with `key`, a label or level\n from the given `axis`.\n\n Retrieval logic:\n - (axis=0): Return column values if `key` matches a column label.\n Otherwise return index level values if `key` matches an index\n level.\n - (axis=1): Return row values if `key` matches an index label.\n Otherwise return column level values if 'key' matches a column\n level\n\n Parameters\n ----------\n key: str\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n values: np.ndarray\n\n Raises\n ------\n KeyError\n if `key` matches neither a label nor a level\n ValueError\n if `key` matches multiple labels\n FutureWarning\n if `key` is ambiguous. This will become an ambiguity error in a\n future version\n "
if (self.ndim > 2):
raise NotImplementedError('_get_label_or_level_values is not implemented for {type}'.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if (ax != axis)]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
if (values.ndim > 1):
if (other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex)):
multi_message = '\nFor a multi-index, the label must be a tuple with elements corresponding to each level.'
else:
multi_message = ''
label_axis_name = ('column' if (axis == 0) else 'index')
raise ValueError("The {label_axis_name} label '{key}' is not unique.{multi_message}".format(key=key, label_axis_name=label_axis_name, multi_message=multi_message))
return values
| -345,589,549,459,989,760 |
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
|
pandas/core/generic.py
|
_get_label_or_level_values
|
kapilepatel/pandas
|
python
|
def _get_label_or_level_values(self, key, axis=0):
"\n Return a 1-D array of values associated with `key`, a label or level\n from the given `axis`.\n\n Retrieval logic:\n - (axis=0): Return column values if `key` matches a column label.\n Otherwise return index level values if `key` matches an index\n level.\n - (axis=1): Return row values if `key` matches an index label.\n Otherwise return column level values if 'key' matches a column\n level\n\n Parameters\n ----------\n key: str\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n values: np.ndarray\n\n Raises\n ------\n KeyError\n if `key` matches neither a label nor a level\n ValueError\n if `key` matches multiple labels\n FutureWarning\n if `key` is ambiguous. This will become an ambiguity error in a\n future version\n "
if (self.ndim > 2):
raise NotImplementedError('_get_label_or_level_values is not implemented for {type}'.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if (ax != axis)]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
if (values.ndim > 1):
if (other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex)):
multi_message = '\nFor a multi-index, the label must be a tuple with elements corresponding to each level.'
else:
                multi_message = ''
label_axis_name = ('column' if (axis == 0) else 'index')
raise ValueError("The {label_axis_name} label '{key}' is not unique.{multi_message}".format(key=key, label_axis_name=label_axis_name, multi_message=multi_message))
return values
|
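A sketch of the retrieval logic (hypothetical data): on axis=0 a column label wins over an index level, an index level is used as a fallback, and anything else raises KeyError.

    # Illustrative sketch only.
    import pandas as pd

    df = pd.DataFrame({'b': [10, 20]},
                      index=pd.Index(['x', 'y'], name='a'))

    df._get_label_or_level_values('b')  # array([10, 20])                 -- column values
    df._get_label_or_level_values('a')  # array(['x', 'y'], dtype=object) -- index level values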
def _drop_labels_or_levels(self, keys, axis=0):
'\n Drop labels and/or levels for the given `axis`.\n\n For each key in `keys`:\n - (axis=0): If key matches a column label then drop the column.\n Otherwise if key matches an index level then drop the level.\n - (axis=1): If key matches an index label then drop the row.\n Otherwise if key matches a column level then drop the level.\n\n Parameters\n ----------\n keys: str or list of str\n labels or levels to drop\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n dropped: DataFrame\n\n Raises\n ------\n ValueError\n if any `keys` match neither a label nor a level\n '
if (self.ndim > 2):
raise NotImplementedError('_drop_labels_or_levels is not implemented for {type}'.format(type=type(self)))
axis = self._get_axis_number(axis)
keys = com.maybe_make_list(keys)
invalid_keys = [k for k in keys if (not self._is_label_or_level_reference(k, axis=axis))]
if invalid_keys:
raise ValueError('The following keys are not valid labels or levels for axis {axis}: {invalid_keys}'.format(axis=axis, invalid_keys=invalid_keys))
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if (not self._is_level_reference(k, axis=axis))]
dropped = self.copy()
if (axis == 0):
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
dropped.columns = RangeIndex(dropped.columns.size)
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
| -2,654,759,804,545,341,400 |
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
|
pandas/core/generic.py
|
_drop_labels_or_levels
|
kapilepatel/pandas
|
python
|
def _drop_labels_or_levels(self, keys, axis=0):
'\n Drop labels and/or levels for the given `axis`.\n\n For each key in `keys`:\n - (axis=0): If key matches a column label then drop the column.\n Otherwise if key matches an index level then drop the level.\n - (axis=1): If key matches an index label then drop the row.\n Otherwise if key matches a column level then drop the level.\n\n Parameters\n ----------\n keys: str or list of str\n labels or levels to drop\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n dropped: DataFrame\n\n Raises\n ------\n ValueError\n if any `keys` match neither a label nor a level\n '
if (self.ndim > 2):
raise NotImplementedError('_drop_labels_or_levels is not implemented for {type}'.format(type=type(self)))
axis = self._get_axis_number(axis)
keys = com.maybe_make_list(keys)
invalid_keys = [k for k in keys if (not self._is_label_or_level_reference(k, axis=axis))]
if invalid_keys:
raise ValueError('The following keys are not valid labels or levels for axis {axis}: {invalid_keys}'.format(axis=axis, invalid_keys=invalid_keys))
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if (not self._is_level_reference(k, axis=axis))]
dropped = self.copy()
if (axis == 0):
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
dropped.columns = RangeIndex(dropped.columns.size)
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
|
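A sketch of the drop behaviour (hypothetical data): with axis=0, keys matching columns are dropped as columns, keys matching index levels are reset away, and unknown keys raise ValueError.

    # Illustrative sketch only.
    import pandas as pd

    df = pd.DataFrame({'b': [1, 2], 'c': [3, 4]},
                      index=pd.Index(['x', 'y'], name='a'))

    df._drop_labels_or_levels('b')         # drops column 'b'
    df._drop_labels_or_levels(['a', 'c'])  # drops index level 'a' and column 'c'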
def __iter__(self):
'Iterate over info axis'
return iter(self._info_axis)
| -2,233,122,514,471,977,500 |
Iterate over info axis
|
pandas/core/generic.py
|
__iter__
|
kapilepatel/pandas
|
python
|
def __iter__(self):
return iter(self._info_axis)
|
def keys(self):
"Get the 'info axis' (see Indexing for more)\n\n This is index for Series, columns for DataFrame and major_axis for\n Panel.\n "
return self._info_axis
| 5,350,577,501,278,983,000 |
Get the 'info axis' (see Indexing for more)
This is index for Series, columns for DataFrame and major_axis for
Panel.
|
pandas/core/generic.py
|
keys
|
kapilepatel/pandas
|
python
|
def keys(self):
"Get the 'info axis' (see Indexing for more)\n\n This is index for Series, columns for DataFrame and major_axis for\n Panel.\n "
return self._info_axis
|
def iteritems(self):
'Iterate over (label, values) on info axis\n\n This is index for Series, columns for DataFrame, major_axis for Panel,\n and so on.\n '
for h in self._info_axis:
(yield (h, self[h]))
| -8,959,623,205,265,551,000 |
Iterate over (label, values) on info axis
This is index for Series, columns for DataFrame, major_axis for Panel,
and so on.
|
pandas/core/generic.py
|
iteritems
|
kapilepatel/pandas
|
python
|
def iteritems(self):
'Iterate over (label, values) on info axis\n\n This is index for Series, columns for DataFrame, major_axis for Panel,\n and so on.\n '
for h in self._info_axis:
(yield (h, self[h]))
|
def __len__(self):
'Returns length of info axis'
return len(self._info_axis)
| 2,516,517,228,926,761,500 |
Returns length of info axis
|
pandas/core/generic.py
|
__len__
|
kapilepatel/pandas
|
python
|
def __len__(self):
return len(self._info_axis)
|
def __contains__(self, key):
'True if the key is in the info axis'
return (key in self._info_axis)
| 3,148,144,458,199,314,000 |
True if the key is in the info axis
|
pandas/core/generic.py
|
__contains__
|
kapilepatel/pandas
|
python
|
def __contains__(self, key):
return (key in self._info_axis)
|
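A brief sketch (hypothetical data) of what the preceding info-axis helpers mean in practice: for a DataFrame the info axis is the columns, so iteration, membership tests and keys() all operate on column labels.

    # Illustrative sketch only.
    import pandas as pd

    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})

    list(df)         # ['a', 'b'] -- __iter__ walks the info axis (columns)
    df.keys()        # the columns Index
    'a' in df        # True       -- __contains__ checks the info axis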
@property
def empty(self):
"\n Indicator whether DataFrame is empty.\n\n True if DataFrame is entirely empty (no items), meaning any of the\n axes are of length 0.\n\n Returns\n -------\n bool\n If DataFrame is empty, return True, if not return False.\n\n See Also\n --------\n Series.dropna\n DataFrame.dropna\n\n Notes\n -----\n If DataFrame contains only NaNs, it is still not considered empty. See\n the example below.\n\n Examples\n --------\n An example of an actual empty DataFrame. Notice the index is empty:\n\n >>> df_empty = pd.DataFrame({'A' : []})\n >>> df_empty\n Empty DataFrame\n Columns: [A]\n Index: []\n >>> df_empty.empty\n True\n\n If we only have NaNs in our DataFrame, it is not considered empty! We\n will need to drop the NaNs to make the DataFrame empty:\n\n >>> df = pd.DataFrame({'A' : [np.nan]})\n >>> df\n A\n 0 NaN\n >>> df.empty\n False\n >>> df.dropna().empty\n True\n "
return any(((len(self._get_axis(a)) == 0) for a in self._AXIS_ORDERS))
| 7,124,979,622,633,238,000 |
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
|
pandas/core/generic.py
|
empty
|
kapilepatel/pandas
|
python
|
@property
def empty(self):
"\n Indicator whether DataFrame is empty.\n\n True if DataFrame is entirely empty (no items), meaning any of the\n axes are of length 0.\n\n Returns\n -------\n bool\n If DataFrame is empty, return True, if not return False.\n\n See Also\n --------\n Series.dropna\n DataFrame.dropna\n\n Notes\n -----\n If DataFrame contains only NaNs, it is still not considered empty. See\n the example below.\n\n Examples\n --------\n An example of an actual empty DataFrame. Notice the index is empty:\n\n >>> df_empty = pd.DataFrame({'A' : []})\n >>> df_empty\n Empty DataFrame\n Columns: [A]\n Index: []\n >>> df_empty.empty\n True\n\n If we only have NaNs in our DataFrame, it is not considered empty! We\n will need to drop the NaNs to make the DataFrame empty:\n\n >>> df = pd.DataFrame({'A' : [np.nan]})\n >>> df\n A\n 0 NaN\n >>> df.empty\n False\n >>> df.dropna().empty\n True\n "
return any(((len(self._get_axis(a)) == 0) for a in self._AXIS_ORDERS))
|
def to_dense(self):
'\n Return dense representation of NDFrame (as opposed to sparse).\n '
return self
| -8,314,391,330,797,823,000 |
Return dense representation of NDFrame (as opposed to sparse).
|
pandas/core/generic.py
|
to_dense
|
kapilepatel/pandas
|
python
|
def to_dense(self):
'\n \n '
return self
|
def _repr_latex_(self):
'\n Returns a LaTeX representation for a particular object.\n Mainly for use with nbconvert (jupyter notebook conversion to pdf).\n '
if config.get_option('display.latex.repr'):
return self.to_latex()
else:
return None
| 3,186,633,606,793,060,000 |
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
|
pandas/core/generic.py
|
_repr_latex_
|
kapilepatel/pandas
|
python
|
def _repr_latex_(self):
'\n Returns a LaTeX representation for a particular object.\n Mainly for use with nbconvert (jupyter notebook conversion to pdf).\n '
if config.get_option('display.latex.repr'):
return self.to_latex()
else:
return None
|