Code | Summary
---|---|
Please provide a description of the function:def describe_trainable_vars():
    train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    if len(train_vars) == 0:
        logger.warn("No trainable variables in the graph!")
        return
    total = 0
    total_bytes = 0
    data = []
    for v in train_vars:
        if v.name.startswith('tower'):
            continue
        shape = v.get_shape()
        ele = shape.num_elements()
        if ele is None:
            logger.warn("Shape of variable {} is not fully defined but {}.".format(v.name, shape))
            ele = 0
        try:
            shape = shape.as_list()
        except ValueError:
            shape = '<unknown>'
        total += ele
        total_bytes += ele * v.dtype.size
        data.append([v.name, shape, ele, v.device, v.dtype.base_dtype.name])
    headers = ['name', 'shape', '#elements', 'device', 'dtype']

    dtypes = list(set([x[4] for x in data]))
    if len(dtypes) == 1 and dtypes[0] == "float32":
        # don't log the dtype if all vars are float32 (default dtype)
        for x in data:
            del x[4]
        del headers[4]

    devices = set([x[3] for x in data])
    if len(devices) == 1:
        # don't log the device if all vars on the same device
        for x in data:
            del x[3]
        del headers[3]

    table = tabulate(data, headers=headers)

    size_mb = total_bytes / 1024.0**2
    summary_msg = colored(
        "\nNumber of trainable variables: {}".format(len(data)) +
        "\nNumber of parameters (elements): {}".format(total) +
        "\nStorage space needed for all trainable variables: {:.02f}MB".format(size_mb),
        'cyan')
    logger.info(colored("List of Trainable Variables: \n", 'cyan') + table + summary_msg)
|
[
"\n Print a description of the current model parameters.\n Skip variables starting with \"tower\", as they are just duplicates built by data-parallel logic.\n "
] |
Please provide a description of the function:def get_shape_str(tensors):
    if isinstance(tensors, (list, tuple)):
        for v in tensors:
            assert isinstance(v, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(v))
        shape_str = ",".join(
            map(lambda x: str(x.get_shape().as_list()), tensors))
    else:
        assert isinstance(tensors, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(tensors))
        shape_str = str(tensors.get_shape().as_list())
    return shape_str
|
[
"\n Internally used by layer registry, to print shapes of inputs/outputs of layers.\n\n Args:\n tensors (list or tf.Tensor): a tensor or a list of tensors\n Returns:\n str: a string to describe the shape\n "
] |
Please provide a description of the function:def contrastive_loss(left, right, y, margin, extra=False, scope="constrastive_loss"):
    with tf.name_scope(scope):
        y = tf.cast(y, tf.float32)

        delta = tf.reduce_sum(tf.square(left - right), 1)
        delta_sqrt = tf.sqrt(delta + 1e-10)

        match_loss = delta
        missmatch_loss = tf.square(tf.nn.relu(margin - delta_sqrt))

        loss = tf.reduce_mean(0.5 * (y * match_loss + (1 - y) * missmatch_loss))

        if extra:
            num_pos = tf.count_nonzero(y)
            num_neg = tf.count_nonzero(1 - y)
            pos_dist = tf.where(tf.equal(num_pos, 0), 0.,
                                tf.reduce_sum(y * delta_sqrt) / tf.cast(num_pos, tf.float32),
                                name="pos-dist")
            neg_dist = tf.where(tf.equal(num_neg, 0), 0.,
                                tf.reduce_sum((1 - y) * delta_sqrt) / tf.cast(num_neg, tf.float32),
                                name="neg-dist")
            return loss, pos_dist, neg_dist
        else:
            return loss
|
[
"Loss for Siamese networks as described in the paper:\n `Learning a Similarity Metric Discriminatively, with Application to Face\n Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al.\n\n .. math::\n \\frac{1}{2} [y \\cdot d^2 + (1-y) \\cdot \\max(0, m - d)^2], d = \\Vert l - r \\Vert_2\n\n Args:\n left (tf.Tensor): left feature vectors of shape [Batch, N].\n right (tf.Tensor): right feature vectors of shape [Batch, N].\n y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar.\n margin (float): horizon for negative examples (y==0).\n extra (bool): also return distances for pos and neg.\n\n Returns:\n tf.Tensor: constrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist)\n "
] |
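A minimal usage sketch for the `contrastive_loss` entry above, assuming a TF 1.x graph; the placeholder names, the embedding width 128 and margin=0.5 are illustrative values, not taken from the source.

import tensorflow as tf

# Hypothetical inputs: two embedding batches and a binary similarity label.
left = tf.placeholder(tf.float32, [None, 128], name='left_embedding')
right = tf.placeholder(tf.float32, [None, 128], name='right_embedding')
y = tf.placeholder(tf.int32, [None], name='is_similar')

# extra=True additionally returns the average positive/negative distances,
# which are convenient to log as training summaries.
loss, pos_dist, neg_dist = contrastive_loss(left, right, y, margin=0.5, extra=True)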
Please provide a description of the function:def siamese_cosine_loss(left, right, y, scope="cosine_loss"):
    def l2_norm(t, eps=1e-12):
        with tf.name_scope("l2_norm"):
            return tf.sqrt(tf.reduce_sum(tf.square(t), 1) + eps)

    with tf.name_scope(scope):
        y = 2 * tf.cast(y, tf.float32) - 1
        pred = tf.reduce_sum(left * right, 1) / (l2_norm(left) * l2_norm(right) + 1e-10)
        return tf.nn.l2_loss(y - pred) / tf.cast(tf.shape(left)[0], tf.float32)
|
[
"Loss for Siamese networks (cosine version).\n Same as :func:`contrastive_loss` but with different similarity measurement.\n\n .. math::\n [\\frac{l \\cdot r}{\\lVert l\\rVert \\lVert r\\rVert} - (2y-1)]^2\n\n Args:\n left (tf.Tensor): left feature vectors of shape [Batch, N].\n right (tf.Tensor): right feature vectors of shape [Batch, N].\n y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar.\n\n Returns:\n tf.Tensor: cosine-loss as a scalar tensor.\n ",
"\n Returns:\n tf.Tensor: norm of 2D input tensor on axis 1\n "
] |
Please provide a description of the function:def triplet_loss(anchor, positive, negative, margin, extra=False, scope="triplet_loss"):
    with tf.name_scope(scope):
        d_pos = tf.reduce_sum(tf.square(anchor - positive), 1)
        d_neg = tf.reduce_sum(tf.square(anchor - negative), 1)

        loss = tf.reduce_mean(tf.maximum(0., margin + d_pos - d_neg))

        if extra:
            pos_dist = tf.reduce_mean(tf.sqrt(d_pos + 1e-10), name='pos-dist')
            neg_dist = tf.reduce_mean(tf.sqrt(d_neg + 1e-10), name='neg-dist')
            return loss, pos_dist, neg_dist
        else:
            return loss
|
[
"Loss for Triplet networks as described in the paper:\n `FaceNet: A Unified Embedding for Face Recognition and Clustering\n <https://arxiv.org/abs/1503.03832>`_\n by Schroff et al.\n\n Learn embeddings from an anchor point and a similar input (positive) as\n well as a not-similar input (negative).\n Intuitively, a matching pair (anchor, positive) should have a smaller relative distance\n than a non-matching pair (anchor, negative).\n\n .. math::\n \\max(0, m + \\Vert a-p\\Vert^2 - \\Vert a-n\\Vert^2)\n\n Args:\n anchor (tf.Tensor): anchor feature vectors of shape [Batch, N].\n positive (tf.Tensor): features of positive match of the same shape.\n negative (tf.Tensor): features of negative match of the same shape.\n margin (float): horizon for negative examples\n extra (bool): also return distances for pos and neg.\n\n Returns:\n tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist)\n "
] |
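A corresponding sketch for `triplet_loss`, again assuming a TF 1.x graph; the tensor names, the feature width and margin=0.5 are made-up example values.

import tensorflow as tf

# Hypothetical anchor/positive/negative embeddings of shape [Batch, N].
anchor = tf.placeholder(tf.float32, [None, 128], name='anchor')
positive = tf.placeholder(tf.float32, [None, 128], name='positive')
negative = tf.placeholder(tf.float32, [None, 128], name='negative')

loss, pos_dist, neg_dist = triplet_loss(anchor, positive, negative, margin=0.5, extra=True)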
Please provide a description of the function:def soft_triplet_loss(anchor, positive, negative, extra=True, scope="soft_triplet_loss"):
    eps = 1e-10
    with tf.name_scope(scope):
        d_pos = tf.sqrt(tf.reduce_sum(tf.square(anchor - positive), 1) + eps)
        d_neg = tf.sqrt(tf.reduce_sum(tf.square(anchor - negative), 1) + eps)

        logits = tf.stack([d_pos, d_neg], axis=1)
        ones = tf.ones_like(tf.squeeze(d_pos), dtype="int32")

        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=ones))

        if extra:
            pos_dist = tf.reduce_mean(d_pos, name='pos-dist')
            neg_dist = tf.reduce_mean(d_neg, name='neg-dist')
            return loss, pos_dist, neg_dist
        else:
            return loss
|
[
"Loss for triplet networks as described in the paper:\n `Deep Metric Learning using Triplet Network\n <https://arxiv.org/abs/1412.6622>`_ by Hoffer et al.\n\n It is a softmax loss using :math:`(anchor-positive)^2` and\n :math:`(anchor-negative)^2` as logits.\n\n Args:\n anchor (tf.Tensor): anchor feature vectors of shape [Batch, N].\n positive (tf.Tensor): features of positive match of the same shape.\n negative (tf.Tensor): features of negative match of the same shape.\n extra (bool): also return distances for pos and neg.\n\n Returns:\n tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist)\n "
] |
Please provide a description of the function:def center_loss(embedding, label, num_classes, alpha=0.1, scope="center_loss"):
    nrof_features = embedding.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alpha) * (centers_batch - embedding)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(embedding - centers_batch), name=scope)
    return loss
|
[
"Center-Loss as described in the paper\n `A Discriminative Feature Learning Approach for Deep Face Recognition`\n <http://ydwen.github.io/papers/WenECCV16.pdf> by Wen et al.\n\n Args:\n embedding (tf.Tensor): features produced by the network\n label (tf.Tensor): ground-truth label for each feature\n num_classes (int): number of different classes\n alpha (float): learning rate for updating the centers\n\n Returns:\n tf.Tensor: center loss\n "
] |
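A usage sketch for `center_loss`; the scope name, embedding width, class count and loss weight below are assumptions. Because the function creates a `centers` variable through `tf.get_variable`, it should be called once per graph (or inside a dedicated `tf.variable_scope`).

import tensorflow as tf

embedding = tf.placeholder(tf.float32, [None, 256])   # hypothetical features
label = tf.placeholder(tf.int64, [None])               # hypothetical class ids

with tf.variable_scope('center_loss_vars'):
    c_loss = center_loss(embedding, label, num_classes=10, alpha=0.1)
# total_loss = classification_loss + 0.01 * c_loss   # 0.01 is an example weight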
Please provide a description of the function:def embed(self, x, nfeatures=2):
    list_split = 0
    if isinstance(x, list):
        list_split = len(x)
        x = tf.concat(x, 0)

    # pre-process MNIST dataflow data
    x = tf.expand_dims(x, 3)
    x = x * 2 - 1

    # the embedding network
    net = slim.layers.conv2d(x, 20, 5, scope='conv1')
    net = slim.layers.max_pool2d(net, 2, scope='pool1')
    net = slim.layers.conv2d(net, 50, 5, scope='conv2')
    net = slim.layers.max_pool2d(net, 2, scope='pool2')
    net = slim.layers.flatten(net, scope='flatten3')
    net = slim.layers.fully_connected(net, 500, scope='fully_connected4')
    embeddings = slim.layers.fully_connected(net, nfeatures, activation_fn=None, scope='fully_connected5')

    # if "x" was a list of tensors, then split the embeddings
    if list_split > 0:
        embeddings = tf.split(embeddings, list_split, 0)
    return embeddings
|
[
"Embed all given tensors into an nfeatures-dim space. "
] |
Please provide a description of the function:def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
                     scales=2**np.arange(3, 6)):
    base_anchor = np.array([1, 1, base_size, base_size], dtype='float32') - 1
    ratio_anchors = _ratio_enum(base_anchor, ratios)
    anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
                         for i in range(ratio_anchors.shape[0])])
    return anchors
|
[
"\n Generate anchor (reference) windows by enumerating aspect ratios X\n scales wrt a reference (0, 0, 15, 15) window.\n "
] |
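A quick sanity check of `generate_anchors` with its defaults (it assumes the module's private helpers `_ratio_enum` and `_scale_enum` are available): with 3 ratios and 3 scales it should return a (9, 4) array of (x1, y1, x2, y2) anchors built around the (0, 0, 15, 15) reference window.

import numpy as np

anchors = generate_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2**np.arange(3, 6))
print(anchors.shape)   # expected: (9, 4) -- one row per (ratio, scale) pair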
Please provide a description of the function:def build_graph(self, image, label):
    # In tensorflow, inputs to convolution function are assumed to be
    # NHWC. Add a single channel here.
    image = tf.expand_dims(image, 3)

    image = image * 2 - 1   # center the pixel values at zero

    # The context manager `argscope` sets the default option for all the layers under
    # this context. Here we use 32 channel convolution with shape 3x3
    with argscope([tf.layers.conv2d], padding='same', activation=tf.nn.relu):
        l = tf.layers.conv2d(image, 32, 3, name='conv0')
        l = tf.layers.max_pooling2d(l, 2, 2, padding='valid')
        l = tf.layers.conv2d(l, 32, 3, name='conv1')
        l = tf.layers.conv2d(l, 32, 3, name='conv2')
        l = tf.layers.max_pooling2d(l, 2, 2, padding='valid')
        l = tf.layers.conv2d(l, 32, 3, name='conv3')
        l = tf.layers.flatten(l)
        l = tf.layers.dense(l, 512, activation=tf.nn.relu, name='fc0')
        l = tf.layers.dropout(l, rate=0.5,
                              training=get_current_tower_context().is_training)
    logits = tf.layers.dense(l, 10, activation=tf.identity, name='fc1')

    # a vector of length B with loss of each sample
    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')  # the average cross-entropy loss

    correct = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32, name='correct')
    accuracy = tf.reduce_mean(correct, name='accuracy')

    # This will monitor training error & accuracy (in a moving average fashion). The value will be automatically
    # 1. written to tensorboard
    # 2. written to stat.json
    # 3. printed after each epoch
    train_error = tf.reduce_mean(1 - correct, name='train_error')
    summary.add_moving_summary(train_error, accuracy)

    # Use a regex to find parameters to apply weight decay.
    # Here we apply a weight decay on all W (weight matrix) of all fc layers.
    # If you don't like regex, you can certainly define the cost in any other way.
    wd_cost = tf.multiply(1e-5,
                          regularize_cost('fc.*/kernel', tf.nn.l2_loss),
                          name='regularize_loss')
    total_cost = tf.add_n([wd_cost, cost], name='total_cost')
    summary.add_moving_summary(cost, wd_cost, total_cost)

    # monitor histogram of all weights (of conv and fc layers) in tensorboard
    summary.add_param_summary(('.*/kernel', ['histogram', 'rms']))
    # the function should return the total cost to be optimized
    return total_cost
|
[
"This function should build the model which takes the input variables\n and return cost at the end"
] |
Please provide a description of the function:def print_class_histogram(roidbs):
    dataset = DetectionDataset()
    hist_bins = np.arange(dataset.num_classes + 1)

    # Histogram of ground-truth objects
    gt_hist = np.zeros((dataset.num_classes,), dtype=np.int)
    for entry in roidbs:
        # filter crowd?
        gt_inds = np.where(
            (entry['class'] > 0) & (entry['is_crowd'] == 0))[0]
        gt_classes = entry['class'][gt_inds]
        gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
    data = [[dataset.class_names[i], v] for i, v in enumerate(gt_hist)]
    data.append(['total', sum(x[1] for x in data)])
    # the first line is BG
    table = tabulate(data[1:], headers=['class', '#box'], tablefmt='pipe')
    logger.info("Ground-Truth Boxes:\n" + colored(table, 'cyan'))
|
[
"\n Args:\n roidbs (list[dict]): the same format as the output of `load_training_roidbs`.\n "
] |
Please provide a description of the function:def get_all_anchors(stride=None, sizes=None):
    if stride is None:
        stride = cfg.RPN.ANCHOR_STRIDE
    if sizes is None:
        sizes = cfg.RPN.ANCHOR_SIZES
    # Generates a NAx4 matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
    # are centered on stride / 2, have (approximate) sqrt areas of the specified
    # sizes, and aspect ratios as given.
    cell_anchors = generate_anchors(
        stride,
        scales=np.array(sizes, dtype=np.float) / stride,
        ratios=np.array(cfg.RPN.ANCHOR_RATIOS, dtype=np.float))
    # anchors are intbox here.
    # anchors at featuremap [0,0] are centered at fpcoor (8,8) (half of stride)

    max_size = cfg.PREPROC.MAX_SIZE
    field_size = int(np.ceil(max_size / stride))
    shifts = np.arange(0, field_size) * stride
    shift_x, shift_y = np.meshgrid(shifts, shifts)
    shift_x = shift_x.flatten()
    shift_y = shift_y.flatten()
    shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()
    # Kx4, K = field_size * field_size
    K = shifts.shape[0]

    A = cell_anchors.shape[0]
    field_of_anchors = (
        cell_anchors.reshape((1, A, 4)) +
        shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
    field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4))
    # FSxFSxAx4
    # A lot of rounding happens inside the anchor code anyway
    # assert np.all(field_of_anchors == field_of_anchors.astype('int32'))
    field_of_anchors = field_of_anchors.astype('float32')
    field_of_anchors[:, :, :, [2, 3]] += 1
    return field_of_anchors
|
[
"\n Get all anchors in the largest possible image, shifted, floatbox\n Args:\n stride (int): the stride of anchors.\n sizes (tuple[int]): the sizes (sqrt area) of anchors\n\n Returns:\n anchors: SxSxNUM_ANCHORx4, where S == ceil(MAX_SIZE/STRIDE), floatbox\n The layout in the NUM_ANCHOR dim is NUM_RATIO x NUM_SIZE.\n\n "
] |
Please provide a description of the function:def get_all_anchors_fpn(strides=None, sizes=None):
    if strides is None:
        strides = cfg.FPN.ANCHOR_STRIDES
    if sizes is None:
        sizes = cfg.RPN.ANCHOR_SIZES
    assert len(strides) == len(sizes)
    foas = []
    for stride, size in zip(strides, sizes):
        foa = get_all_anchors(stride=stride, sizes=(size,))
        foas.append(foa)
    return foas
|
[
"\n Returns:\n [anchors]: each anchors is a SxSx NUM_ANCHOR_RATIOS x4 array.\n "
] |
Please provide a description of the function:def get_anchor_labels(anchors, gt_boxes, crowd_boxes):
    # This function will modify labels and return the filtered inds
    def filter_box_label(labels, value, max_num):
        curr_inds = np.where(labels == value)[0]
        if len(curr_inds) > max_num:
            disable_inds = np.random.choice(
                curr_inds, size=(len(curr_inds) - max_num),
                replace=False)
            labels[disable_inds] = -1    # ignore them
            curr_inds = np.where(labels == value)[0]
        return curr_inds

    NA, NB = len(anchors), len(gt_boxes)
    assert NB > 0  # empty images should have been filtered already
    box_ious = np_iou(anchors, gt_boxes)  # NA x NB
    ious_argmax_per_anchor = box_ious.argmax(axis=1)  # NA,
    ious_max_per_anchor = box_ious.max(axis=1)
    ious_max_per_gt = np.amax(box_ious, axis=0, keepdims=True)  # 1xNB
    # for each gt, find all those anchors (including ties) that has the max ious with it
    anchors_with_max_iou_per_gt = np.where(box_ious == ious_max_per_gt)[0]

    # Setting NA labels: 1--fg 0--bg -1--ignore
    anchor_labels = -np.ones((NA,), dtype='int32')  # NA,

    # the order of setting neg/pos labels matter
    anchor_labels[anchors_with_max_iou_per_gt] = 1
    anchor_labels[ious_max_per_anchor >= cfg.RPN.POSITIVE_ANCHOR_THRESH] = 1
    anchor_labels[ious_max_per_anchor < cfg.RPN.NEGATIVE_ANCHOR_THRESH] = 0

    # label all non-ignore candidate boxes which overlap crowd as ignore
    if crowd_boxes.size > 0:
        cand_inds = np.where(anchor_labels >= 0)[0]
        cand_anchors = anchors[cand_inds]
        ioas = np_ioa(crowd_boxes, cand_anchors)
        overlap_with_crowd = cand_inds[ioas.max(axis=0) > cfg.RPN.CROWD_OVERLAP_THRESH]
        anchor_labels[overlap_with_crowd] = -1

    # Subsample fg labels: ignore some fg if fg is too many
    target_num_fg = int(cfg.RPN.BATCH_PER_IM * cfg.RPN.FG_RATIO)
    fg_inds = filter_box_label(anchor_labels, 1, target_num_fg)
    # Keep an image even if there is no foreground anchors
    # if len(fg_inds) == 0:
    #     raise MalformedData("No valid foreground for RPN!")

    # Subsample bg labels. num_bg is not allowed to be too many
    old_num_bg = np.sum(anchor_labels == 0)
    if old_num_bg == 0:
        # No valid bg in this image, skip.
        raise MalformedData("No valid background for RPN!")
    target_num_bg = cfg.RPN.BATCH_PER_IM - len(fg_inds)
    filter_box_label(anchor_labels, 0, target_num_bg)    # ignore return values

    # Set anchor boxes: the best gt_box for each fg anchor
    anchor_boxes = np.zeros((NA, 4), dtype='float32')
    fg_boxes = gt_boxes[ious_argmax_per_anchor[fg_inds], :]
    anchor_boxes[fg_inds, :] = fg_boxes
    # assert len(fg_inds) + np.sum(anchor_labels == 0) == cfg.RPN.BATCH_PER_IM
    return anchor_labels, anchor_boxes
|
[
"\n Label each anchor as fg/bg/ignore.\n Args:\n anchors: Ax4 float\n gt_boxes: Bx4 float, non-crowd\n crowd_boxes: Cx4 float\n\n Returns:\n anchor_labels: (A,) int. Each element is {-1, 0, 1}\n anchor_boxes: Ax4. Contains the target gt_box for each anchor when the anchor is fg.\n "
] |
Please provide a description of the function:def get_rpn_anchor_input(im, boxes, is_crowd):
    boxes = boxes.copy()
    all_anchors = np.copy(get_all_anchors())
    # fHxfWxAx4 -> (-1, 4)
    featuremap_anchors_flatten = all_anchors.reshape((-1, 4))

    # only use anchors inside the image
    inside_ind, inside_anchors = filter_boxes_inside_shape(featuremap_anchors_flatten, im.shape[:2])
    # obtain anchor labels and their corresponding gt boxes
    anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1])

    # Fill them back to original size: fHxfWx1, fHxfWx4
    anchorH, anchorW = all_anchors.shape[:2]
    featuremap_labels = -np.ones((anchorH * anchorW * cfg.RPN.NUM_ANCHOR, ), dtype='int32')
    featuremap_labels[inside_ind] = anchor_labels
    featuremap_labels = featuremap_labels.reshape((anchorH, anchorW, cfg.RPN.NUM_ANCHOR))
    featuremap_boxes = np.zeros((anchorH * anchorW * cfg.RPN.NUM_ANCHOR, 4), dtype='float32')
    featuremap_boxes[inside_ind, :] = anchor_gt_boxes
    featuremap_boxes = featuremap_boxes.reshape((anchorH, anchorW, cfg.RPN.NUM_ANCHOR, 4))
    return featuremap_labels, featuremap_boxes
|
[
"\n Args:\n im: an image\n boxes: nx4, floatbox, gt. shoudn't be changed\n is_crowd: n,\n\n Returns:\n The anchor labels and target boxes for each pixel in the featuremap.\n fm_labels: fHxfWxNA\n fm_boxes: fHxfWxNAx4\n NA will be NUM_ANCHOR_SIZES x NUM_ANCHOR_RATIOS\n "
] |
Please provide a description of the function:def get_multilevel_rpn_anchor_input(im, boxes, is_crowd):
    boxes = boxes.copy()
    anchors_per_level = get_all_anchors_fpn()
    flatten_anchors_per_level = [k.reshape((-1, 4)) for k in anchors_per_level]
    all_anchors_flatten = np.concatenate(flatten_anchors_per_level, axis=0)

    inside_ind, inside_anchors = filter_boxes_inside_shape(all_anchors_flatten, im.shape[:2])
    anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1])

    # map back to all_anchors, then split to each level
    num_all_anchors = all_anchors_flatten.shape[0]
    all_labels = -np.ones((num_all_anchors, ), dtype='int32')
    all_labels[inside_ind] = anchor_labels
    all_boxes = np.zeros((num_all_anchors, 4), dtype='float32')
    all_boxes[inside_ind] = anchor_gt_boxes

    start = 0
    multilevel_inputs = []
    for level_anchor in anchors_per_level:
        assert level_anchor.shape[2] == len(cfg.RPN.ANCHOR_RATIOS)
        anchor_shape = level_anchor.shape[:3]   # fHxfWxNUM_ANCHOR_RATIOS
        num_anchor_this_level = np.prod(anchor_shape)
        end = start + num_anchor_this_level
        multilevel_inputs.append(
            (all_labels[start: end].reshape(anchor_shape),
             all_boxes[start: end, :].reshape(anchor_shape + (4,))
             ))
        start = end
    assert end == num_all_anchors, "{} != {}".format(end, num_all_anchors)
    return multilevel_inputs
|
[
"\n Args:\n im: an image\n boxes: nx4, floatbox, gt. shoudn't be changed\n is_crowd: n,\n\n Returns:\n [(fm_labels, fm_boxes)]: Returns a tuple for each FPN level.\n Each tuple contains the anchor labels and target boxes for each pixel in the featuremap.\n\n fm_labels: fHxfWx NUM_ANCHOR_RATIOS\n fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4\n "
] |
Please provide a description of the function:def get_train_dataflow():
    roidbs = DetectionDataset().load_training_roidbs(cfg.DATA.TRAIN)
    print_class_histogram(roidbs)

    # Valid training images should have at least one fg box.
    # But this filter shall not be applied for testing.
    num = len(roidbs)
    roidbs = list(filter(lambda img: len(img['boxes'][img['is_crowd'] == 0]) > 0, roidbs))
    logger.info("Filtered {} images which contain no non-crowd groundtruth boxes. Total #images for training: {}".format(
        num - len(roidbs), len(roidbs)))

    ds = DataFromList(roidbs, shuffle=True)

    aug = imgaug.AugmentorList(
        [CustomResize(cfg.PREPROC.TRAIN_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE),
         imgaug.Flip(horiz=True)])

    def preprocess(roidb):
        fname, boxes, klass, is_crowd = roidb['file_name'], roidb['boxes'], roidb['class'], roidb['is_crowd']
        boxes = np.copy(boxes)
        im = cv2.imread(fname, cv2.IMREAD_COLOR)
        assert im is not None, fname
        im = im.astype('float32')
        height, width = im.shape[:2]
        # assume floatbox as input
        assert boxes.dtype == np.float32, "Loader has to return floating point boxes!"

        if not cfg.DATA.ABSOLUTE_COORD:
            boxes[:, 0::2] *= width
            boxes[:, 1::2] *= height

        # augmentation:
        im, params = aug.augment_return_params(im)
        points = box_to_point8(boxes)
        points = aug.augment_coords(points, params)
        boxes = point8_to_box(points)
        assert np.min(np_area(boxes)) > 0, "Some boxes have zero area!"

        ret = {'image': im}
        # rpn anchor:
        try:
            if cfg.MODE_FPN:
                multilevel_anchor_inputs = get_multilevel_rpn_anchor_input(im, boxes, is_crowd)
                for i, (anchor_labels, anchor_boxes) in enumerate(multilevel_anchor_inputs):
                    ret['anchor_labels_lvl{}'.format(i + 2)] = anchor_labels
                    ret['anchor_boxes_lvl{}'.format(i + 2)] = anchor_boxes
            else:
                # anchor_labels, anchor_boxes
                ret['anchor_labels'], ret['anchor_boxes'] = get_rpn_anchor_input(im, boxes, is_crowd)

            boxes = boxes[is_crowd == 0]    # skip crowd boxes in training target
            klass = klass[is_crowd == 0]
            ret['gt_boxes'] = boxes
            ret['gt_labels'] = klass
            if not len(boxes):
                raise MalformedData("No valid gt_boxes!")
        except MalformedData as e:
            log_once("Input {} is filtered for training: {}".format(fname, str(e)), 'warn')
            return None

        if cfg.MODE_MASK:
            # augmentation will modify the polys in-place
            segmentation = copy.deepcopy(roidb['segmentation'])
            segmentation = [segmentation[k] for k in range(len(segmentation)) if not is_crowd[k]]
            assert len(segmentation) == len(boxes)

            # Apply augmentation on polygon coordinates.
            # And produce one image-sized binary mask per box.
            masks = []
            width_height = np.asarray([width, height], dtype=np.float32)
            for polys in segmentation:
                if not cfg.DATA.ABSOLUTE_COORD:
                    polys = [p * width_height for p in polys]
                polys = [aug.augment_coords(p, params) for p in polys]
                masks.append(segmentation_to_mask(polys, im.shape[0], im.shape[1]))
            masks = np.asarray(masks, dtype='uint8')    # values in {0, 1}
            ret['gt_masks'] = masks

            # from viz import draw_annotation, draw_mask
            # viz = draw_annotation(im, boxes, klass)
            # for mask in masks:
            #     viz = draw_mask(viz, mask)
            # tpviz.interactive_imshow(viz)
        return ret

    if cfg.DATA.NUM_WORKERS > 0:
        buffer_size = cfg.DATA.NUM_WORKERS * 20
        if cfg.TRAINER == 'horovod':
            ds = MultiThreadMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size)
            # MPI does not like fork()
        else:
            ds = MultiProcessMapDataZMQ(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size)
    else:
        ds = MapData(ds, preprocess)
    return ds
|
[
"\n Return a training dataflow. Each datapoint consists of the following:\n\n An image: (h, w, 3),\n\n 1 or more pairs of (anchor_labels, anchor_boxes):\n anchor_labels: (h', w', NA)\n anchor_boxes: (h', w', NA, 4)\n\n gt_boxes: (N, 4)\n gt_labels: (N,)\n\n If MODE_MASK, gt_masks: (N, h, w)\n "
] |
Please provide a description of the function:def get_eval_dataflow(name, shard=0, num_shards=1):
    roidbs = DetectionDataset().load_inference_roidbs(name)

    num_imgs = len(roidbs)
    img_per_shard = num_imgs // num_shards
    img_range = (shard * img_per_shard, (shard + 1) * img_per_shard if shard + 1 < num_shards else num_imgs)

    # no filter for training
    ds = DataFromListOfDict(roidbs[img_range[0]: img_range[1]], ['file_name', 'image_id'])

    def f(fname):
        im = cv2.imread(fname, cv2.IMREAD_COLOR)
        assert im is not None, fname
        return im
    ds = MapDataComponent(ds, f, 0)
    # Evaluation itself may be multi-threaded, therefore don't add prefetch here.
    return ds
|
[
"\n Args:\n name (str): name of the dataset to evaluate\n shard, num_shards: to get subset of evaluation data\n "
] |
Please provide a description of the function:def override_to_local_variable(enable=True):
    if enable:
        def custom_getter(getter, name, *args, **kwargs):
            _replace_global_by_local(kwargs)
            return getter(name, *args, **kwargs)

        with custom_getter_scope(custom_getter):
            yield
    else:
        yield
|
[
"\n Returns:\n a context where all variables will be created as local.\n "
] |
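Since `override_to_local_variable` is written as a generator (note the `yield`), a sketch of its intended use as a context manager looks like the following; the variable name is arbitrary and only serves as an example.

import tensorflow as tf

with override_to_local_variable():
    # variables created here are registered as LOCAL_VARIABLES instead of GLOBAL_VARIABLES
    step_counter = tf.get_variable('step_counter', shape=[], dtype=tf.int64,
                                   initializer=tf.zeros_initializer())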
Please provide a description of the function:def merge_grad_list(all_grads, all_vars):
    return [list(zip(gs, vs)) for gs, vs in zip(all_grads, all_vars)]
|
[
"\n Args:\n all_grads (K x N): gradients\n all_vars(K x N): variables\n\n Return:\n K x N x 2: list of list of (grad, var) pairs\n "
] |
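A tiny worked example of `merge_grad_list`, with plain Python strings standing in for gradient and variable tensors, just to show the K x N x 2 layout it produces:

all_grads = [['g0_a', 'g0_b'], ['g1_a', 'g1_b']]   # K=2 towers, N=2 variables
all_vars = [['v0_a', 'v0_b'], ['v1_a', 'v1_b']]
print(merge_grad_list(all_grads, all_vars))
# [[('g0_a', 'v0_a'), ('g0_b', 'v0_b')], [('g1_a', 'v1_a'), ('g1_b', 'v1_b')]]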
Please provide a description of the function:def allreduce_grads(all_grads, average):
    if get_tf_version_tuple() <= (1, 12):
        from tensorflow.contrib import nccl
    else:
        from tensorflow.python.ops import nccl_ops as nccl
    nr_tower = len(all_grads)
    if nr_tower == 1:
        return all_grads
    new_all_grads = []  # N x K
    for grads in zip(*all_grads):
        summed = nccl.all_sum(grads)

        grads_for_devices = []  # K
        for g in summed:
            with tf.device(g.device):
                # tensorflow/benchmarks didn't average gradients
                if average:
                    g = tf.multiply(g, 1.0 / nr_tower)
            grads_for_devices.append(g)
        new_all_grads.append(grads_for_devices)

    # transpose to K x N
    ret = list(zip(*new_all_grads))
    return ret
|
[
"\n All-reduce average the gradients among K devices. Results are broadcasted to all devices.\n\n Args:\n all_grads (K x N): List of list of gradients. N is the number of variables.\n average (bool): average gradients or not.\n\n Returns:\n K x N: same as input, but each grad is replaced by the average over K devices.\n "
] |
Please provide a description of the function:def allreduce_grads_hierarchical(all_grads, devices, average=False):
    num_gpu = len(devices)
    assert num_gpu == 8, num_gpu
    assert len(all_grads) == num_gpu, len(all_grads)

    group_size = num_gpu // 2

    agg_all_grads = []  # N x K
    for varid, grads in enumerate(zip(*all_grads)):
        # grads: K gradients
        g0_main_gpu = varid % num_gpu
        g1_main_gpu = (g0_main_gpu + group_size) % num_gpu
        g0_start = 0 if g0_main_gpu < group_size else group_size
        g1_start = 0 if g1_main_gpu < group_size else group_size
        assert g0_start != g1_start
        g0_grads = grads[g0_start: g0_start + group_size]
        g1_grads = grads[g1_start: g1_start + group_size]

        with tf.device(devices[g0_main_gpu]):
            g0_agg = tf.add_n(g0_grads, name='group0_agg')

        with tf.device(devices[g1_main_gpu]):
            g1_agg = tf.add_n(g1_grads, name='group1_agg')
            g1_total_agg = tf.add(g0_agg, g1_agg, name='group1_total_agg')

        with tf.device(devices[g0_main_gpu]):
            g0_total_agg = tf.identity(g1_total_agg, name='group0_total_agg')

        agg_grads = []  # K aggregated grads
        for k in range(num_gpu):
            if (k < group_size) == (g0_main_gpu < group_size):
                main_gpu = g0_total_agg
            else:
                main_gpu = g1_total_agg
            with tf.device(devices[k]):
                if not average:
                    device_total_agg = tf.identity(
                        main_gpu, name='device{}_total_agg'.format(k))
                else:
                    # TODO where to put average?
                    device_total_agg = tf.multiply(
                        main_gpu, 1.0 / num_gpu, name='device{}_total_agg'.format(k))
                agg_grads.append(device_total_agg)
        agg_all_grads.append(agg_grads)

    # transpose
    agg_all_grads = list(zip(*agg_all_grads))  # K x Nvar
    return agg_all_grads
|
[
"\n Hierarchical allreduce for DGX-1 system.\n\n Args:\n all_grads (K x N): List of list of gradients. N is the number of variables.\n devices ([str]): K str for the K devices.\n average (bool): average gradients or not.\n\n Returns:\n (K x N): same as input, but each grad is replaced by the average over K lists.\n "
] |
Please provide a description of the function:def aggregate_grads(all_grads,
                    colocation=False,
                    devices=None,
                    average=True):
    assert not (devices is not None and colocation)
    if devices is not None:
        assert isinstance(devices, list), devices
    nr_tower = len(all_grads)
    if nr_tower == 1:
        return all_grads[0]

    def aggregate(grads):
        if average:
            return tf.multiply(tf.add_n(grads), 1.0 / nr_tower)
        else:
            return tf.add_n(grads)

    ret = []
    for idx, grad_and_vars in enumerate(zip(*all_grads)):
        # Ngpu * 2
        v = grad_and_vars[0][1]
        grads = [g for (g, _) in grad_and_vars]

        if colocation:
            with tf.device(v.device):       # colocate summed grad with var
                grad = aggregate(grads)
        elif devices is None:
            grad = aggregate(grads)
        else:
            dev = devices[idx % len(devices)]
            with tf.device(dev):
                grad = aggregate(grads)
        ret.append((grad, v))
    return ret
|
[
"\n Average the gradients.\n\n Args:\n all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples.\n The variables have to be the same across the K lists.\n colocation (bool): colocate gradient averaging on the device of the variable.\n devices (list[str]): assign the averaging to these device in\n round-robin. Cannot be used together with ``colocation``.\n average (bool): do average or sum\n\n Returns:\n (N x 2): A list of N (grad, var) tuples, where grad is averaged or summed over K.\n "
] |
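A small sketch of how `aggregate_grads` consumes per-tower (grad, var) lists; the two towers share the same variable and the gradients are constants, which is enough to show the expected K x N x 2 input layout. The variable name and values are placeholders.

import tensorflow as tf

v = tf.get_variable('w', shape=[2], initializer=tf.zeros_initializer())
tower0 = [(tf.constant([1.0, 2.0]), v)]
tower1 = [(tf.constant([3.0, 4.0]), v)]

# Returns one (grad, var) pair per variable; with average=True the grad is the mean.
averaged = aggregate_grads([tower0, tower1], colocation=False, average=True)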
Please provide a description of the function:def compute_strategy(self, grads):
    for g in grads:
        assert g.shape.is_fully_defined(), "Shape of {} is {}!".format(g.name, g.shape)
    self._shapes = [g.shape for g in grads]
    self._sizes = [g.shape.num_elements() for g in grads]
    self._total_size = sum(self._sizes)
    if self._total_size / self._num_split < 1024:
        logger.info("Skip GradientPacker due to too few gradients.")
        return False

    # should have the same dtype
    dtypes = set([g.dtype for g in grads])
    if len(dtypes) != 1:
        logger.info("Skip GradientPacker due to inconsistent gradient types.")
        return False
    self._grad_dtype = grads[0].dtype

    split_size = self._total_size // self._num_split
    split_size_last = self._total_size - split_size * (self._num_split - 1)
    self._split_sizes = [split_size] * (self._num_split - 1) + [split_size_last]
    logger.info(
        "Will pack {} gradients of total dimension={} into {} splits.".format(
            len(self._sizes), self._total_size, self._num_split))
    return True
|
[
"\n Returns:\n bool - False if grads cannot be packed due to various reasons.\n "
] |
Please provide a description of the function:def pack(self, grads):
    for i, g in enumerate(grads):
        assert g.shape == self._shapes[i]
    with cached_name_scope("GradientPacker", top_level=False):
        concat_grads = tf.concat([tf.reshape(g, [-1]) for g in grads], 0, name='concatenated_grads')
        # concat_grads = tf.cast(concat_grads, tf.float16)
        grad_packs = tf.split(concat_grads, self._split_sizes)
        return grad_packs
|
[
"\n Args:\n grads (list): list of gradient tensors\n\n Returns:\n packed list of gradient tensors to be aggregated.\n "
] |
Please provide a description of the function:def pack_all(self, all_grads, devices):
    ret = []  # #GPU x #split
    for dev, grads in zip(devices, all_grads):
        with tf.device(dev):
            ret.append(self.pack(grads))
    return ret
|
[
"\n Args:\n all_grads: K x N, K lists of gradients to be packed\n "
] |
Please provide a description of the function:def unpack_all(self, all_packed, devices):
    all_grads = []  # #GPU x #Var
    for dev, packed_grads_single_device in zip(devices, all_packed):
        with tf.device(dev):
            all_grads.append(self.unpack(packed_grads_single_device))
    return all_grads
|
[
"\n Args:\n all_packed: K lists of packed gradients.\n "
] |
Please provide a description of the function:def fpn_model(features):
    assert len(features) == 4, features
    num_channel = cfg.FPN.NUM_CHANNEL

    use_gn = cfg.FPN.NORM == 'GN'

    def upsample2x(name, x):
        return FixedUnPooling(
            name, x, 2, unpool_mat=np.ones((2, 2), dtype='float32'),
            data_format='channels_first')

        # tf.image.resize is, again, not aligned.
        # with tf.name_scope(name):
        #     shape2d = tf.shape(x)[2:]
        #     x = tf.transpose(x, [0, 2, 3, 1])
        #     x = tf.image.resize_nearest_neighbor(x, shape2d * 2, align_corners=True)
        #     x = tf.transpose(x, [0, 3, 1, 2])
        #     return x

    with argscope(Conv2D, data_format='channels_first',
                  activation=tf.identity, use_bias=True,
                  kernel_initializer=tf.variance_scaling_initializer(scale=1.)):
        lat_2345 = [Conv2D('lateral_1x1_c{}'.format(i + 2), c, num_channel, 1)
                    for i, c in enumerate(features)]
        if use_gn:
            lat_2345 = [GroupNorm('gn_c{}'.format(i + 2), c) for i, c in enumerate(lat_2345)]
        lat_sum_5432 = []
        for idx, lat in enumerate(lat_2345[::-1]):
            if idx == 0:
                lat_sum_5432.append(lat)
            else:
                lat = lat + upsample2x('upsample_lat{}'.format(6 - idx), lat_sum_5432[-1])
                lat_sum_5432.append(lat)
        p2345 = [Conv2D('posthoc_3x3_p{}'.format(i + 2), c, num_channel, 3)
                 for i, c in enumerate(lat_sum_5432[::-1])]
        if use_gn:
            p2345 = [GroupNorm('gn_p{}'.format(i + 2), c) for i, c in enumerate(p2345)]
        p6 = MaxPooling('maxpool_p6', p2345[-1], pool_size=1, strides=2, data_format='channels_first', padding='VALID')
        return p2345 + [p6]
|
[
"\n Args:\n features ([tf.Tensor]): ResNet features c2-c5\n\n Returns:\n [tf.Tensor]: FPN features p2-p6\n "
] |
Please provide a description of the function:def fpn_map_rois_to_levels(boxes):
    sqrtarea = tf.sqrt(tf_area(boxes))
    level = tf.cast(tf.floor(
        4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32)

    # RoI levels range from 2~5 (not 6)
    level_ids = [
        tf.where(level <= 2),
        tf.where(tf.equal(level, 3)),   # == is not supported
        tf.where(tf.equal(level, 4)),
        tf.where(level >= 5)]
    level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
                 for i, x in enumerate(level_ids)]
    num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
                     for i, x in enumerate(level_ids)]
    add_moving_summary(*num_in_levels)

    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
    return level_ids, level_boxes
|
[
"\n Assign boxes to level 2~5.\n\n Args:\n boxes (nx4):\n\n Returns:\n [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.\n [tf.Tensor]: 4 tensors, the gathered boxes in each level.\n\n Be careful that the returned tensor could be empty.\n "
] |
Please provide a description of the function:def multilevel_roi_align(features, rcnn_boxes, resolution):
    assert len(features) == 4, features
    # Reassign rcnn_boxes to levels
    level_ids, level_boxes = fpn_map_rois_to_levels(rcnn_boxes)
    all_rois = []

    # Crop patches from corresponding levels
    for i, boxes, featuremap in zip(itertools.count(), level_boxes, features):
        with tf.name_scope('roi_level{}'.format(i + 2)):
            boxes_on_featuremap = boxes * (1.0 / cfg.FPN.ANCHOR_STRIDES[i])
            all_rois.append(roi_align(featuremap, boxes_on_featuremap, resolution))

    # this can fail if using TF<=1.8 with MKL build
    all_rois = tf.concat(all_rois, axis=0)  # NCHW
    # Unshuffle to the original order, to match the original samples
    level_id_perm = tf.concat(level_ids, axis=0)  # A permutation of 1~N
    level_id_invert_perm = tf.invert_permutation(level_id_perm)
    all_rois = tf.gather(all_rois, level_id_invert_perm)
    return all_rois
|
[
"\n Args:\n features ([tf.Tensor]): 4 FPN feature level 2-5\n rcnn_boxes (tf.Tensor): nx4 boxes\n resolution (int): output spatial resolution\n Returns:\n NxC x res x res\n "
] |
Please provide a description of the function:def multilevel_rpn_losses(
        multilevel_anchors, multilevel_label_logits, multilevel_box_logits):
    num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
    assert len(multilevel_anchors) == num_lvl
    assert len(multilevel_label_logits) == num_lvl
    assert len(multilevel_box_logits) == num_lvl

    losses = []
    with tf.name_scope('rpn_losses'):
        for lvl in range(num_lvl):
            anchors = multilevel_anchors[lvl]
            label_loss, box_loss = rpn_losses(
                anchors.gt_labels, anchors.encoded_gt_boxes(),
                multilevel_label_logits[lvl], multilevel_box_logits[lvl],
                name_scope='level{}'.format(lvl + 2))
            losses.extend([label_loss, box_loss])

        total_label_loss = tf.add_n(losses[::2], name='label_loss')
        total_box_loss = tf.add_n(losses[1::2], name='box_loss')
        add_moving_summary(total_label_loss, total_box_loss)
    return [total_label_loss, total_box_loss]
|
[
"\n Args:\n multilevel_anchors: #lvl RPNAnchors\n multilevel_label_logits: #lvl tensors of shape HxWxA\n multilevel_box_logits: #lvl tensors of shape HxWxAx4\n\n Returns:\n label_loss, box_loss\n "
] |
Please provide a description of the function:def generate_fpn_proposals(
        multilevel_pred_boxes, multilevel_label_logits, image_shape2d):
    num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
    assert len(multilevel_pred_boxes) == num_lvl
    assert len(multilevel_label_logits) == num_lvl

    training = get_current_tower_context().is_training
    all_boxes = []
    all_scores = []
    if cfg.FPN.PROPOSAL_MODE == 'Level':
        fpn_nms_topk = cfg.RPN.TRAIN_PER_LEVEL_NMS_TOPK if training else cfg.RPN.TEST_PER_LEVEL_NMS_TOPK
        for lvl in range(num_lvl):
            with tf.name_scope('Lvl{}'.format(lvl + 2)):
                pred_boxes_decoded = multilevel_pred_boxes[lvl]

                proposal_boxes, proposal_scores = generate_rpn_proposals(
                    tf.reshape(pred_boxes_decoded, [-1, 4]),
                    tf.reshape(multilevel_label_logits[lvl], [-1]),
                    image_shape2d, fpn_nms_topk)
                all_boxes.append(proposal_boxes)
                all_scores.append(proposal_scores)

        proposal_boxes = tf.concat(all_boxes, axis=0)  # nx4
        proposal_scores = tf.concat(all_scores, axis=0)  # n
        # Here we are different from Detectron.
        # Detectron picks top-k within the batch, rather than within an image. However we do not have a batch.
        proposal_topk = tf.minimum(tf.size(proposal_scores), fpn_nms_topk)
        proposal_scores, topk_indices = tf.nn.top_k(proposal_scores, k=proposal_topk, sorted=False)
        proposal_boxes = tf.gather(proposal_boxes, topk_indices)
    else:
        for lvl in range(num_lvl):
            with tf.name_scope('Lvl{}'.format(lvl + 2)):
                pred_boxes_decoded = multilevel_pred_boxes[lvl]
                all_boxes.append(tf.reshape(pred_boxes_decoded, [-1, 4]))
                all_scores.append(tf.reshape(multilevel_label_logits[lvl], [-1]))
        all_boxes = tf.concat(all_boxes, axis=0)
        all_scores = tf.concat(all_scores, axis=0)
        proposal_boxes, proposal_scores = generate_rpn_proposals(
            all_boxes, all_scores, image_shape2d,
            cfg.RPN.TRAIN_PRE_NMS_TOPK if training else cfg.RPN.TEST_PRE_NMS_TOPK,
            cfg.RPN.TRAIN_POST_NMS_TOPK if training else cfg.RPN.TEST_POST_NMS_TOPK)

    tf.sigmoid(proposal_scores, name='probs')  # for visualization
    return tf.stop_gradient(proposal_boxes, name='boxes'), \
        tf.stop_gradient(proposal_scores, name='scores')
|
[
"\n Args:\n multilevel_pred_boxes: #lvl HxWxAx4 boxes\n multilevel_label_logits: #lvl tensors of shape HxWxA\n\n Returns:\n boxes: kx4 float\n scores: k logits\n "
] |
Please provide a description of the function:def LayerNorm(
        x, epsilon=1e-5,
        use_bias=True, use_scale=True,
        gamma_init=None, data_format='channels_last'):
    data_format = get_data_format(data_format, keras_mode=False)
    shape = x.get_shape().as_list()
    ndims = len(shape)
    assert ndims in [2, 4]

    mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True)

    if data_format == 'NCHW':
        chan = shape[1]
        new_shape = [1, chan, 1, 1]
    else:
        chan = shape[-1]
        new_shape = [1, 1, 1, chan]
    if ndims == 2:
        new_shape = [1, chan]

    if use_bias:
        beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer())
        beta = tf.reshape(beta, new_shape)
    else:
        beta = tf.zeros([1] * ndims, name='beta')
    if use_scale:
        if gamma_init is None:
            gamma_init = tf.constant_initializer(1.0)
        gamma = tf.get_variable('gamma', [chan], initializer=gamma_init)
        gamma = tf.reshape(gamma, new_shape)
    else:
        gamma = tf.ones([1] * ndims, name='gamma')

    ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')

    vh = ret.variables = VariableHolder()
    if use_scale:
        vh.gamma = gamma
    if use_bias:
        vh.beta = beta
    return ret
|
[
"\n Layer Normalization layer, as described in the paper:\n `Layer Normalization <https://arxiv.org/abs/1607.06450>`_.\n\n Args:\n x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format.\n epsilon (float): epsilon to avoid divide-by-zero.\n use_scale, use_bias (bool): whether to use the extra affine transformation or not.\n "
] |
Please provide a description of the function:def InstanceNorm(x, epsilon=1e-5, use_affine=True, gamma_init=None, data_format='channels_last'):
    data_format = get_data_format(data_format, keras_mode=False)
    shape = x.get_shape().as_list()
    assert len(shape) == 4, "Input of InstanceNorm has to be 4D!"

    if data_format == 'NHWC':
        axis = [1, 2]
        ch = shape[3]
        new_shape = [1, 1, 1, ch]
    else:
        axis = [2, 3]
        ch = shape[1]
        new_shape = [1, ch, 1, 1]
    assert ch is not None, "Input of InstanceNorm require known channel!"

    mean, var = tf.nn.moments(x, axis, keep_dims=True)

    if not use_affine:
        return tf.divide(x - mean, tf.sqrt(var + epsilon), name='output')

    beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
    beta = tf.reshape(beta, new_shape)
    if gamma_init is None:
        gamma_init = tf.constant_initializer(1.0)
    gamma = tf.get_variable('gamma', [ch], initializer=gamma_init)
    gamma = tf.reshape(gamma, new_shape)
    ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')

    vh = ret.variables = VariableHolder()
    if use_affine:
        vh.gamma = gamma
        vh.beta = beta
    return ret
|
[
"\n Instance Normalization, as in the paper:\n `Instance Normalization: The Missing Ingredient for Fast Stylization\n <https://arxiv.org/abs/1607.08022>`_.\n\n Args:\n x (tf.Tensor): a 4D tensor.\n epsilon (float): avoid divide-by-zero\n use_affine (bool): whether to apply learnable affine transformation\n "
] |
Please provide a description of the function:def proposal_metrics(iou):
    # find best roi for each gt, for summary only
    best_iou = tf.reduce_max(iou, axis=0)
    mean_best_iou = tf.reduce_mean(best_iou, name='best_iou_per_gt')
    summaries = [mean_best_iou]
    with tf.device('/cpu:0'):
        for th in [0.3, 0.5]:
            recall = tf.truediv(
                tf.count_nonzero(best_iou >= th),
                tf.size(best_iou, out_type=tf.int64),
                name='recall_iou{}'.format(th))
            summaries.append(recall)
    add_moving_summary(*summaries)
|
[
"\n Add summaries for RPN proposals.\n\n Args:\n iou: nxm, #proposal x #gt\n "
] |
Please provide a description of the function:def sample_fast_rcnn_targets(boxes, gt_boxes, gt_labels):
    iou = pairwise_iou(boxes, gt_boxes)     # nxm
    proposal_metrics(iou)

    # add ground truth as proposals as well
    boxes = tf.concat([boxes, gt_boxes], axis=0)    # (n+m) x 4
    iou = tf.concat([iou, tf.eye(tf.shape(gt_boxes)[0])], axis=0)   # (n+m) x m
    # #proposal=n+m from now on

    def sample_fg_bg(iou):
        fg_mask = tf.reduce_max(iou, axis=1) >= cfg.FRCNN.FG_THRESH

        fg_inds = tf.reshape(tf.where(fg_mask), [-1])
        num_fg = tf.minimum(int(
            cfg.FRCNN.BATCH_PER_IM * cfg.FRCNN.FG_RATIO),
            tf.size(fg_inds), name='num_fg')
        fg_inds = tf.random_shuffle(fg_inds)[:num_fg]

        bg_inds = tf.reshape(tf.where(tf.logical_not(fg_mask)), [-1])
        num_bg = tf.minimum(
            cfg.FRCNN.BATCH_PER_IM - num_fg,
            tf.size(bg_inds), name='num_bg')
        bg_inds = tf.random_shuffle(bg_inds)[:num_bg]

        add_moving_summary(num_fg, num_bg)
        return fg_inds, bg_inds

    fg_inds, bg_inds = sample_fg_bg(iou)
    # fg,bg indices w.r.t proposals

    best_iou_ind = tf.argmax(iou, axis=1)   # #proposal, each in 0~m-1
    fg_inds_wrt_gt = tf.gather(best_iou_ind, fg_inds)   # num_fg

    all_indices = tf.concat([fg_inds, bg_inds], axis=0)   # indices w.r.t all n+m proposal boxes
    ret_boxes = tf.gather(boxes, all_indices)

    ret_labels = tf.concat(
        [tf.gather(gt_labels, fg_inds_wrt_gt),
         tf.zeros_like(bg_inds, dtype=tf.int64)], axis=0)
    # stop the gradient -- they are meant to be training targets
    return BoxProposals(
        tf.stop_gradient(ret_boxes, name='sampled_proposal_boxes'),
        tf.stop_gradient(ret_labels, name='sampled_labels'),
        tf.stop_gradient(fg_inds_wrt_gt))
|
[
"\n Sample some boxes from all proposals for training.\n #fg is guaranteed to be > 0, because ground truth boxes will be added as proposals.\n\n Args:\n boxes: nx4 region proposals, floatbox\n gt_boxes: mx4, floatbox\n gt_labels: m, int32\n\n Returns:\n A BoxProposals instance.\n sampled_boxes: tx4 floatbox, the rois\n sampled_labels: t int64 labels, in [0, #class). Positive means foreground.\n fg_inds_wrt_gt: #fg indices, each in range [0, m-1].\n It contains the matching GT of each foreground roi.\n "
] |
Please provide a description of the function:def fastrcnn_outputs(feature, num_classes, class_agnostic_regression=False):
    classification = FullyConnected(
        'class', feature, num_classes,
        kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    num_classes_for_box = 1 if class_agnostic_regression else num_classes
    box_regression = FullyConnected(
        'box', feature, num_classes_for_box * 4,
        kernel_initializer=tf.random_normal_initializer(stddev=0.001))
    box_regression = tf.reshape(box_regression, (-1, num_classes_for_box, 4), name='output_box')
    return classification, box_regression
|
[
"\n Args:\n feature (any shape):\n num_classes(int): num_category + 1\n class_agnostic_regression (bool): if True, regression to N x 1 x 4\n\n Returns:\n cls_logits: N x num_class classification logits\n reg_logits: N x num_classx4 or Nx2x4 if class agnostic\n "
] |
Please provide a description of the function:def fastrcnn_losses(labels, label_logits, fg_boxes, fg_box_logits):
    label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=label_logits)
    label_loss = tf.reduce_mean(label_loss, name='label_loss')

    fg_inds = tf.where(labels > 0)[:, 0]
    fg_labels = tf.gather(labels, fg_inds)
    num_fg = tf.size(fg_inds, out_type=tf.int64)
    empty_fg = tf.equal(num_fg, 0)
    if int(fg_box_logits.shape[1]) > 1:
        indices = tf.stack(
            [tf.range(num_fg), fg_labels], axis=1)  # #fgx2
        fg_box_logits = tf.gather_nd(fg_box_logits, indices)
    else:
        fg_box_logits = tf.reshape(fg_box_logits, [-1, 4])

    with tf.name_scope('label_metrics'), tf.device('/cpu:0'):
        prediction = tf.argmax(label_logits, axis=1, name='label_prediction')
        correct = tf.cast(tf.equal(prediction, labels), tf.float32)  # boolean/integer gather is unavailable on GPU
        accuracy = tf.reduce_mean(correct, name='accuracy')
        fg_label_pred = tf.argmax(tf.gather(label_logits, fg_inds), axis=1)
        num_zero = tf.reduce_sum(tf.cast(tf.equal(fg_label_pred, 0), tf.int64), name='num_zero')
        false_negative = tf.where(
            empty_fg, 0., tf.cast(tf.truediv(num_zero, num_fg), tf.float32), name='false_negative')
        fg_accuracy = tf.where(
            empty_fg, 0., tf.reduce_mean(tf.gather(correct, fg_inds)), name='fg_accuracy')

    box_loss = tf.losses.huber_loss(
        fg_boxes, fg_box_logits, reduction=tf.losses.Reduction.SUM)
    box_loss = tf.truediv(
        box_loss, tf.cast(tf.shape(labels)[0], tf.float32), name='box_loss')

    add_moving_summary(label_loss, box_loss, accuracy,
                       fg_accuracy, false_negative, tf.cast(num_fg, tf.float32, name='num_fg_label'))
    return [label_loss, box_loss]
|
[
"\n Args:\n labels: n,\n label_logits: nxC\n fg_boxes: nfgx4, encoded\n fg_box_logits: nfgxCx4 or nfgx1x4 if class agnostic\n\n Returns:\n label_loss, box_loss\n "
] |
Please provide a description of the function:def fastrcnn_predictions(boxes, scores):
    assert boxes.shape[1] == cfg.DATA.NUM_CLASS
    assert scores.shape[1] == cfg.DATA.NUM_CLASS
    boxes = tf.transpose(boxes, [1, 0, 2])[1:, :, :]  # #catxnx4
    scores = tf.transpose(scores[:, 1:], [1, 0])  # #catxn

    def f(X):
        prob, box = X
        output_shape = tf.shape(prob, out_type=tf.int64)
        # filter by score threshold
        ids = tf.reshape(tf.where(prob > cfg.TEST.RESULT_SCORE_THRESH), [-1])
        prob = tf.gather(prob, ids)
        box = tf.gather(box, ids)
        # NMS within each class
        selection = tf.image.non_max_suppression(
            box, prob, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH)
        selection = tf.gather(ids, selection)

        if get_tf_version_tuple() >= (1, 13):
            sorted_selection = tf.sort(selection, direction='ASCENDING')
            mask = tf.sparse.SparseTensor(indices=tf.expand_dims(sorted_selection, 1),
                                          values=tf.ones_like(sorted_selection, dtype=tf.bool),
                                          dense_shape=output_shape)
            mask = tf.sparse.to_dense(mask, default_value=False)
        else:
            # this function is deprecated by TF
            sorted_selection = -tf.nn.top_k(-selection, k=tf.size(selection))[0]
            mask = tf.sparse_to_dense(
                sparse_indices=sorted_selection,
                output_shape=output_shape,
                sparse_values=True,
                default_value=False)
        return mask

    # TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750
    buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)]
    masks = tf.map_fn(f, (scores, boxes), dtype=tf.bool,
                      parallel_iterations=1 if buggy_tf else 10)  # #cat x N
    selected_indices = tf.where(masks)  # #selection x 2, each is (cat_id, box_id)
    scores = tf.boolean_mask(scores, masks)

    # filter again by sorting scores
    topk_scores, topk_indices = tf.nn.top_k(
        scores,
        tf.minimum(cfg.TEST.RESULTS_PER_IM, tf.size(scores)),
        sorted=False)
    filtered_selection = tf.gather(selected_indices, topk_indices)
    cat_ids, box_ids = tf.unstack(filtered_selection, axis=1)

    final_scores = tf.identity(topk_scores, name='scores')
    final_labels = tf.add(cat_ids, 1, name='labels')
    final_ids = tf.stack([cat_ids, box_ids], axis=1, name='all_ids')
    final_boxes = tf.gather_nd(boxes, final_ids, name='boxes')
    return final_boxes, final_scores, final_labels
|
[
"\n Generate final results from predictions of all proposals.\n\n Args:\n boxes: n#classx4 floatbox in float32\n scores: nx#class\n\n Returns:\n boxes: Kx4\n scores: K\n labels: K\n ",
"\n prob: n probabilities\n box: nx4 boxes\n\n Returns: n boolean, the selection\n "
] |
Please provide a description of the function:def fastrcnn_2fc_head(feature):
    dim = cfg.FPN.FRCNN_FC_HEAD_DIM
    init = tf.variance_scaling_initializer()
    hidden = FullyConnected('fc6', feature, dim, kernel_initializer=init, activation=tf.nn.relu)
    hidden = FullyConnected('fc7', hidden, dim, kernel_initializer=init, activation=tf.nn.relu)
    return hidden
|
[
"\n Args:\n feature (any shape):\n\n Returns:\n 2D head feature\n "
] |
Please provide a description of the function:def fastrcnn_Xconv1fc_head(feature, num_convs, norm=None):
    assert norm in [None, 'GN'], norm
    l = feature
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out',
                      distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
        for k in range(num_convs):
            l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
            if norm is not None:
                l = GroupNorm('gn{}'.format(k), l)
        l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
                           kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu)
    return l
|
[
"\n Args:\n feature (NCHW):\n num_classes(int): num_category + 1\n num_convs (int): number of conv layers\n norm (str or None): either None or 'GN'\n\n Returns:\n 2D head feature\n "
] |
Please provide a description of the function:def fg_box_logits(self):
    return tf.gather(self.box_logits, self.proposals.fg_inds(), name='fg_box_logits')
|
[
" Returns: #fg x ? x 4 "
] |
Please provide a description of the function:def decoded_output_boxes(self):
    anchors = tf.tile(tf.expand_dims(self.proposals.boxes, 1),
                      [1, cfg.DATA.NUM_CLASS, 1])   # N x #class x 4
    decoded_boxes = decode_bbox_target(
        self.box_logits / self.bbox_regression_weights,
        anchors
    )
    return decoded_boxes
|
[
" Returns: N x #class x 4 "
] |
Please provide a description of the function:def decoded_output_boxes_class_agnostic(self):
    assert self._bbox_class_agnostic
    box_logits = tf.reshape(self.box_logits, [-1, 4])
    decoded = decode_bbox_target(
        box_logits / self.bbox_regression_weights,
        self.proposals.boxes
    )
    return decoded
|
[
" Returns: Nx4 "
] |
Please provide a description of the function:def output_scores(self, name=None):
    return tf.nn.softmax(self.label_logits, name=name)
|
[
" Returns: N x #class scores, summed to one for each box."
] |
Please provide a description of the function:def _on_state(self, state, client):
    def cb(outputs):
        try:
            distrib, value = outputs.result()
        except CancelledError:
            logger.info("Client {} cancelled.".format(client.ident))
            return
        assert np.all(np.isfinite(distrib)), distrib
        action = np.random.choice(len(distrib), p=distrib)
        client.memory.append(TransitionExperience(
            state, action, reward=None, value=value, prob=distrib[action]))
        self.send_queue.put([client.ident, dumps(action)])
    self.async_predictor.put_task([state], cb)
|
[
"\n Launch forward prediction for the new state given by some client.\n "
] |
Please provide a description of the function:def _process_msg(self, client, state, reward, isOver):
    # in the first message, only state is valid,
    # reward&isOver should be discarded
    if len(client.memory) > 0:
        client.memory[-1].reward = reward
        if isOver:
            # should clear client's memory and put to queue
            self._parse_memory(0, client, True)
        else:
            if len(client.memory) == LOCAL_TIME_MAX + 1:
                R = client.memory[-1].value
                self._parse_memory(R, client, False)
    # feed state and return action
    self._on_state(state, client)
|
[
"\n Process a message sent from some client.\n "
] |
Please provide a description of the function:def discriminator(self, imgs, y):
    yv = y
    y = tf.reshape(y, [-1, 1, 1, 10])
    with argscope(Conv2D, kernel_size=5, strides=1):
        l = (LinearWrap(imgs)
             .ConcatWith(tf.tile(y, [1, 28, 28, 1]), 3)
             .Conv2D('conv0', 11)
             .tf.nn.leaky_relu()
             .ConcatWith(tf.tile(y, [1, 14, 14, 1]), 3)
             .Conv2D('conv1', 74)
             .BatchNorm('bn1')
             .tf.nn.leaky_relu()
             .apply(batch_flatten)
             .ConcatWith(yv, 1)
             .FullyConnected('fc1', 1024, activation=tf.identity)
             .BatchNorm('bn2')
             .tf.nn.leaky_relu()
             .ConcatWith(yv, 1)
             .FullyConnected('fct', 1, activation=tf.identity)())
    return l
|
[
" return a (b, 1) logits"
] |
Please provide a description of the function:def export_compact(self, filename, optimize=True, toco_compatible=False):
    if toco_compatible:
        assert optimize, "toco_compatible is only effective when optimize=True!"
    self.graph = self.config._maybe_create_graph()
    with self.graph.as_default():
        input = PlaceholderInput()
        input.setup(self.config.input_signature)
        with PredictTowerContext(''):
            self.config.tower_func(*input.get_input_tensors())

        input_tensors = get_tensors_by_names(self.config.input_names)
        output_tensors = get_tensors_by_names(self.config.output_names)

        self.config.session_init._setup_graph()
        # we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph
        sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True))
        self.config.session_init._run_init(sess)

        dtypes = [n.dtype for n in input_tensors]

        # freeze variables to constants
        frozen_graph_def = graph_util.convert_variables_to_constants(
            sess,
            self.graph.as_graph_def(),
            [n.name[:-2] for n in output_tensors],
            variable_names_whitelist=None,
            variable_names_blacklist=None)

        # prune unused nodes from graph
        if optimize:
            toco_args = () if get_tf_version_tuple() < (1, 8) else (toco_compatible, )
            frozen_graph_def = optimize_for_inference_lib.optimize_for_inference(
                frozen_graph_def,
                [n.name[:-2] for n in input_tensors],
                [n.name[:-2] for n in output_tensors],
                [dtype.as_datatype_enum for dtype in dtypes],
                *toco_args)

        with gfile.FastGFile(filename, "wb") as f:
            f.write(frozen_graph_def.SerializeToString())
            logger.info("Output graph written to {}.".format(filename))
|
[
"Create a self-contained inference-only graph and write final graph (in pb format) to disk.\n\n Args:\n filename (str): path to the output graph\n optimize (bool): whether to use TensorFlow's `optimize_for_inference`\n to prune and optimize the graph. This does not work on all types of graphs.\n toco_compatible (bool): See TensorFlow's\n `optimize_for_inference\n <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py>`_\n for details. Only available after TF 1.8.\n "
] |
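For context, a hedged sketch of how a frozen graph written by a routine like the one above is typically loaded back for inference with the TF1-style API. The file name and the tensor names ("input:0", "output:0") are placeholders invented for illustration, not values taken from this document.

import tensorflow as tf

# read the serialized GraphDef produced by the export step
with tf.io.gfile.GFile("frozen_model.pb", "rb") as f:   # hypothetical path
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())

# import the frozen graph into a fresh Graph
graph = tf.Graph()
with graph.as_default():
    tf.compat.v1.import_graph_def(graph_def, name="")

# run inference by feeding/fetching tensors by name (names depend on the model)
with tf.compat.v1.Session(graph=graph) as sess:
    result = sess.run("output:0", feed_dict={"input:0": [[0.0]]})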
Please provide a description of the function:def export_serving(self, filename,
tags=[tf.saved_model.SERVING if is_tfv2() else tf.saved_model.tag_constants.SERVING],
signature_name='prediction_pipeline'):
self.graph = self.config._maybe_create_graph()
with self.graph.as_default():
input = PlaceholderInput()
input.setup(self.config.input_signature)
with PredictTowerContext(''):
self.config.tower_func(*input.get_input_tensors())
input_tensors = get_tensors_by_names(self.config.input_names)
saved_model = tfv1.saved_model.utils
inputs_signatures = {t.name: saved_model.build_tensor_info(t) for t in input_tensors}
output_tensors = get_tensors_by_names(self.config.output_names)
outputs_signatures = {t.name: saved_model.build_tensor_info(t) for t in output_tensors}
self.config.session_init._setup_graph()
# we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph
sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True))
self.config.session_init._run_init(sess)
builder = tfv1.saved_model.builder.SavedModelBuilder(filename)
prediction_signature = tfv1.saved_model.signature_def_utils.build_signature_def(
inputs=inputs_signatures,
outputs=outputs_signatures,
method_name=tfv1.saved_model.signature_constants.PREDICT_METHOD_NAME)
builder.add_meta_graph_and_variables(
sess, tags,
signature_def_map={signature_name: prediction_signature})
builder.save()
logger.info("SavedModel created at {}.".format(filename))
|
[
"\n Converts a checkpoint and graph to a servable for TensorFlow Serving.\n Use TF's `SavedModelBuilder` to export a trained model without tensorpack dependency.\n\n Args:\n filename (str): path for export directory\n tags (list): list of user specified tags\n signature_name (str): name of signature for prediction\n\n Note:\n This produces\n\n .. code-block:: none\n\n variables/ # output from the vanilla Saver\n variables.data-?????-of-?????\n variables.index\n saved_model.pb # a `SavedModel` protobuf\n\n Currently, we only support a single signature, which is the general PredictSignatureDef:\n https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/signature_defs.md\n "
] |
Please provide a description of the function:def _read_sql_with_offset_pandas_on_ray(
partition_column,
start,
end,
num_splits,
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize=None,
): # pragma: no cover
from .sql import query_put_bounders
query_with_bounders = query_put_bounders(sql, partition_column, start, end)
pandas_df = pandas.read_sql(
query_with_bounders,
con,
index_col=index_col,
coerce_float=coerce_float,
params=params,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
index = len(pandas_df)
return _split_result_for_readers(1, num_splits, pandas_df) + [index]
|
[
"Use a Ray task to read a chunk of SQL source.\n\n Note: Ray functions are not detected by codecov (thus pragma: no cover)\n "
] |
Please provide a description of the function:def read_sql(
cls,
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize=None,
partition_column=None,
lower_bound=None,
upper_bound=None,
max_sessions=None,
):
from .sql import is_distributed, get_query_info
if not is_distributed(partition_column, lower_bound, upper_bound):
warnings.warn("Defaulting to Modin core implementation")
return PandasOnRayIO.read_sql(
sql,
con,
index_col,
coerce_float=coerce_float,
params=params,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
# starts the distributed alternative
cols_names, query = get_query_info(sql, con, partition_column)
num_parts = min(cls.frame_mgr_cls._compute_num_partitions(), max_sessions)
num_splits = min(len(cols_names), num_parts)
diff = (upper_bound - lower_bound) + 1
min_size = diff // num_parts
rest = diff % num_parts
partition_ids = []
index_ids = []
end = lower_bound - 1
for part in range(num_parts):
if rest:
size = min_size + 1
rest -= 1
else:
size = min_size
start = end + 1
end = start + size - 1
partition_id = _read_sql_with_offset_pandas_on_ray._remote(
args=(
partition_column,
start,
end,
num_splits,
query,
con,
index_col,
coerce_float,
params,
parse_dates,
columns,
chunksize,
),
num_return_vals=num_splits + 1,
)
partition_ids.append(
[PandasOnRayFramePartition(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
new_index = pandas.RangeIndex(sum(ray.get(index_ids)))
new_query_compiler = cls.query_compiler_cls(
cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names
)
return new_query_compiler
|
[
" Read SQL query or database table into a DataFrame.\n\n Args:\n sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name.\n con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode)\n index_col: Column(s) to set as index(MultiIndex).\n coerce_float: Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to\n floating point, useful for SQL result sets.\n params: List of parameters to pass to execute method. The syntax used\n to pass parameters is database driver dependent. Check your\n database driver documentation for which of the five syntax styles,\n described in PEP 249's paramstyle, is supported.\n parse_dates:\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict corresponds\n to the keyword arguments of :func:`pandas.to_datetime`\n Especially useful with databases without native Datetime support,\n such as SQLite.\n columns: List of column names to select from SQL table (only used when reading a table).\n chunksize: If specified, return an iterator where `chunksize` is the number of rows to include in each chunk.\n partition_column: column used to share the data between the workers (MUST be a INTEGER column)\n lower_bound: the minimum value to be requested from the partition_column\n upper_bound: the maximum value to be requested from the partition_column\n max_sessions: the maximum number of simultaneous connections allowed to use\n\n Returns:\n Pandas Dataframe\n "
] |
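The loop above carves the inclusive range [lower_bound, upper_bound] into num_parts contiguous chunks, handing the remainder rows to the first partitions. A standalone sketch of just that arithmetic, with illustrative bounds:

def split_bounds(lower_bound, upper_bound, num_parts):
    # same chunking logic as the read_sql loop, extracted for clarity
    diff = (upper_bound - lower_bound) + 1
    min_size, rest = divmod(diff, num_parts)
    bounds, end = [], lower_bound - 1
    for _ in range(num_parts):
        size = min_size + 1 if rest else min_size
        rest = max(rest - 1, 0)
        start = end + 1
        end = start + size - 1
        bounds.append((start, end))
    return bounds

print(split_bounds(1, 10, 3))  # [(1, 4), (5, 7), (8, 10)]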
Please provide a description of the function:def _inherit_docstrings(parent, excluded=[]):
def decorator(cls):
if parent not in excluded:
cls.__doc__ = parent.__doc__
for attr, obj in cls.__dict__.items():
parent_obj = getattr(parent, attr, None)
if parent_obj in excluded or (
not callable(parent_obj) and not isinstance(parent_obj, property)
):
continue
if callable(obj):
obj.__doc__ = parent_obj.__doc__
elif isinstance(obj, property) and obj.fget is not None:
p = property(obj.fget, obj.fset, obj.fdel, parent_obj.__doc__)
setattr(cls, attr, p)
return cls
return decorator
|
[
"Creates a decorator which overwrites a decorated class' __doc__\n attribute with parent's __doc__ attribute. Also overwrites __doc__ of\n methods and properties defined in the class with the __doc__ of matching\n methods and properties in parent.\n\n Args:\n parent (object): Class from which the decorated class inherits __doc__.\n excluded (list): List of parent objects from which the class does not\n inherit docstrings.\n\n Returns:\n function: decorator which replaces the decorated class' documentation\n parent's documentation.\n "
] |
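A minimal usage sketch of the decorator above; the Parent and Child classes here are made up for illustration and assume _inherit_docstrings is in scope:

class Parent:
    """Parent docs."""
    def method(self):
        """Method docs."""

@_inherit_docstrings(Parent)
class Child(Parent):
    def method(self):
        pass  # no docstring of its own

# the decorator copies both the class and the method docstrings
assert Child.__doc__ == "Parent docs."
assert Child.method.__doc__ == "Method docs."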
Please provide a description of the function:def time_logger(name):
start_time = time.time()
yield
end_time = time.time()
total_time = end_time - start_time
logging.info("%s; time: %ss", name, total_time)
|
[
"This logs the time usage of a code block"
] |
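As written, the generator above has to be wrapped as a context manager before it can be used in a `with` statement; presumably a contextlib.contextmanager decorator was stripped along with the docstring. A self-contained sketch of that assumed usage:

import contextlib
import logging
import time

logging.basicConfig(level=logging.INFO)

@contextlib.contextmanager
def time_logger(name):
    start_time = time.time()
    yield
    total_time = time.time() - start_time
    logging.info("%s; time: %ss", name, total_time)

# times the block and logs "sum of the first million integers; time: ...s"
with time_logger("sum of the first million integers"):
    total = sum(range(1_000_000))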
Please provide a description of the function:def initialize_ray():
if threading.current_thread().name == "MainThread":
plasma_directory = None
object_store_memory = os.environ.get("MODIN_MEMORY", None)
if os.environ.get("MODIN_OUT_OF_CORE", "False").title() == "True":
from tempfile import gettempdir
plasma_directory = gettempdir()
# We may have already set the memory from the environment variable, we don't
# want to overwrite that value if we have.
if object_store_memory is None:
# Round down to the nearest Gigabyte.
mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
# Default to 8x memory for out of core
object_store_memory = 8 * mem_bytes
# In case anything failed above, we can still improve the memory for Modin.
if object_store_memory is None:
# Round down to the nearest Gigabyte.
object_store_memory = int(
0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
)
# If the memory pool is smaller than 2GB, just use the default in ray.
if object_store_memory == 0:
object_store_memory = None
else:
object_store_memory = int(object_store_memory)
ray.init(
include_webui=False,
ignore_reinit_error=True,
plasma_directory=plasma_directory,
object_store_memory=object_store_memory,
)
# Register custom serializer for method objects to avoid warning message.
# We serialize `MethodType` objects when we use AxisPartition operations.
ray.register_custom_serializer(types.MethodType, use_pickle=True)
|
[
"Initializes ray based on environment variables and internal defaults."
] |
Please provide a description of the function:def apply(
self,
func,
num_splits=None,
other_axis_partition=None,
maintain_partitioning=True,
**kwargs
):
import dask
if num_splits is None:
num_splits = len(self.list_of_blocks)
if other_axis_partition is not None:
return [
DaskFramePartition(dask.delayed(obj))
for obj in deploy_func_between_two_axis_partitions(
self.axis,
func,
num_splits,
len(self.list_of_blocks),
kwargs,
*dask.compute(
*tuple(
self.list_of_blocks + other_axis_partition.list_of_blocks
)
)
)
]
args = [self.axis, func, num_splits, kwargs, maintain_partitioning]
args.extend(dask.compute(*self.list_of_blocks))
return [
DaskFramePartition(dask.delayed(obj)) for obj in deploy_axis_func(*args)
]
|
[
"Applies func to the object.\n\n See notes in Parent class about this method.\n\n Args:\n func: The function to apply.\n num_splits: The number of times to split the result object.\n other_axis_partition: Another `DaskFrameAxisPartition` object to apply to\n func with this one.\n\n Returns:\n A list of `DaskFramePartition` objects.\n "
] |
Please provide a description of the function:def get_dummies(
data,
prefix=None,
prefix_sep="_",
dummy_na=False,
columns=None,
sparse=False,
drop_first=False,
dtype=None,
):
if sparse:
raise NotImplementedError(
"SparseDataFrame is not implemented. "
"To contribute to Modin, please visit "
"github.com/modin-project/modin."
)
if not isinstance(data, DataFrame):
ErrorMessage.default_to_pandas("`get_dummies` on non-DataFrame")
return DataFrame(
pandas.get_dummies(
data,
prefix=prefix,
prefix_sep=prefix_sep,
dummy_na=dummy_na,
columns=columns,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
)
else:
new_manager = data._query_compiler.get_dummies(
columns,
prefix=prefix,
prefix_sep=prefix_sep,
dummy_na=dummy_na,
drop_first=drop_first,
dtype=dtype,
)
return DataFrame(query_compiler=new_manager)
|
[
"Convert categorical variable into indicator variables.\n\n Args:\n data (array-like, Series, or DataFrame): data to encode.\n prefix (string, [string]): Prefix to apply to each encoded column\n label.\n prefix_sep (string, [string]): Separator between prefix and value.\n dummy_na (bool): Add a column to indicate NaNs.\n columns: Which columns to encode.\n sparse (bool): Not Implemented: If True, returns SparseDataFrame.\n drop_first (bool): Whether to remove the first level of encoded data.\n dtype: The dtype for the get_dummies call.\n\n Returns:\n DataFrame or one-hot encoded data.\n "
] |
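A quick illustration of the encoding itself using plain pandas (the wrapper above mirrors this API when given a Modin DataFrame); the frame is invented for the example:

import pandas as pd

df = pd.DataFrame({"color": ["red", "blue", "red"], "value": [1, 2, 3]})
encoded = pd.get_dummies(df, columns=["color"], prefix="c")
print(encoded)  # keeps "value" and adds indicator columns c_blue / c_red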
Please provide a description of the function:def apply(
self,
func,
num_splits=None,
other_axis_partition=None,
maintain_partitioning=True,
**kwargs
):
if num_splits is None:
num_splits = len(self.list_of_blocks)
if other_axis_partition is not None:
return self._wrap_partitions(
self.deploy_func_between_two_axis_partitions(
self.axis,
func,
num_splits,
len(self.list_of_blocks),
kwargs,
*tuple(self.list_of_blocks + other_axis_partition.list_of_blocks)
)
)
args = [self.axis, func, num_splits, kwargs, maintain_partitioning]
args.extend(self.list_of_blocks)
return self._wrap_partitions(self.deploy_axis_func(*args))
|
[
"Applies func to the object in the plasma store.\n\n See notes in Parent class about this method.\n\n Args:\n func: The function to apply.\n num_splits: The number of times to split the result object.\n other_axis_partition: Another `PandasOnRayFrameAxisPartition` object to apply to\n func with this one.\n maintain_partitioning: Whether or not to keep the partitioning in the same\n orientation as it was previously. This is important because we may be\n operating on an individual AxisPartition and not touching the rest.\n In this case, we have to return the partitioning to its previous\n orientation (the lengths will remain the same). This is ignored between\n two axis partitions.\n\n Returns:\n A list of `RayRemotePartition` objects.\n "
] |
Please provide a description of the function:def shuffle(self, func, lengths, **kwargs):
num_splits = len(lengths)
# We add these to kwargs and will pop them off before performing the operation.
kwargs["manual_partition"] = True
kwargs["_lengths"] = lengths
args = [self.axis, func, num_splits, kwargs, False]
args.extend(self.list_of_blocks)
return self._wrap_partitions(self.deploy_axis_func(*args))
|
[
"Shuffle the order of the data in this axis based on the `lengths`.\n\n Extends `BaseFrameAxisPartition.shuffle`.\n\n Args:\n func: The function to apply before splitting.\n lengths: The list of partition lengths to split the result into.\n\n Returns:\n A list of RemotePartition objects split by `lengths`.\n "
] |
Please provide a description of the function:def deploy_axis_func(
cls, axis, func, num_splits, kwargs, maintain_partitioning, *partitions
):
# Pop these off first because they aren't expected by the function.
manual_partition = kwargs.pop("manual_partition", False)
lengths = kwargs.pop("_lengths", None)
dataframe = pandas.concat(partitions, axis=axis, copy=False)
result = func(dataframe, **kwargs)
if isinstance(result, pandas.Series):
if num_splits == 1:
return result
return [result] + [pandas.Series([]) for _ in range(num_splits - 1)]
if manual_partition:
# The split function is expecting a list
lengths = list(lengths)
# We set lengths to None so we don't use the old lengths for the resulting partition
# layout. This is done if the number of splits is changing or we are told not to
# keep the old partitioning.
elif num_splits != len(partitions) or not maintain_partitioning:
lengths = None
else:
if axis == 0:
lengths = [len(part) for part in partitions]
if sum(lengths) != len(result):
lengths = None
else:
lengths = [len(part.columns) for part in partitions]
if sum(lengths) != len(result.columns):
lengths = None
return split_result_of_axis_func_pandas(axis, num_splits, result, lengths)
|
[
"Deploy a function along a full axis in Ray.\n\n Args:\n axis: The axis to perform the function along.\n func: The function to perform.\n num_splits: The number of splits to return\n (see `split_result_of_axis_func_pandas`)\n kwargs: A dictionary of keyword arguments.\n maintain_partitioning: If True, keep the old partitioning if possible.\n If False, create a new partition layout.\n partitions: All partitions that make up the full axis (row or column)\n\n Returns:\n A list of Pandas DataFrames.\n "
] |
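`split_result_of_axis_func_pandas` is not shown in this section; the following is a simplified, hypothetical stand-in for what such a splitter does, chunking a result frame along an axis into num_splits pieces and optionally honoring explicit chunk lengths:

import numpy as np
import pandas as pd

def split_frame(axis, num_splits, result, lengths=None):
    # return the frame untouched if no split is requested
    if num_splits == 1:
        return [result]
    if lengths is None:
        # roughly equal chunks along the chosen axis
        chunks = np.array_split(np.arange(result.shape[axis]), num_splits)
    else:
        # honor the caller-provided chunk lengths
        bounds = np.cumsum([0] + list(lengths))
        chunks = [np.arange(bounds[i], bounds[i + 1]) for i in range(len(lengths))]
    if axis == 0:
        return [result.iloc[idx, :] for idx in chunks]
    return [result.iloc[:, idx] for idx in chunks]

df = pd.DataFrame(np.arange(12).reshape(4, 3))
parts = split_frame(0, 2, df)  # two blocks of 2 rows each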
Please provide a description of the function:def deploy_func_between_two_axis_partitions(
cls, axis, func, num_splits, len_of_left, kwargs, *partitions
):
lt_frame = pandas.concat(list(partitions[:len_of_left]), axis=axis, copy=False)
rt_frame = pandas.concat(list(partitions[len_of_left:]), axis=axis, copy=False)
result = func(lt_frame, rt_frame, **kwargs)
return split_result_of_axis_func_pandas(axis, num_splits, result)
|
[
"Deploy a function along a full axis between two data sets in Ray.\n\n Args:\n axis: The axis to perform the function along.\n func: The function to perform.\n num_splits: The number of splits to return\n (see `split_result_of_axis_func_pandas`).\n len_of_left: The number of values in `partitions` that belong to the\n left data set.\n kwargs: A dictionary of keyword arguments.\n partitions: All partitions that make up the full axis (row or column)\n for both data sets.\n\n Returns:\n A list of Pandas DataFrames.\n "
] |
Please provide a description of the function:def query(self, expr, **kwargs):
def gen_table_expr(table, expr):
resolver = {
name: FakeSeries(dtype.to_pandas_dtype())
for name, dtype in zip(table.schema.names, table.schema.types)
}
scope = Scope(level=0, resolvers=(resolver,))
return Expr(expr=expr, env=scope)
import pyarrow.gandiva as gandiva
unary_ops = {"~": "not"}
math_calls = {"log": "log", "exp": "exp", "log10": "log10", "cbrt": "cbrt"}
bin_ops = {
"+": "add",
"-": "subtract",
"*": "multiply",
"/": "divide",
"**": "power",
}
cmp_ops = {
"==": "equal",
"!=": "not_equal",
">": "greater_than",
"<": "less_than",
"<=": "less_than_or_equal_to",
": "gre">
">=": "greater_than_or_equal_to",
"like": "like",
}
def build_node(table, terms, builder):
if isinstance(terms, Constant):
return builder.make_literal(
terms.value, (pa.from_numpy_dtype(terms.return_type))
)
if isinstance(terms, Term):
return builder.make_field(table.schema.field_by_name(terms.name))
if isinstance(terms, BinOp):
lnode = build_node(table, terms.lhs, builder)
rnode = build_node(table, terms.rhs, builder)
return_type = pa.from_numpy_dtype(terms.return_type)
if terms.op == "&":
return builder.make_and([lnode, rnode])
if terms.op == "|":
return builder.make_or([lnode, rnode])
if terms.op in cmp_ops:
assert return_type == pa.bool_()
return builder.make_function(
cmp_ops[terms.op], [lnode, rnode], return_type
)
if terms.op in bin_ops:
return builder.make_function(
bin_ops[terms.op], [lnode, rnode], return_type
)
if isinstance(terms, UnaryOp):
return_type = pa.from_numpy_dtype(terms.return_type)
return builder.make_function(
unary_ops[terms.op],
[build_node(table, terms.operand, builder)],
return_type,
)
if isinstance(terms, MathCall):
return_type = pa.from_numpy_dtype(terms.return_type)
children = [
build_node(table, child, builder) for child in terms.operands
]
return builder.make_function(
math_calls[terms.op], children, return_type
)
raise TypeError("Unsupported term type: %s" % terms)
def can_be_condition(expr):
if isinstance(expr.terms, BinOp):
if expr.terms.op in cmp_ops or expr.terms.op in ("&", "|"):
return True
elif isinstance(expr.terms, UnaryOp):
if expr.terms.op == "~":
return True
return False
def filter_with_selection_vector(table, s):
record_batch = table.to_batches()[0]
indices = s.to_array() # .to_numpy()
new_columns = [
pa.array(c.to_numpy()[indices]) for c in record_batch.columns
]
return pa.Table.from_arrays(new_columns, record_batch.schema.names)
def gandiva_query(table, query):
expr = gen_table_expr(table, query)
if not can_be_condition(expr):
raise ValueError("Root operation should be a filter.")
builder = gandiva.TreeExprBuilder()
root = build_node(table, expr.terms, builder)
cond = builder.make_condition(root)
filt = gandiva.make_filter(table.schema, cond)
sel_vec = filt.evaluate(table.to_batches()[0], pa.default_memory_pool())
result = filter_with_selection_vector(table, sel_vec)
return result
def gandiva_query2(table, query):
expr = gen_table_expr(table, query)
if not can_be_condition(expr):
raise ValueError("Root operation should be a filter.")
builder = gandiva.TreeExprBuilder()
root = build_node(table, expr.terms, builder)
cond = builder.make_condition(root)
filt = gandiva.make_filter(table.schema, cond)
return filt
def query_builder(arrow_table, **kwargs):
return gandiva_query(arrow_table, kwargs.get("expr", ""))
kwargs["expr"] = expr
func = self._prepare_method(query_builder, **kwargs)
new_data = self._map_across_full_axis(1, func)
# Query removes rows, so we need to update the index
new_index = self.compute_index(0, new_data, False)
return self.__constructor__(
new_data, new_index, self.columns, self._dtype_cache
)
|
[
"Query columns of the DataManager with a boolean expression.\r\n\r\n Args:\r\n expr: Boolean expression to query the columns with.\r\n\r\n Returns:\r\n DataManager containing the rows where the boolean expression is satisfied.\r\n "
] |
Please provide a description of the function:def to_pandas(self):
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
dtype_dict = {
col_name: pandas.Series(dtype=self.dtypes[col_name])
for col_name in self.columns
}
df = pandas.DataFrame(dtype_dict, self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
)
df.index = self.index
df.columns = self.columns
return df
|
[
"Converts Modin DataFrame to Pandas DataFrame.\r\n\r\n Returns:\r\n Pandas DataFrame of the DataManager.\r\n "
] |
Please provide a description of the function:def deploy_ray_axis_func(axis, func, num_splits, kwargs, *partitions):
table = concat_arrow_table_partitions(axis, partitions)
try:
result = func(table, **kwargs)
except Exception:
result = pyarrow.Table.from_pandas(func(table.to_pandas(), **kwargs))
return split_arrow_table_result(
axis, result, len(partitions), num_splits, table.schema.metadata
)
|
[
"Deploy a function along a full axis in Ray.\n\n Args:\n axis: The axis to perform the function along.\n func: The function to perform.\n num_splits: The number of splits to return\n (see `split_result_of_axis_func_pandas`)\n kwargs: A dictionary of keyword arguments.\n partitions: All partitions that make up the full axis (row or column)\n\n Returns:\n A list of Pandas DataFrames.\n "
] |
Please provide a description of the function:def deploy_ray_func_between_two_axis_partitions(
axis, func, num_splits, len_of_left, kwargs, *partitions
):
lt_table = concat_arrow_table_partitions(axis, partitions[:len_of_left])
rt_table = concat_arrow_table_partitions(axis, partitions[len_of_left:])
try:
result = func(lt_table, rt_table, **kwargs)
except Exception:
lt_frame = lt_table.to_pandas()
rt_frame = rt_table.to_pandas()
result = pyarrow.Table.from_pandas(func(lt_frame, rt_frame, **kwargs))
return split_arrow_table_result(
axis, result, len(partitions), num_splits, result.schema.metadata
)
|
[
"Deploy a function along a full axis between two data sets in Ray.\n\n Args:\n axis: The axis to perform the function along.\n func: The function to perform.\n num_splits: The number of splits to return\n (see `split_result_of_axis_func_pandas`).\n len_of_left: The number of values in `partitions` that belong to the\n left data set.\n kwargs: A dictionary of keyword arguments.\n partitions: All partitions that make up the full axis (row or column)\n for both data sets.\n\n Returns:\n A list of Pandas DataFrames.\n "
] |
Please provide a description of the function:def apply(self, func, num_splits=None, other_axis_partition=None, **kwargs):
if num_splits is None:
num_splits = len(self.list_of_blocks)
if other_axis_partition is not None:
return [
PyarrowOnRayFramePartition(obj)
for obj in deploy_ray_func_between_two_axis_partitions._remote(
args=(self.axis, func, num_splits, len(self.list_of_blocks), kwargs)
+ tuple(self.list_of_blocks + other_axis_partition.list_of_blocks),
num_return_vals=num_splits,
)
]
args = [self.axis, func, num_splits, kwargs]
args.extend(self.list_of_blocks)
return [
PyarrowOnRayFramePartition(obj)
for obj in deploy_ray_axis_func._remote(args, num_return_vals=num_splits)
]
|
[
"Applies func to the object in the plasma store.\n\n See notes in Parent class about this method.\n\n Args:\n func: The function to apply.\n num_splits: The number of times to split the result object.\n other_axis_partition: Another `PyarrowOnRayFrameAxisPartition` object to apply to\n func with this one.\n\n Returns:\n A list of `RayRemotePartition` objects.\n "
] |
Please provide a description of the function:def shuffle(self, func, num_splits=None, **kwargs):
if num_splits is None:
num_splits = len(self.list_of_blocks)
args = [self.axis, func, num_splits, kwargs]
args.extend(self.list_of_blocks)
return [
PyarrowOnRayFramePartition(obj)
for obj in deploy_ray_axis_func._remote(args, num_return_vals=num_splits)
]
|
[
"Shuffle the order of the data in this axis based on the `func`.\n\n Extends `BaseFrameAxisPartition.shuffle`.\n\n :param func:\n :param num_splits:\n :param kwargs:\n :return:\n "
] |
Please provide a description of the function:def deploy_ray_func(func, partition, kwargs):
try:
result = func(partition, **kwargs)
# Sometimes Arrow forces us to make a copy of an object before we operate
# on it. We don't want the error to propagate to the user, and we want to
# avoid copying unless we absolutely have to.
except Exception:
result = func(partition.to_pandas(), **kwargs)
if isinstance(result, pandas.Series):
result = pandas.DataFrame(result).T
if isinstance(result, pandas.DataFrame):
return pyarrow.Table.from_pandas(result)
return result
|
[
"Deploy a function to a partition in Ray.\n\n Args:\n func: The function to apply.\n partition: The partition to apply the function to.\n kwargs: A dictionary of keyword arguments for the function.\n\n Returns:\n The result of the function.\n "
] |
Please provide a description of the function:def get(self):
if len(self.call_queue):
return self.apply(lambda x: x).get()
return ray.get(self.oid)
|
[
"Gets the object out of the plasma store.\n\n Returns:\n The object from the plasma store.\n "
] |
Please provide a description of the function:def apply(self, func, **kwargs):
oid = self.oid
self.call_queue.append((func, kwargs))
def call_queue_closure(oid_obj, call_queues):
for func, kwargs in call_queues:
if isinstance(func, ray.ObjectID):
func = ray.get(func)
if isinstance(kwargs, ray.ObjectID):
kwargs = ray.get(kwargs)
oid_obj = func(oid_obj, **kwargs)
return oid_obj
oid = deploy_ray_func.remote(
call_queue_closure, oid, kwargs={"call_queues": self.call_queue}
)
self.call_queue = []
return PyarrowOnRayFramePartition(oid)
|
[
"Apply a function to the object stored in this partition.\n\n Note: It does not matter if func is callable or an ObjectID. Ray will\n handle it correctly either way. The keyword arguments are sent as a\n dictionary.\n\n Args:\n func: The function to apply.\n\n Returns:\n A RayRemotePartition object.\n "
] |
Please provide a description of the function:def to_pandas(self):
dataframe = self.get().to_pandas()
assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
return dataframe
|
[
"Convert the object stored in this partition to a Pandas DataFrame.\n\n Returns:\n A Pandas DataFrame.\n "
] |
Please provide a description of the function:def put(cls, obj):
return PyarrowOnRayFramePartition(ray.put(pyarrow.Table.from_pandas(obj)))
|
[
"Put an object in the Plasma store and wrap it in this object.\n\n Args:\n obj: The object to be put.\n\n Returns:\n A `RayRemotePartition` object.\n "
] |
Please provide a description of the function:def isna(obj):
if isinstance(obj, BasePandasDataset):
return obj.isna()
else:
return pandas.isna(obj)
|
[
"\n Detect missing values for an array-like object.\n Args:\n obj: Object to check for null or missing values.\n\n Returns:\n bool or array-like of bool\n "
] |
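For reference, the pandas behavior that the wrapper above falls back to for non-Modin inputs:

import numpy as np
import pandas as pd

print(pd.isna(np.nan))                        # True
print(pd.isna(None))                          # True
print(pd.isna(pd.Series([1.0, None, 3.0])))   # element-wise boolean Series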
Please provide a description of the function:def merge(
left,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
if not isinstance(left, DataFrame):
raise ValueError(
"can not merge DataFrame with instance of type {}".format(type(right))
)
return left.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
|
[
"Database style join, where common columns in \"on\" are merged.\n\n Args:\n left: DataFrame.\n right: DataFrame.\n how: What type of join to use.\n on: The common column name(s) to join on. If None, and left_on and\n right_on are also None, will default to all commonly named\n columns.\n left_on: The column(s) on the left to use for the join.\n right_on: The column(s) on the right to use for the join.\n left_index: Use the index from the left as the join keys.\n right_index: Use the index from the right as the join keys.\n sort: Sort the join keys lexicographically in the result.\n suffixes: Add this suffix to the common names not in the \"on\".\n copy: Does nothing in our implementation\n indicator: Adds a column named _merge to the DataFrame with\n metadata from the merge about each row.\n validate: Checks if merge is a specific type.\n\n Returns:\n A merged Dataframe\n "
] |
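A small, self-contained pandas example of the join semantics that the wrapper above forwards to DataFrame.merge; both frames are invented:

import pandas as pd

left = pd.DataFrame({"key": ["a", "b", "c"], "x": [1, 2, 3]})
right = pd.DataFrame({"key": ["b", "c", "d"], "y": [20, 30, 40]})
print(left.merge(right, how="inner", on="key"))   # only keys b and c survive
print(left.merge(right, how="left", on="key"))    # key a kept, y becomes NaN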
Please provide a description of the function:def is_distributed(partition_column, lower_bound, upper_bound):
if (
(partition_column is not None)
and (lower_bound is not None)
and (upper_bound is not None)
):
if upper_bound > lower_bound:
return True
else:
raise InvalidArguments("upper_bound must be greater than lower_bound.")
elif (partition_column is None) and (lower_bound is None) and (upper_bound is None):
return False
else:
raise InvalidArguments(
"Invalid combination of partition_column, lower_bound, upper_bound."
"All these arguments should be passed (distributed) or none of them (standard pandas)."
)
|
[
" Check if is possible distribute a query given that args\n\n Args:\n partition_column: column used to share the data between the workers\n lower_bound: the minimum value to be requested from the partition_column\n upper_bound: the maximum value to be requested from the partition_column\n\n Returns:\n True for distributed or False if not\n "
] |
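Assuming the function above is in scope (its module path and the InvalidArguments exception class are not shown in this section), the three argument patterns behave as follows:

assert is_distributed("id", 0, 999) is True       # all bounds given, upper > lower
assert is_distributed(None, None, None) is False  # fall back to plain pandas read_sql
try:
    is_distributed("id", 10, 5)                   # upper_bound <= lower_bound
except Exception as exc:                          # InvalidArguments in the source
    print(type(exc).__name__, exc)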
Please provide a description of the function:def is_table(engine, sql):
if engine.dialect.has_table(engine, sql):
return True
return False
|
[
" Check with the given sql arg is query or table\n\n Args:\n engine: SQLAlchemy connection engine\n sql: SQL query or table name\n\n Returns:\n True for table or False if not\n "
] |
Please provide a description of the function:def get_table_metadata(engine, table):
metadata = MetaData()
metadata.reflect(bind=engine, only=[table])
table_metadata = Table(table, metadata, autoload=True)
return table_metadata
|
[
" Extract all useful infos from the given table\n\n Args:\n engine: SQLAlchemy connection engine\n table: table name\n\n Returns:\n Dictionary of infos\n "
] |
Please provide a description of the function:def get_table_columns(metadata):
cols = OrderedDict()
for col in metadata.c:
name = str(col).rpartition(".")[2]
cols[name] = col.type.python_type.__name__
return cols
|
[
" Extract columns names and python typos from metadata\n\n Args:\n metadata: Table metadata\n\n Returns:\n dict with columns names and python types\n "
] |
Please provide a description of the function:def check_query(query):
q = query.lower()
if "select " not in q:
raise InvalidQuery("SELECT word not found in the query: {0}".format(query))
if " from " not in q:
raise InvalidQuery("FROM word not found in the query: {0}".format(query))
|
[
" Check query sanity\n\n Args:\n query: query string\n\n Returns:\n None\n "
] |
Please provide a description of the function:def get_query_columns(engine, query):
con = engine.connect()
result = con.execute(query).fetchone()
values = list(result)
cols_names = result.keys()
cols = OrderedDict()
for i in range(len(cols_names)):
cols[cols_names[i]] = type(values[i]).__name__
return cols
|
[
" Extract columns names and python typos from query\n\n Args:\n engine: SQLAlchemy connection engine\n query: SQL query\n\n Returns:\n dict with columns names and python types\n "
] |
Please provide a description of the function:def check_partition_column(partition_column, cols):
for k, v in cols.items():
if k == partition_column:
if v == "int":
return
else:
raise InvalidPartitionColumn(
"partition_column must be int, and not {0}".format(v)
)
raise InvalidPartitionColumn(
"partition_column {0} not found in the query".format(partition_column)
)
|
[
" Check partition_column existence and type\n\n Args:\n partition_column: partition_column name\n cols: dict with columns names and python types\n\n Returns:\n None\n "
] |
Please provide a description of the function:def get_query_info(sql, con, partition_column):
engine = create_engine(con)
if is_table(engine, sql):
table_metadata = get_table_metadata(engine, sql)
query = build_query_from_table(sql)
cols = get_table_columns(table_metadata)
else:
check_query(sql)
query = sql.replace(";", "")
cols = get_query_columns(engine, query)
# TODO allow validation that takes into account edge cases of pandas e.g. "[index]"
# check_partition_column(partition_column, cols)
cols_names = list(cols.keys())
return cols_names, query
|
[
" Return a columns name list and the query string\n\n Args:\n sql: SQL query or table name\n con: database connection or url string\n partition_column: column used to share the data between the workers\n\n Returns:\n Columns name list and query string\n "
] |
Please provide a description of the function:def query_put_bounders(query, partition_column, start, end):
where = " WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}".format(
partition_column, start, end
)
query_with_bounders = "SELECT * FROM ({0}) AS TMP_TABLE {1}".format(query, where)
return query_with_bounders
|
[
" Put bounders in the query\n\n Args:\n query: SQL query string\n partition_column: partition_column name\n start: lower_bound\n end: upper_bound\n\n Returns:\n Query with bounders\n "
] |
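Since the function above is pure string formatting, its effect is easy to show (assuming it is in scope; the query below is made up):

sql = "SELECT id, amount FROM payments"
print(query_put_bounders(sql, "id", 100, 199))
# -> SELECT * FROM (SELECT id, amount FROM payments) AS TMP_TABLE
#    WHERE TMP_TABLE.id >= 100 AND TMP_TABLE.id <= 199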
Please provide a description of the function:def compute_index(self, axis, data_object, compute_diff=True):
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices
|
[
"Computes the index after a number of rows have been removed.\n\n Note: In order for this to be used properly, the indexes must not be\n changed before you compute this.\n\n Args:\n axis: The axis to extract the index from.\n data_object: The new data object to extract the index from.\n compute_diff: True to use `self` to compute the index from self\n rather than data_object. This is used when the dimension of the\n index may have changed, but the deleted rows/columns are\n unknown.\n\n Returns:\n A new pandas.Index object.\n "
] |
Please provide a description of the function:def _prepare_method(self, pandas_func, **kwargs):
if self._is_transposed:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(
df.T, internal_indices=internal_indices, **kwargs
)
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
return helper
|
[
"Prepares methods given various metadata.\n Args:\n pandas_func: The function to prepare.\n\n Returns\n Helper function which handles potential transpose.\n "
] |
Please provide a description of the function:def numeric_columns(self, include_bool=True):
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
|
[
"Returns the numeric columns of the Manager.\n\n Returns:\n List of index names.\n "
] |
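A standalone illustration of the same idea with a plain pandas frame; the column names and dtypes are made up for the example:

import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [True, False]})
numeric = [col for col, dtype in df.dtypes.items() if is_numeric_dtype(dtype)]
numeric_no_bool = [
    col for col, dtype in df.dtypes.items()
    if is_numeric_dtype(dtype) and dtype != np.bool_
]
print(numeric)          # ['a', 'c']  (bool counts as numeric)
print(numeric_no_bool)  # ['a']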
Please provide a description of the function:def numeric_function_clean_dataframe(self, axis):
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler
|
[
"Preprocesses numeric functions to clean dataframe and pick numeric indices.\n\n Args:\n axis: '0' if columns and '1' if rows.\n\n Returns:\n Tuple with return value(if any), indices to apply func to & cleaned Manager.\n "
] |
Please provide a description of the function:def _join_index_objects(self, axis, other_index, how, sort=True):
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
|
[
"Joins a pair of index objects (columns or rows) by a given strategy.\n\n Args:\n axis: The axis index object to join (0 for columns, 1 for index).\n other_index: The other_index to join on.\n how: The type of join to join to make (e.g. right, left).\n\n Returns:\n Joined indices.\n "
] |
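The helper above delegates to pandas Index.join; a small standalone illustration of that call:

import pandas as pd

left = pd.Index(["a", "b", "c"])
right = pd.Index(["b", "c", "d"])
print(left.join(right, how="outer", sort=True))  # Index(['a', 'b', 'c', 'd'], dtype='object')
print(left.join(right, how="inner", sort=True))  # Index(['b', 'c'], dtype='object')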
Please provide a description of the function:def join(self, other, **kwargs):
if not isinstance(other, list):
other = [other]
return self._join_list_of_managers(other, **kwargs)
|
[
"Joins a list or two objects together.\n\n Args:\n other: The other object(s) to join on.\n\n Returns:\n Joined objects.\n "
] |
Please provide a description of the function:def concat(self, axis, other, **kwargs):
return self._append_list_of_managers(other, axis, **kwargs)
|
[
"Concatenates two objects together.\n\n Args:\n axis: The axis index object to join (0 for columns, 1 for index).\n other: The other_index to concat with.\n\n Returns:\n Concatenated objects.\n "
] |
Please provide a description of the function:def copartition(self, axis, other, how_to_join, sort, force_repartition=False):
if isinstance(other, type(self)):
other = [other]
index_obj = (
[o.index for o in other] if axis == 0 else [o.columns for o in other]
)
joined_index = self._join_index_objects(
axis ^ 1, index_obj, how_to_join, sort=sort
)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.index if axis == 0 else self.columns
right_old_idxes = index_obj
# Start with this and we'll repartition the first time, and then not again.
reindexed_self = self.data
reindexed_other_list = []
def compute_reindex(old_idx):
def reindex_partition(df):
if axis == 0:
df.index = old_idx
new_df = df.reindex(index=joined_index)
new_df.index = pandas.RangeIndex(len(new_df.index))
else:
df.columns = old_idx
new_df = df.reindex(columns=joined_index)
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
return reindex_partition
for i in range(len(other)):
# If the indices are equal we can skip partitioning so long as we are not
# forced to repartition. See note above about `force_repartition`.
if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition):
reindex_left = None
else:
reindex_left = self._prepare_method(compute_reindex(left_old_idx))
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindex_right = None
else:
reindex_right = other[i]._prepare_method(
compute_reindex(right_old_idxes[i])
)
reindexed_self, reindexed_other = reindexed_self.copartition_datasets(
axis, other[i].data, reindex_left, reindex_right
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
|
[
"Copartition two QueryCompiler objects.\n\n Args:\n axis: The axis to copartition along.\n other: The other Query Compiler(s) to copartition against.\n how_to_join: How to manage joining the index object (\"left\", \"right\", etc.)\n sort: Whether or not to sort the joined index.\n force_repartition: Whether or not to force the repartitioning. By default,\n this method will skip repartitioning if it is possible. This is because\n reindexing is extremely inefficient. Because this method is used to\n `join` or `append`, it is vital that the internal indices match.\n\n Returns:\n A tuple (left query compiler, right query compiler list, joined index).\n ",
"Create a function based on the old index and axis.\n\n Args:\n old_idx: The old index/columns\n\n Returns:\n A function that will be run in each partition.\n "
] |
Please provide a description of the function:def to_pandas(self):
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
if len(self.columns) != 0:
df = pandas.DataFrame(columns=self.columns).astype(self.dtypes)
else:
df = pandas.DataFrame(columns=self.columns, index=self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
)
df.index = self.index
df.columns = self.columns
return df
|
[
"Converts Modin DataFrame to Pandas DataFrame.\n\n Returns:\n Pandas DataFrame of the DataManager.\n "
] |
Please provide a description of the function:def from_pandas(cls, df, block_partitions_cls):
new_index = df.index
new_columns = df.columns
new_dtypes = df.dtypes
new_data = block_partitions_cls.from_pandas(df)
return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
|
[
"Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.\n\n Args:\n cls: DataManger object to convert the DataFrame to.\n df: Pandas DataFrame object.\n block_partitions_cls: BlockParitions object to store partitions\n\n Returns:\n Returns DataManager containing data from the Pandas DataFrame.\n "
] |
Please provide a description of the function:def _inter_manager_operations(self, other, how_to_join, func):
reindexed_self, reindexed_other_list, joined_index = self.copartition(
0, other, how_to_join, False
)
# unwrap list returned by `copartition`.
reindexed_other = reindexed_other_list[0]
new_columns = self._join_index_objects(
0, other.columns, how_to_join, sort=False
)
# There is an interesting serialization anomaly that happens if we do
# not use the columns in `inter_data_op_builder` from here (e.g. if we
# pass them in). Passing them in can cause problems, so we will just
# use them from here.
self_cols = self.columns
other_cols = other.columns
def inter_data_op_builder(left, right, func):
left.columns = self_cols
right.columns = other_cols
# We reset here to make sure that the internal indexes match. We aligned
# them in the previous step, so this step is to prevent mismatches.
left.index = pandas.RangeIndex(len(left.index))
right.index = pandas.RangeIndex(len(right.index))
result = func(left, right)
result.columns = pandas.RangeIndex(len(result.columns))
return result
new_data = reindexed_self.inter_data_operation(
1, lambda l, r: inter_data_op_builder(l, r, func), reindexed_other
)
return self.__constructor__(new_data, joined_index, new_columns)
|
[
"Inter-data operations (e.g. add, sub).\n\n Args:\n other: The other Manager for the operation.\n how_to_join: The type of join to join to make (e.g. right, outer).\n\n Returns:\n New DataManager with new data and index.\n "
] |
Please provide a description of the function:def _inter_df_op_handler(self, func, other, **kwargs):
axis = kwargs.get("axis", 0)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(other, type(self)):
return self._inter_manager_operations(
other, "outer", lambda x, y: func(x, y, **kwargs)
)
else:
return self._scalar_operations(
axis, other, lambda df: func(df, other, **kwargs)
)
|
[
"Helper method for inter-manager and scalar operations.\n\n Args:\n func: The function to use on the Manager/scalar.\n other: The other Manager/scalar.\n\n Returns:\n New DataManager with new data and index.\n "
] |
Please provide a description of the function:def binary_op(self, op, other, **kwargs):
func = getattr(pandas.DataFrame, op)
return self._inter_df_op_handler(func, other, **kwargs)
|
[
"Perform an operation between two objects.\n\n Note: The list of operations is as follows:\n - add\n - eq\n - floordiv\n - ge\n - gt\n - le\n - lt\n - mod\n - mul\n - ne\n - pow\n - rfloordiv\n - rmod\n - rpow\n - rsub\n - rtruediv\n - sub\n - truediv\n - __and__\n - __or__\n - __xor__\n Args:\n op: The operation. See list of operations above\n other: The object to operate against.\n\n Returns:\n A new QueryCompiler object.\n "
] |
Please provide a description of the function:def update(self, other, **kwargs):
assert isinstance(
other, type(self)
), "Must have the same DataManager subclass to perform this operation"
def update_builder(df, other, **kwargs):
# This is because of a requirement in Arrow
df = df.copy()
df.update(other, **kwargs)
return df
return self._inter_df_op_handler(update_builder, other, **kwargs)
|
[
"Uses other manager to update corresponding values in this manager.\n\n Args:\n other: The other manager.\n\n Returns:\n New DataManager with updated data and index.\n "
] |
Please provide a description of the function:def where(self, cond, other, **kwargs):
assert isinstance(
cond, type(self)
), "Must have the same DataManager subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
first_pass = cond._inter_manager_operations(
other, "left", where_builder_first_pass
)
final_pass = self._inter_manager_operations(
first_pass, "left", where_builder_second_pass
)
return self.__constructor__(final_pass.data, self.index, self.columns)
else:
axis = kwargs.get("axis", 0)
# Rather than serializing and passing in the index/columns, we will
# just change this index to match the internal index.
if isinstance(other, pandas.Series):
other.index = pandas.RangeIndex(len(other.index))
def where_builder_series(df, cond):
if axis == 0:
df.index = pandas.RangeIndex(len(df.index))
cond.index = pandas.RangeIndex(len(cond.index))
else:
df.columns = pandas.RangeIndex(len(df.columns))
cond.columns = pandas.RangeIndex(len(cond.columns))
return df.where(cond, other, **kwargs)
reindexed_self, reindexed_cond, a = self.copartition(
axis, cond, "left", False
)
# Unwrap from list given by `copartition`
reindexed_cond = reindexed_cond[0]
new_data = reindexed_self.inter_data_operation(
axis, lambda l, r: where_builder_series(l, r), reindexed_cond
)
return self.__constructor__(new_data, self.index, self.columns)
|
[
"Gets values from this manager where cond is true else from other.\n\n Args:\n cond: Condition on which to evaluate values.\n\n Returns:\n New DataManager with updated data and index.\n "
] |
Please provide a description of the function:def _scalar_operations(self, axis, scalar, func):
if isinstance(scalar, (list, np.ndarray, pandas.Series)):
new_index = self.index if axis == 0 else self.columns
def list_like_op(df):
if axis == 0:
df.index = new_index
else:
df.columns = new_index
return func(df)
new_data = self._map_across_full_axis(
axis, self._prepare_method(list_like_op)
)
return self.__constructor__(new_data, self.index, self.columns)
else:
return self._map_partitions(self._prepare_method(func))
|
[
"Handler for mapping scalar operations across a Manager.\n\n Args:\n axis: The axis index object to execute the function on.\n scalar: The scalar value to map.\n func: The function to use on the Manager with the scalar.\n\n Returns:\n A new QueryCompiler with updated data and new index.\n "
] |
Please provide a description of the function:def reindex(self, axis, labels, **kwargs):
# To reindex, we need a function that will be shipped to each of the
# partitions.
def reindex_builder(df, axis, old_labels, new_labels, **kwargs):
if axis:
while len(df.columns) < len(old_labels):
df[len(df.columns)] = np.nan
df.columns = old_labels
new_df = df.reindex(columns=new_labels, **kwargs)
# reset the internal columns back to a RangeIndex
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
else:
while len(df.index) < len(old_labels):
df.loc[len(df.index)] = np.nan
df.index = old_labels
new_df = df.reindex(index=new_labels, **kwargs)
# reset the internal index back to a RangeIndex
new_df.reset_index(inplace=True, drop=True)
return new_df
old_labels = self.columns if axis else self.index
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
func = self._prepare_method(
lambda df: reindex_builder(df, axis, old_labels, labels, **kwargs)
)
# The reindex can just be mapped over the axis we are modifying. This
# is for simplicity in implementation. We specify num_splits here
# because if we are repartitioning we should (in the future).
# Additionally this operation is often followed by an operation that
# assumes identical partitioning. Internally, we *may* change the
# partitioning during a map across a full axis.
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, new_index, new_columns)
|
[
"Fits a new index for this Manger.\n\n Args:\n axis: The axis index object to target the reindex on.\n labels: New labels to conform 'axis' on to.\n\n Returns:\n A new QueryCompiler with updated data and new index.\n "
] |