python_code
stringlengths 0
1.02M
| repo_name
stringlengths 9
48
| file_path
stringlengths 5
114
|
---|---|---|
## @package BlobWeightedSum
# Module caffe2.python.layers.blob_weighted_sum
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
class BlobWeightedSum(ModelLayer):
    """
    Computes an element-wise weighted sum of the input blobs, with one
    learnable scalar weight per blob.
    """

    def __init__(
        self,
        model,
        input_record,
        init_weights=None,
        weight_optim=None,
        name='blob_weighted_sum',
        **kwargs
    ):
        super(BlobWeightedSum, self).__init__(model, name, input_record, **kwargs)

        # One weight per input blob; at least two blobs are required for a
        # weighted sum to be meaningful.
        self.blobs = self.input_record.field_blobs()
        self.num_weights = len(self.blobs)
        assert self.num_weights > 1, (
            "BlobWeightedSum expects more than one input blobs"
        )

        # All input blobs must have a known, identical shape.
        first_shape = input_record.field_types()[0].shape
        assert len(first_shape) > 0, (
            "BlobWeightedSum expects limited dimensions of the input tensor"
        )
        assert all(
            first_shape == input_record.field_types()[i].shape
            for i in range(1, self.num_weights)
        ), "Shape of input blobs should be the same shape {}".format(
            first_shape
        )

        if init_weights:
            assert self.num_weights == len(init_weights), (
                "the size of init_weights should be the same as input blobs, "
                "expects {}, got {}".format(self.num_weights, len(init_weights))
            )
        else:
            # Default initialization: a plain (unweighted) sum.
            init_weights = [1.0] * self.num_weights

        self.weights = []
        for idx, init_value in enumerate(init_weights):
            self.weights.append(
                self.create_param(
                    param_name="w_{}".format(idx),
                    shape=[1],
                    initializer=('ConstantFill', {'value': float(init_value)}),
                    optimizer=weight_optim,
                )
            )

        self.output_schema = schema.Scalar(
            input_record.field_types()[0],
            self.get_next_blob_reference('blob_weighted_sum_out')
        )

    def add_ops(self, net):
        # WeightedSum expects inputs interleaved as [x0, w0, x1, w1, ...].
        interleaved = []
        for blob, weight in zip(self.blobs, self.weights):
            interleaved.append(blob)
            interleaved.append(weight)
        net.WeightedSum(
            interleaved,
            self.output_schema(),
            grad_on_w=True,
        )
|
pytorch-master
|
caffe2/python/layers/blob_weighted_sum.py
|
## @package batch_lr_loss
# Module caffe2.python.layers.batch_lr_loss
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchLRLoss(ModelLayer):
    """
    Batch logistic-regression (sigmoid cross-entropy) loss layer.

    Reads ('label', 'logit') — and optionally 'prediction', 'log_variance',
    'weight' — from the input record and emits a single float32 loss scalar.
    Optional variants: label smoothing, JSD fusing, homotopy weighting
    between xent and JSD, focal loss, exponential task-gamma decay,
    uncertainty penalty, and per-example weighting.
    """

    def __init__(
        self,
        model,
        input_record,
        name='batch_lr_loss',
        average_loss=True,  # average (vs. front-sum) the per-example loss
        jsd_weight=0.0,  # weight of the Bernoulli-JSD term, in [0, 1]
        pos_label_target=1.0,  # smoothed target used for positive labels
        neg_label_target=0.0,  # smoothed target used for negative labels
        homotopy_weighting=False,  # anneal xent/jsd weights over iterations
        log_D_trick=False,
        unjoined_lr_loss=False,
        uncertainty_penalty=1.0,
        focal_gamma=0.0,  # focal-loss exponent; 0 disables the focal factor
        stop_grad_in_focal_factor=False,
        task_gamma=1.0,  # per-iteration decay factor; 1.0 disables decay
        task_gamma_lb=0.1,  # lower bound for the decayed loss multiplier
        **kwargs
    ):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        self.average_loss = average_loss

        # The input record must contain at least 'label' and 'logit'.
        assert (schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('logit', schema.Scalar())
            ),
            input_record
        ))

        self.jsd_fuse = False
        assert jsd_weight >= 0 and jsd_weight <= 1
        if jsd_weight > 0 or homotopy_weighting:
            # JSD needs calibrated predictions in addition to logits.
            assert 'prediction' in input_record
            self.init_weight(jsd_weight, homotopy_weighting)
            self.jsd_fuse = True
        self.homotopy_weighting = homotopy_weighting

        # Label-smoothing targets must be probabilities with pos >= neg.
        assert pos_label_target <= 1 and pos_label_target >= 0
        assert neg_label_target <= 1 and neg_label_target >= 0
        assert pos_label_target >= neg_label_target
        self.pos_label_target = pos_label_target
        self.neg_label_target = neg_label_target

        # log_D_trick and unjoined_lr_loss are mutually exclusive variants of
        # the cross-entropy op.
        assert not (log_D_trick and unjoined_lr_loss)
        self.log_D_trick = log_D_trick
        self.unjoined_lr_loss = unjoined_lr_loss
        assert uncertainty_penalty >= 0
        self.uncertainty_penalty = uncertainty_penalty

        # Loss ops must never run at serving time.
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )

        self.focal_gamma = focal_gamma
        self.stop_grad_in_focal_factor = stop_grad_in_focal_factor

        self.apply_exp_decay = False
        if task_gamma < 1.0:
            self.apply_exp_decay = True
            # task_gamma_cur accumulates gamma^t across iterations; it is
            # updated manually in add_ops (NoOptim keeps the optimizer away).
            self.task_gamma_cur = self.create_param(
                param_name=('%s_task_gamma_cur' % self.name),
                shape=[1],
                initializer=(
                    'ConstantFill', {
                        'value': 1.0,
                        'dtype': core.DataType.FLOAT
                    }
                ),
                optimizer=self.model.NoOptim,
            )

            # Constant blob holding the per-iteration decay factor.
            self.task_gamma = self.create_param(
                param_name=('%s_task_gamma' % self.name),
                shape=[1],
                initializer=(
                    'ConstantFill', {
                        'value': task_gamma,
                        'dtype': core.DataType.FLOAT
                    }
                ),
                optimizer=self.model.NoOptim,
            )

            # Constant blob holding the lower bound of the decayed multiplier.
            self.task_gamma_lb = self.create_param(
                param_name=('%s_task_gamma_lb' % self.name),
                shape=[1],
                initializer=(
                    'ConstantFill', {
                        'value': task_gamma_lb,
                        'dtype': core.DataType.FLOAT
                    }
                ),
                optimizer=self.model.NoOptim,
            )

    def init_weight(self, jsd_weight, homotopy_weighting):
        """
        Set up the xent_weight / jsd_weight blobs. With homotopy weighting
        they are mutable params updated each iteration via update_weight;
        otherwise they are fixed global constants (1 - jsd_weight, jsd_weight).
        """
        if homotopy_weighting:
            # Mutex + counter implement an atomic iteration count shared by
            # all workers driving the annealing schedule.
            self.mutex = self.create_param(
                param_name=('%s_mutex' % self.name),
                shape=None,
                initializer=('CreateMutex', ),
                optimizer=self.model.NoOptim,
            )
            self.counter = self.create_param(
                param_name=('%s_counter' % self.name),
                shape=[1],
                initializer=(
                    'ConstantFill', {
                        'value': 0,
                        'dtype': core.DataType.INT64
                    }
                ),
                optimizer=self.model.NoOptim,
            )
            # Starts at 1.0 (all weight on cross-entropy).
            self.xent_weight = self.create_param(
                param_name=('%s_xent_weight' % self.name),
                shape=[1],
                initializer=(
                    'ConstantFill', {
                        'value': 1.,
                        'dtype': core.DataType.FLOAT
                    }
                ),
                optimizer=self.model.NoOptim,
            )
            # Starts at 0.0; grows as xent_weight decays (they sum to 1).
            self.jsd_weight = self.create_param(
                param_name=('%s_jsd_weight' % self.name),
                shape=[1],
                initializer=(
                    'ConstantFill', {
                        'value': 0.,
                        'dtype': core.DataType.FLOAT
                    }
                ),
                optimizer=self.model.NoOptim,
            )
        else:
            self.jsd_weight = self.model.add_global_constant(
                '%s_jsd_weight' % self.name, jsd_weight
            )
            self.xent_weight = self.model.add_global_constant(
                '%s_xent_weight' % self.name, 1. - jsd_weight
            )

    def update_weight(self, net):
        """
        Advance the homotopy schedule by one step: bump the atomic counter,
        recompute xent_weight via an inverse-decay learning-rate schedule,
        and set jsd_weight = 1 - xent_weight.
        """
        net.AtomicIter([self.mutex, self.counter], [self.counter])
        # iter = 0: lr = 1;
        # iter = 1e6; lr = 0.5^0.1 = 0.93
        # iter = 1e9; lr = 1e-3^0.1 = 0.50
        net.LearningRate([self.counter], [self.xent_weight], base_lr=1.0,
                         policy='inv', gamma=1e-6, power=0.1,)
        net.Sub(
            [self.model.global_constants['ONE'], self.xent_weight],
            [self.jsd_weight]
        )
        return self.xent_weight, self.jsd_weight

    def add_ops(self, net):
        """Emit the loss computation; output goes to self.output_schema."""
        # numerically stable log-softmax with crossentropy
        label = self.input_record.label()
        # mandatory cast to float32
        # self.input_record.label.field_type().base is np.float32 but
        # label type is actually int
        label = net.Cast(
            label,
            net.NextScopedBlob('label_float32'),
            to=core.DataType.FLOAT)
        # Cross-entropy op expects labels with an explicit trailing dim.
        label = net.ExpandDims(label, net.NextScopedBlob('expanded_label'),
                               dims=[1])
        # Label smoothing: snap labels below/above 0.5 to the configured
        # neg/pos targets.
        if self.pos_label_target != 1.0 or self.neg_label_target != 0.0:
            label = net.StumpFunc(
                label,
                net.NextScopedBlob('smoothed_label'),
                threshold=0.5,
                low_value=self.neg_label_target,
                high_value=self.pos_label_target,
            )
        xent = net.SigmoidCrossEntropyWithLogits(
            [self.input_record.logit(), label],
            net.NextScopedBlob('cross_entropy'),
            log_D_trick=self.log_D_trick,
            unjoined_lr_loss=self.unjoined_lr_loss
        )
        # Optionally scale xent by the focal-loss modulating factor.
        if self.focal_gamma != 0:
            label = net.StopGradient(
                [label],
                [net.NextScopedBlob('label_stop_gradient')],
            )

            prediction = self.input_record.prediction()
            # focal loss = (y(1-p) + p(1-y))^gamma * original LR loss
            # y(1-p) + p(1-y) = y + p - 2 * yp
            y_plus_p = net.Add(
                [prediction, label],
                net.NextScopedBlob("y_plus_p"),
            )
            yp = net.Mul([prediction, label], net.NextScopedBlob("yp"))
            two_yp = net.Scale(yp, net.NextScopedBlob("two_yp"), scale=2.0)
            y_plus_p_sub_two_yp = net.Sub(
                [y_plus_p, two_yp], net.NextScopedBlob("y_plus_p_sub_two_yp")
            )
            focal_factor = net.Pow(
                y_plus_p_sub_two_yp,
                net.NextScopedBlob("y_plus_p_sub_two_yp_power"),
                exponent=float(self.focal_gamma),
            )
            if self.stop_grad_in_focal_factor is True:
                focal_factor = net.StopGradient(
                    [focal_factor],
                    [net.NextScopedBlob("focal_factor_stop_gradient")],
                )
            xent = net.Mul(
                [xent, focal_factor], net.NextScopedBlob("focallossxent")
            )

        # Exponential task decay: multiply the loss by
        # max(gamma^t, task_gamma_lb), updating gamma^t in place each step.
        if self.apply_exp_decay:
            net.Mul(
                [self.task_gamma_cur, self.task_gamma],
                self.task_gamma_cur
            )

            task_gamma_multiplier = net.Max(
                [self.task_gamma_cur, self.task_gamma_lb],
                net.NextScopedBlob("task_gamma_cur_multiplier")
            )

            xent = net.Mul(
                [xent, task_gamma_multiplier], net.NextScopedBlob("expdecayxent")
            )

        # fuse with JSD
        if self.jsd_fuse:
            jsd = net.BernoulliJSD(
                [self.input_record.prediction(), label],
                net.NextScopedBlob('jsd'),
            )
            if self.homotopy_weighting:
                self.update_weight(net)
            loss = net.WeightedSum(
                [xent, self.xent_weight, jsd, self.jsd_weight],
                net.NextScopedBlob('loss'),
            )
        else:
            loss = xent

        # Uncertainty-weighted loss (heteroscedastic-style reweighting).
        if 'log_variance' in self.input_record.fields:
            # mean (0.5 * exp(-s) * loss + 0.5 * penalty * s)
            log_variance_blob = self.input_record.log_variance()

            log_variance_blob = net.ExpandDims(
                log_variance_blob, net.NextScopedBlob('expanded_log_variance'),
                dims=[1]
            )

            neg_log_variance_blob = net.Negative(
                [log_variance_blob],
                net.NextScopedBlob('neg_log_variance')
            )

            # enforce less than 88 to avoid OverflowError
            neg_log_variance_blob = net.Clip(
                [neg_log_variance_blob],
                net.NextScopedBlob('clipped_neg_log_variance'),
                max=88.0
            )

            exp_neg_log_variance_blob = net.Exp(
                [neg_log_variance_blob],
                net.NextScopedBlob('exp_neg_log_variance')
            )

            exp_neg_log_variance_loss_blob = net.Mul(
                [exp_neg_log_variance_blob, loss],
                net.NextScopedBlob('exp_neg_log_variance_loss')
            )

            penalized_uncertainty = net.Scale(
                log_variance_blob, net.NextScopedBlob("penalized_unceratinty"),
                scale=float(self.uncertainty_penalty)
            )

            loss_2x = net.Add(
                [exp_neg_log_variance_loss_blob, penalized_uncertainty],
                net.NextScopedBlob('loss')
            )
            loss = net.Scale(loss_2x, net.NextScopedBlob("loss"), scale=0.5)

        # Optional per-example weighting; weights do not receive gradients.
        if 'weight' in self.input_record.fields:
            weight_blob = self.input_record.weight()
            if self.input_record.weight.field_type().base != np.float32:
                weight_blob = net.Cast(
                    weight_blob,
                    weight_blob + '_float32',
                    to=core.DataType.FLOAT
                )
            weight_blob = net.StopGradient(
                [weight_blob],
                [net.NextScopedBlob('weight_stop_gradient')],
            )
            loss = net.Mul(
                [loss, weight_blob],
                net.NextScopedBlob('weighted_cross_entropy'),
            )

        # Reduce per-example losses to the scalar output.
        if self.average_loss:
            net.AveragedLoss(loss, self.output_schema.field_blobs())
        else:
            net.ReduceFrontSum(loss, self.output_schema.field_blobs())
|
pytorch-master
|
caffe2/python/layers/batch_lr_loss.py
|
## @package gather_record
# Module caffe2.python.layers.gather_record
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class GatherRecord(ModelLayer):
    """
    Gather the rows selected by the 1-D `indices` tensor from every blob in
    `record`. For a values blob of a list, the full slices delimited by the
    list's lengths blob are gathered instead of single rows; nested lists are
    handled recursively.

    Example:
        indices = [0, 2]
        record:a = [[0, 1], [2, 3], [4, 5], [6, 7]]
        record:b:lengths = [0, 1, 2, 3]
        record:b:items = [0, 1, 2, 3, 4, 5]
    produces:
        a = [[0, 1], [4, 5]]
        b:lengths = [0, 2]
        b:items = [1, 2]
    """

    def __init__(self, model, input_record, name='gather_record', **kwargs):
        super(GatherRecord, self).__init__(model, name, input_record, **kwargs)

        assert 'indices' in input_record
        assert 'record' in input_record

        # Output mirrors the gathered record's schema with fresh blobs.
        self.output_schema = schema.NewRecord(
            model.net, input_record.record.clone_schema())

        self._indices = self.input_record.indices()

    def _gather_scalar(self, net, record, lengths_blob, output_record):
        # Top-level scalars use a plain Gather; scalars nested inside lists
        # must honor the accumulated lengths of the enclosing list(s).
        if lengths_blob is None:
            net.Gather([record(), self._indices], output_record())
        else:
            net.LengthsGather([record(), lengths_blob, self._indices],
                              output_record())

    def _gather_struct(self, net, record, lengths_blob, output_record):
        # Recurse into each child field with the same lengths context.
        for child_name, child_field in record.get_children():
            self._dispatch(net, child_field, lengths_blob,
                           output_record[child_name])

    def _gather_list(self, net, record, lengths_blob, output_record):
        # Gather this list's lengths, then compose them with any enclosing
        # lengths before recursing into the items.
        self._gather_scalar(
            net, record.lengths, lengths_blob, output_record.lengths)
        if lengths_blob is None:
            lengths_blob = record.lengths()
        else:
            # TODO(kittipat): This is a hacky solution until LengthsSum for int
            # is implemented
            lengths_float = net.Cast(
                record.lengths(),
                net.NextScopedBlob(str(record.lengths()) + '_float'),
                to=core.DataType.FLOAT,
            )
            lengths_blob_float = net.LengthsSum(
                [lengths_float, lengths_blob],
                net.NextScopedBlob(str(record.lengths()) + "_nested_float")
            )
            lengths_blob = net.Cast(
                lengths_blob_float,
                net.NextScopedBlob(str(record.lengths()) + "_nested"),
                to=core.DataType.INT32,
            )
        self._dispatch(net, record._items, lengths_blob, output_record._items)

    def _dispatch(self, net, record, lengths_blob, output_record):
        # Route by the concrete schema type of this field.
        if isinstance(record, schema.Scalar):
            handler = self._gather_scalar
        elif isinstance(record, schema.Struct):
            handler = self._gather_struct
        elif isinstance(record, schema.List):
            handler = self._gather_list
        else:
            raise NotImplementedError
        handler(net, record, lengths_blob, output_record)

    def add_ops(self, net):
        self._dispatch(net, self.input_record.record, None, self.output_schema)
|
pytorch-master
|
caffe2/python/layers/gather_record.py
|
# @package sparse_to_dense
# Module caffe2.python.layers.sparse_to_dense
from collections import defaultdict
import numpy as np
from caffe2.python import schema
from caffe2.python.layers.layers import AccessedFeatures, ModelLayer
class FeatureSparseToDense(ModelLayer):
    """
    Densifies sparse feature records according to `input_specs`.

    For each (field, FeatureSpec) pair the layer emits a dense output keyed
    by feature id: FLOAT features become a fixed-width float32 matrix;
    ID_LIST / ID_SCORE_LIST / EMBEDDING / GENERIC_FEATURE features become a
    (ranges, values[, ids/scores]) struct where `ranges` has one
    (offset, length) row per configured feature id.
    """

    def __init__(
        self,
        model,
        input_record,
        input_specs,
        name="feature_sparse_to_dense",
        default_dense_value=None,
        **kwargs
    ):
        """
        `input_specs` follows the format of FeatureSpec from schema. To be more
        precise it's a namedtuple that should have:
        'feature_type', 'feature_names', 'feature_ids'

        `default_dense_value` is the fill value for missing FLOAT features;
        None defaults to 0.0, and the only accepted values are 0.0 and
        float("NaN").
        """
        super(FeatureSparseToDense, self).__init__(model, name, input_record, **kwargs)
        if default_dense_value is None:
            default_dense_value = 0.0
        default_dense_value = float(default_dense_value)
        assert (
            np.isnan(default_dense_value) or default_dense_value == 0.0
        ), "default_dense_value can only be 0.0 or NaN"

        self.input_specs = input_specs
        # Global constant blobs provided by the model are used as the fill
        # values, so no extra fill blobs need to be created here.
        self.default_float_value = (
            model.global_constants["NAN"]
            if np.isnan(default_dense_value)
            else model.global_constants["ZERO"]
        )
        self.zero_range = model.global_constants["ZERO_RANGE"]

        # Build one output schema entry per configured field.
        outputs = []
        for field, feature_specs in self.input_specs:
            assert len(feature_specs.feature_names) == len(feature_specs.feature_ids)
            if feature_specs.feature_type == "FLOAT":
                # One float32 column per feature id.
                outputs.append(
                    (
                        field,
                        schema.Scalar(
                            (np.float32, (len(feature_specs.feature_ids),)),
                            self.get_next_blob_reference(field + "_output"),
                        ),
                    )
                )
            elif feature_specs.feature_type == "ID_LIST":
                # Per-feature (offset, length) ranges into a shared int64
                # values blob.
                outputs.append(
                    (
                        field,
                        schema.Struct(
                            (
                                "ranges",
                                schema.Scalar(
                                    (np.int32, (len(feature_specs.feature_ids), 2)),
                                    self.get_next_blob_reference(field + "_ranges"),
                                ),
                            ),
                            (
                                "values",
                                schema.Scalar(
                                    np.int64,
                                    self.get_next_blob_reference(field + "_values"),
                                ),
                            ),
                        ),
                    )
                )
            elif feature_specs.feature_type == "ID_SCORE_LIST":
                # Like ID_LIST, but with parallel ids and float scores.
                outputs.append(
                    (
                        field,
                        schema.Struct(
                            (
                                "ranges",
                                schema.Scalar(
                                    (np.int32, (len(feature_specs.feature_ids), 2)),
                                    self.get_next_blob_reference(field + "_ranges"),
                                ),
                            ),
                            (
                                "ids",
                                schema.Scalar(
                                    np.int64,
                                    self.get_next_blob_reference(field + "_ids"),
                                ),
                            ),
                            (
                                "scores",
                                schema.Scalar(
                                    np.float32,
                                    self.get_next_blob_reference(field + "_scores"),
                                ),
                            ),
                        ),
                    )
                )
            elif feature_specs.feature_type == "EMBEDDING":
                # We don't know dimensions of embeddings in input data.
                # Even though they should match dimensions from feature config,
                # we keep ranges blob to check input data later.
                outputs.append(
                    (
                        field,
                        schema.Struct(
                            (
                                "ranges",
                                schema.Scalar(
                                    (np.int32, (len(feature_specs.feature_ids), 2)),
                                    self.get_next_blob_reference(field + "_ranges"),
                                ),
                            ),
                            (
                                "values",
                                schema.Scalar(
                                    np.float32,
                                    self.get_next_blob_reference(field + "_values"),
                                ),
                            ),
                        ),
                    )
                )
            elif feature_specs.feature_type == "GENERIC_FEATURE":
                # We don't know dimensions of embeddings in input data.
                # Even though they should match dimensions from feature config,
                # we keep ranges blob to check input data later.
                # Currently this schema with ranges and values is only for
                # generic type enum 1. If new types are implemented, we need to
                # modify the ParseGeneric operator, and this part accordingly
                outputs.append(
                    (
                        field,
                        schema.Struct(
                            (
                                "ranges",
                                schema.Scalar(
                                    (np.int32, (len(feature_specs.feature_ids), 2)),
                                    self.get_next_blob_reference(field + "_ranges"),
                                ),
                            ),
                            (
                                "values",
                                schema.Scalar(
                                    np.float32,
                                    self.get_next_blob_reference(field + "_values"),
                                ),
                            ),
                        ),
                    )
                )
            else:
                raise TypeError(
                    "Unsupported input type: {0}".format(feature_specs.feature_type)
                )

        # TODO(amalevich): This schema is producing ranges. And thus if there is
        # something using it it should support ranges as well. It might be
        # confusing, if we don't add better support for ranges/have it as a
        # first layer
        self.output_schema = schema.Struct(*outputs)

        # TODO(amalevich): Consider moving this data to schema, instead.
        # Structs don't support attaching metadata to them and cloning
        # will break things badly, but this is the most elegant way to pass
        # this info around. Should we change it, or is it too much work and
        # not worth it?
        for field, feature_specs in input_specs:
            schema.attach_metadata_to_scalars(
                self.output_schema[field], schema.Metadata(feature_specs=feature_specs)
            )

    # Add operators to all types that need to be densified
    def add_ops(self, net):
        """Emit the densification ops for every configured field."""
        record = self.input_record
        for field, feature_specs in self.input_specs:
            if feature_specs.feature_type == "FLOAT":
                # Dense fill keyed by feature id; missing ids get the
                # configured default value.
                net.SparseToDenseMask(
                    [
                        record[field].keys(),
                        record[field].values(),
                        self.default_float_value,
                        record[field].lengths(),
                    ],
                    [self.output_schema[field]()],
                    mask=feature_specs.feature_ids,
                )
            elif feature_specs.feature_type == "ID_LIST":
                # Convert per-example list lengths to (offset, length) ranges,
                # then densify the ranges per feature id.
                id_list_ranges = net.LengthsToRanges(
                    record[field].values.lengths(), net.NextScopedBlob("id_list_ranges")
                )
                net.SparseToDenseMask(
                    [
                        record[field].keys(),
                        id_list_ranges,
                        self.zero_range,
                        record[field].lengths(),
                    ],
                    self.output_schema[field].ranges(),
                    mask=feature_specs.feature_ids,
                )
                # Alias helps to enforce the fact that all SparseToDense calls
                # produce new blobs.
                # Reusing blob names might result in some weird consequences
                # during the delivery time, when content of the blobs is
                # generated based on the inputSpecs.
                net.Alias(
                    record[field].values.items(), self.output_schema[field].values()
                )
            elif feature_specs.feature_type == "ID_SCORE_LIST":
                # TODO: merge this to the case above?
                id_list_ranges = net.LengthsToRanges(
                    record[field].values.lengths(),
                    net.NextScopedBlob("id_score_list_ranges"),
                )
                net.SparseToDenseMask(
                    [
                        record[field].keys(),
                        id_list_ranges,
                        self.zero_range,
                        record[field].lengths(),
                    ],
                    self.output_schema[field].ranges(),
                    mask=feature_specs.feature_ids,
                )
                # Alias helps to enforce the fact that all SparseToDense calls
                # produce new blobs.
                # Reusing blob names might result in some weird consequences
                # during the delivery time, when content of the blobs is
                # generated based on the inputSpecs.
                net.Alias(record[field].values.keys(), self.output_schema[field].ids())
                net.Alias(
                    record[field].values.values(), self.output_schema[field].scores()
                )
            elif feature_specs.feature_type == "EMBEDDING":
                ranges = net.LengthsToRanges(
                    record[field].values.lengths(),
                    net.NextScopedBlob("embeddings_ranges"),
                )
                net.SparseToDenseMask(
                    [
                        record[field].keys(),
                        ranges,
                        self.zero_range,
                        record[field].lengths(),
                    ],
                    self.output_schema[field].ranges(),
                    mask=feature_specs.feature_ids,
                )
                # Alias helps to enforce the fact that all SparseToDense calls
                # produce new blobs.
                # Reusing blob names might result in some weird consequences
                # during the delivery time, when content of the blobs is
                # generated based on the inputSpecs.
                net.Alias(
                    record[field].values.items(), self.output_schema[field].values()
                )
            elif feature_specs.feature_type == "GENERIC_FEATURE":
                # Parse the packed generic-feature blob first; the ParseGeneric
                # op splits it into lengths/ids/value-lengths/values.
                (
                    feature_lengths_blob,
                    feature_ids_blob,
                    value_lengths_blob,
                    value_values_blob,
                ) = net.ParseGeneric(
                    [record[field]()],
                    ["feature_lengths", "feature_ids", "value_lengths", "value_values"],
                    feature_type_enum=1,
                )
                # Currently our implementation only supports
                # generic type enum 1. If new types are implemented, we need to
                # modify the ParseGeneric operator, the schema above,
                # and this part accordingly to parse the generic feature strings
                # into input_record
                ranges = net.LengthsToRanges(
                    value_lengths_blob, net.NextScopedBlob("generics_ranges")
                )
                net.SparseToDenseMask(
                    [feature_ids_blob, ranges, self.zero_range, feature_lengths_blob],
                    self.output_schema[field].ranges(),
                    mask=feature_specs.feature_ids,
                )
                # Alias helps to enforce the fact that all SparseToDense calls
                # produce new blobs.
                # Reusing blob names might result in some weird consequences
                # during the delivery time, when content of the blobs is
                # generated based on the inputSpecs.
                net.Alias(value_values_blob, self.output_schema[field].values())

    def get_metadata(self):
        """
        Return a list of (spec dict, output blobs, output types) triples, one
        per configured field; FLOAT entries additionally carry cardinality 1.
        """
        metadata = []
        for field, feature_specs in self.input_specs:
            metadata.append(
                (
                    {
                        "type": feature_specs.feature_type,
                        "names": feature_specs.feature_names,
                        "ids": feature_specs.feature_ids,
                    },
                    self.output_schema[field].field_blobs(),
                    self.output_schema[field].field_types(),
                )
            )
            if feature_specs.feature_type == "FLOAT":
                metadata[-1][0]["cardinality"] = 1
        return metadata

    def get_accessed_features(self):
        """Return {field: [AccessedFeatures]} for all features in the specs."""
        accessed_features = defaultdict(list)

        # The features that are accessed are just those features that appear in
        # the input specs
        for field, feature_specs in self.input_specs:
            accessed_features[field].append(
                AccessedFeatures(
                    feature_specs.feature_type, set(feature_specs.feature_ids)
                )
            )

        return accessed_features
|
pytorch-master
|
caffe2/python/layers/feature_sparse_to_dense.py
|
# @package functional
# Module caffe2.python.layers.functional
from caffe2.python import core, schema, scope, workspace
from caffe2.python.layers.layers import (
ModelLayer,
)
import caffe2.proto.caffe2_pb2 as caffe2_pb2
import numpy as np
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Functional(ModelLayer):
    """
    Wraps an arbitrary `function(net, input_record, output_schema, **kwargs)`
    as a model layer.

    The output schema is derived from `output_names_or_num`: an int builds a
    RawTuple of that many outputs, a list of names builds a Struct, and an
    existing schema.Field is cloned as-is. Unless `output_dtypes` is given,
    output types/shapes are inferred by executing the function once on a
    throwaway net.
    """

    def __init__(self, model, input_record, output_names_or_num, function,
                 name='functional', output_dtypes=None, tags=None, **kwargs):
        # allow coercion
        input_record = schema.as_record(input_record)

        super(Functional, self).__init__(model, name, input_record, tags=tags, **kwargs)
        self._function = function
        self._kwargs = kwargs
        # Struct output when the caller passed a list, or an int != 1;
        # a single unnamed output is exposed as a bare Scalar.
        return_struct = (
            isinstance(output_names_or_num, list) or
            (isinstance(output_names_or_num, int) and
             output_names_or_num != 1)
        )

        with scope.NameScope(self.name, reset=True):
            if isinstance(output_names_or_num, int):
                struct_output_schema = schema.NewRecord(
                    model.net, schema.RawTuple(output_names_or_num))
            elif isinstance(output_names_or_num, schema.Field):
                # Caller supplied a ready-made schema; use its blobs directly
                # and skip all inference below.
                self.output_schema = output_names_or_num.clone(keep_blobs=True)
                return
            else:
                if not isinstance(output_names_or_num, list):
                    output_names_or_num = [output_names_or_num]
                # np.void marks the type as unknown until inference below.
                out_tuple = [(out, np.void) for out in output_names_or_num]
                struct_output_schema = schema.NewRecord(
                    model.net, schema.Struct(*out_tuple))

        num_outputs = len(struct_output_schema.field_blobs())

        # functional layer returns Struct if more than one outputs or output is
        # a list, otherwise Scalar
        if return_struct:
            self.output_schema = struct_output_schema
        else:
            self.output_schema = struct_output_schema[0]

        # If output_dtypes is provided, use it for output schema. Otherwise
        # the shape and type will be inferred.
        if output_dtypes is not None:
            if not isinstance(output_dtypes, list):
                output_dtypes = [output_dtypes] * num_outputs
            assert len(output_dtypes) == num_outputs
            for dtype, scalar in zip(output_dtypes,
                                     self.output_schema.all_scalars()):
                scalar.set_type(dtype)
            return

        # Fake execution of the function to infer shapes and types automatically
        had_issues = False
        try:
            type_net = core.Net('_temp_type_and_shape_inference_net')
            schema.InitEmptyRecord(type_net, input_record, enforce_types=True)

            function(type_net, self.input_record, self.output_schema, **kwargs)
            (shapes, types) = workspace.InferShapesAndTypes([type_net], {})

            for i in range(num_outputs):
                scalar_schema = (self.output_schema[i] if return_struct
                                 else self.output_schema)
                blob = scalar_schema()
                if blob not in types or blob not in shapes:
                    had_issues = True
                    continue
                if shapes[blob] == []:
                    # Scalar type
                    shape = tuple()
                elif shapes[blob][0] == 0:
                    # Leading 0 appears to denote the batch dimension here;
                    # strip it and keep the per-example shape.
                    shape = tuple(shapes[blob][1:])
                else:
                    logger.warning("unexpected shape: {}".format(shapes[blob]))
                    # If batch dimension is not first - give up on shape
                    # inference for that blob
                    had_issues = True
                    continue

                # TODO(amalevich): Move it to some shared library
                dtype = None
                if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
                    dtype = (np.float64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
                    dtype = (np.float32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT32:
                    dtype = (np.int32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT64:
                    dtype = (np.int64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT16:
                    dtype = (np.float16, shape)

                if dtype is not None:
                    scalar_schema.set_type(dtype)
        except TypeError as ex:
            # Inference is best-effort; failures leave the schema untyped.
            had_issues = True
            logger.warning(str(ex))

        if had_issues:
            logger.warning(
                "Type inference had problems for layer: {}".format(self.name))

    def add_ops(self, net):
        """Run the wrapped function to emit this layer's ops on `net`."""
        self._function(
            net, self.input_record, self.output_schema, **(self._kwargs))
|
pytorch-master
|
caffe2/python/layers/functional.py
|
import logging
from caffe2.python import schema
from caffe2.python.layers.layers import (
InstantiationContext,
ModelLayer,
)
logger = logging.getLogger(__name__)
class SelectRecordByContext(ModelLayer):
    """
    Lets a model take a different input path per instantiation context and
    join them into one output record. `Alias` is used by default because
    schema sometimes clones fields internally, so the output needs a static
    blob name; set `use_copy` to materialize real copies instead.
    """

    def __init__(
        self,
        model,
        input_record,
        name='select_record_by_context',
        check_field_metas=True,
        use_copy=False,
        default_output_record_field=None,
        **kwargs
    ):
        super(SelectRecordByContext, self).__init__(model, name, input_record,
                                                    **kwargs)

        assert isinstance(input_record, schema.Struct)
        assert len(input_record) > 1

        self.use_copy = use_copy

        # Optional fallback record when a context has no dedicated entry.
        if default_output_record_field is not None:
            self.default_output_record = input_record[default_output_record_field]
        else:
            self.default_output_record = None

        # Every candidate record must share one schema; the first entry is
        # the reference the output record is cloned from.
        ref_record = input_record[0]
        for candidate in input_record:
            assert schema.equal_schemas(candidate, ref_record,
                                        check_field_metas=check_field_metas)

        self.output_schema = schema.NewRecord(model.net, ref_record)

    def _set_output_blobs(self, net, context):
        # Pick the record registered for this context (or the default).
        record = self.input_record.get(context, self.default_output_record)
        assert record is not None, (
            "{} context is not in input record without providing default"
            " output".format(context)
        )

        bind = net.Copy if self.use_copy else net.Alias
        for in_blob, out_blob in zip(
            record.field_blobs(), self.output_schema.field_blobs()
        ):
            bind(in_blob, out_blob)

    def add_ops(self, net):
        self._set_output_blobs(net, InstantiationContext.PREDICTION)

    def add_eval_ops(self, net):
        self._set_output_blobs(net, InstantiationContext.EVAL)

    def add_train_ops(self, net):
        self._set_output_blobs(net, InstantiationContext.TRAINING)

    def add_ops_to_accumulate_pred(self, net):
        self._set_output_blobs(net, InstantiationContext.ACCUMULATE_PRED)
|
pytorch-master
|
caffe2/python/layers/select_record_by_context.py
|
## @package split
# Module caffe2.python.layers.split
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
class Split(ModelLayer):
    """
    Splits the input Scalar record along `axis` into multiple outputs.

    If `split` is None, the input is divided into `num_splits` equal chunks
    (the dimension must be divisible). Otherwise `split` gives the explicit
    chunk sizes and `num_splits` is ignored; the sizes must sum to the
    dimension being split. Output is a Tuple of Scalars.
    """

    def __init__(self, model, input_record, num_splits=1, axis=1,
                 name='split', split=None, **kwargs):
        super(Split, self).__init__(model, name, input_record, **kwargs)
        self.axis = axis
        # Assume that first dimension is batch, so actual axis in shape is
        # axis - 1
        axis -= 1
        assert axis >= 0

        assert isinstance(input_record, schema.Scalar),\
            "Incorrect input type. Expected Scalar, but received: {0}".\
            format(input_record)

        input_shape = input_record.field_type().shape
        # BUGFIX: this was `len(input_shape) >= axis`, which allowed
        # len(input_shape) == axis to pass and then raise an opaque
        # IndexError on input_shape[axis] below; indexing requires the shape
        # to have strictly more than `axis` dimensions.
        assert len(input_shape) > axis
        if split is None:
            assert input_shape[axis] % num_splits == 0
        else:
            num_splits = len(split)
            assert input_shape[axis] == sum(split)

        # Compute per-output shapes: one shared shape for an even split,
        # one shape per chunk for an explicit split.
        if split is None:
            output_shape = list(input_shape)
            output_shape[axis] = int(output_shape[axis] / num_splits)
        else:
            output_shape = []
            for i in range(num_splits):
                output_shape_i = list(input_shape)
                output_shape_i[axis] = split[i]
                output_shape.append(output_shape_i)

        data_type = input_record.field_type().base

        if split is None:
            output_scalars = [
                schema.Scalar(
                    (data_type, output_shape),
                    self.get_next_blob_reference('output_{}'.format(i)),
                )
                for i in range(num_splits)
            ]
        else:
            output_scalars = [
                schema.Scalar(
                    (data_type, output_shape[i]),
                    self.get_next_blob_reference('output_{}'.format(i)),
                )
                for i in range(num_splits)
            ]
        self.output_schema = schema.Tuple(*output_scalars)
        self.split = split

    def add_ops(self, net):
        """Emit the Split op mapping the input blob to all output blobs."""
        net.Split(
            self.input_record.field_blobs(),
            self.output_schema.field_blobs(),
            split=self.split,
            axis=self.axis,
        )
|
pytorch-master
|
caffe2/python/layers/split.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# @package label_smooth
# Module caffe2.python.layers.label_smooth
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class LabelSmooth(ModelLayer):
    """
    Smooths labels through a smoothing matrix.

    With a 2-element `smooth_matrix` the label is treated as a binary
    probability and mapped through a stump function (below/above 0.5 maps to
    smooth_matrix[0]/smooth_matrix[1]). Otherwise `smooth_matrix` must
    flatten to a square matrix of side `dim`, and the one-hot-encoded
    categorical label is smoothed by matrix multiplication.
    """

    def __init__(
        self, model, label, smooth_matrix, name='label_smooth', **kwargs
    ):
        super(LabelSmooth, self).__init__(model, name, label, **kwargs)
        self.label = label
        # shape as a list
        smooth_matrix = np.array(smooth_matrix).astype(np.float32).flatten()
        self.set_dim(smooth_matrix)
        self.set_smooth_matrix(smooth_matrix)
        self.output_schema = schema.Scalar(
            (np.float32, (self.dim, )),
            self.get_next_blob_reference('smoothed_label')
        )

    def set_dim(self, smooth_matrix):
        """
        Derive the label dimension from the flattened smoothing matrix and
        record whether the binary-probability path applies (exactly 2
        elements).
        """
        num_elements = smooth_matrix.size
        self.binary_prob_label = (num_elements == 2)
        if self.binary_prob_label:
            self.dim = 1
        else:
            # BUGFIX: the old check `np.sqrt(n) ** 2 == n` compared floats
            # and is subject to rounding error; validate squareness with
            # exact integer arithmetic instead.
            dim = int(round(np.sqrt(num_elements)))
            assert dim * dim == num_elements, (
                "smooth_matrix with {} elements is not square".format(
                    num_elements)
            )
            self.dim = dim

    def set_smooth_matrix(self, smooth_matrix):
        """
        Register the smoothing data: global constant blobs (matrix + dim) for
        the categorical path, or the raw 2-element array for the binary path.
        """
        if not self.binary_prob_label:
            self.smooth_matrix = self.model.add_global_constant(
                '%s_label_smooth_matrix' % self.name,
                array=smooth_matrix.reshape((self.dim, self.dim)),
                dtype=np.dtype(np.float32),
            )
            self.len = self.model.add_global_constant(
                '%s_label_dim' % self.name,
                array=self.dim,
                dtype=np.dtype(np.int64),
            )
        else:
            self.smooth_matrix = smooth_matrix

    def add_ops_for_binary_prob_label(self, net):
        """Binary path: threshold the (float32) label at 0.5."""
        if self.label.field_type().base != np.float32:
            float32_label = net.NextScopedBlob('float32_label')
            net.Cast([self.label()], [float32_label], to=core.DataType.FLOAT)
        else:
            float32_label = self.label()
        net.StumpFunc(
            float32_label,
            self.output_schema(),
            threshold=0.5,
            low_value=self.smooth_matrix[0],
            high_value=self.smooth_matrix[1],
        )

    def add_ops_for_categorical_label(self, net):
        """Categorical path: one-hot the int64 label, multiply by matrix."""
        if self.label.field_type().base != np.int64:
            int64_label = net.NextScopedBlob('int64_label')
            net.Cast([self.label()], [int64_label], to=core.DataType.INT64)
        else:
            int64_label = self.label()
        one_hot_label = net.NextScopedBlob('one_hot_label')
        net.OneHot([int64_label, self.len], [one_hot_label])
        net.MatMul([one_hot_label, self.smooth_matrix], self.output_schema())

    def add_ops(self, net):
        if self.binary_prob_label:
            self.add_ops_for_binary_prob_label(net)
        else:
            self.add_ops_for_categorical_label(net)
|
pytorch-master
|
caffe2/python/layers/label_smooth.py
|
## @package reservoir_sampling
# Module caffe2.python.layers.reservoir_sampling
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class ReservoirSampling(ModelLayer):
    """
    Maintains a fixed-size reservoir sample over the rows of the input
    record. Pack complex records with PackRecords before feeding them here.
    This layer is not thread safe.
    """

    def __init__(self, model, input_record, num_to_collect,
                 name='reservoir_sampling', **kwargs):
        super(ReservoirSampling, self).__init__(
            model, name, input_record, **kwargs)

        assert num_to_collect > 0
        self.num_to_collect = num_to_collect

        # Buffer holding the sampled rows; starts empty and grows up to
        # num_to_collect entries.
        self.reservoir = self.create_param(
            param_name='reservoir',
            shape=[0],
            initializer=('ConstantFill',),
            optimizer=model.NoOptim,
        )
        # Running count of rows ever offered to the reservoir.
        self.num_visited_blob = self.create_param(
            param_name='num_visited',
            shape=[],
            initializer=('ConstantFill', {
                'value': 0,
                'dtype': core.DataType.INT64,
            }),
            optimizer=model.NoOptim,
        )
        # Mutex consumed by the ReservoirSampling op.
        self.mutex = self.create_param(
            param_name='mutex',
            shape=[],
            initializer=('CreateMutex',),
            optimizer=model.NoOptim,
        )

        self.extra_input_blobs = []
        self.extra_output_blobs = []
        if 'object_id' in input_record:
            # Deduplication support: bidirectional maps between object ids
            # and reservoir positions, threaded through the op as extra
            # inputs/outputs.
            object_to_pos = self.create_param(
                param_name='object_to_pos',
                shape=None,
                initializer=('CreateMap', {
                    'key_dtype': core.DataType.INT64,
                    'valued_dtype': core.DataType.INT32,
                }),
                optimizer=model.NoOptim,
            )
            pos_to_object = self.create_param(
                param_name='pos_to_object',
                shape=[0],
                initializer=('ConstantFill', {
                    'value': 0,
                    'dtype': core.DataType.INT64,
                }),
                optimizer=model.NoOptim,
            )
            self.extra_input_blobs.append(input_record.object_id())
            self.extra_input_blobs.extend([object_to_pos, pos_to_object])
            self.extra_output_blobs.extend([object_to_pos, pos_to_object])

        self.output_schema = schema.Struct(
            (
                'reservoir',
                schema.from_blob_list(input_record.data, [self.reservoir])
            ),
            ('num_visited', schema.Scalar(blob=self.num_visited_blob)),
            ('mutex', schema.Scalar(blob=self.mutex)),
        )

    def add_ops(self, net):
        # The op updates the reservoir and visit count in place.
        op_inputs = [
            self.reservoir,
            self.num_visited_blob,
            self.input_record.data(),
            self.mutex,
        ] + self.extra_input_blobs
        op_outputs = (
            [self.reservoir, self.num_visited_blob] + self.extra_output_blobs
        )
        net.ReservoirSampling(
            op_inputs,
            op_outputs,
            num_to_collect=self.num_to_collect,
        )
|
pytorch-master
|
caffe2/python/layers/reservoir_sampling.py
|
## @package sparse_feature_hash
# Module caffe2.python.layers.sparse_feature_hash
from caffe2.python import schema, core
from caffe2.python.layers.layers import (
ModelLayer,
IdList,
IdScoreList,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class SparseFeatureHash(ModelLayer):
    """
    Remap sparse id features into the range [0, modulo), either by hashing
    (IndexHash op) or by modulo (optionally preceded by integer division).

    At most one of use_hashing / use_divide_mod may be enabled.
    """

    def __init__(self, model, input_record, seed=0, modulo=None,
                 use_hashing=True, use_divide_mod=False, divisor=None, name='sparse_feature_hash', **kwargs):
        super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)

        assert use_hashing + use_divide_mod < 2, "use_hashing and use_divide_mod cannot be set true at the same time."

        if use_divide_mod:
            assert divisor >= 1, 'Unexpected divisor: {}'.format(divisor)
            # Divisor is stored as a 1-element int64 blob so the Div op can
            # consume it alongside the id blob.
            self.divisor = self.create_param(param_name='divisor',
                                             shape=[1],
                                             initializer=('GivenTensorInt64Fill', {'values': np.array([divisor])}),
                                             optimizer=model.NoOptim)

        self.seed = seed
        self.use_hashing = use_hashing
        self.use_divide_mod = use_divide_mod

        if schema.equal_schemas(input_record, IdList):
            # Hash range defaults to the size recorded in the input metadata
            # (desired_hash_size or categorical_limit).
            self.modulo = modulo or self.extract_hash_size(input_record.items.metadata)
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.items.metadata.feature_specs if input_record.items.metadata else None,
                expected_value=input_record.items.metadata.expected_value if input_record.items.metadata else None
            )
            with core.NameScope(name):
                self.output_schema = schema.NewRecord(model.net, IdList)
                self.output_schema.items.set_metadata(metadata)

        elif schema.equal_schemas(input_record, IdScoreList):
            self.modulo = modulo or self.extract_hash_size(input_record.keys.metadata)
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.keys.metadata.feature_specs,
                expected_value=input_record.keys.metadata.expected_value
            )
            with core.NameScope(name):
                self.output_schema = schema.NewRecord(model.net, IdScoreList)
                self.output_schema.keys.set_metadata(metadata)

        else:
            assert False, "Input type must be one of (IdList, IdScoreList)"

        assert self.modulo >= 1, 'Unexpected modulo: {}'.format(self.modulo)
        if input_record.lengths.metadata:
            self.output_schema.lengths.set_metadata(input_record.lengths.metadata)

        # operators in this layer do not have CUDA implementation yet.
        # In addition, since the sparse feature keys that we are hashing are
        # typically on CPU originally, it makes sense to have this layer on CPU.
        self.tags.update([Tags.CPU_ONLY])

    def extract_hash_size(self, metadata):
        """Return the hash range implied by *metadata*; desired_hash_size
        takes precedence over categorical_limit."""
        if metadata.feature_specs and metadata.feature_specs.desired_hash_size:
            return metadata.feature_specs.desired_hash_size
        elif metadata.categorical_limit is not None:
            return metadata.categorical_limit
        else:
            assert False, "desired_hash_size or categorical_limit must be set"

    def add_ops(self, net):
        # Lengths pass through untouched; only the ids are remapped.
        net.Copy(
            self.input_record.lengths(),
            self.output_schema.lengths()
        )
        if schema.equal_schemas(self.output_schema, IdList):
            input_blob = self.input_record.items()
            output_blob = self.output_schema.items()
        elif schema.equal_schemas(self.output_schema, IdScoreList):
            input_blob = self.input_record.keys()
            output_blob = self.output_schema.keys()
            # Scores are copied through unchanged; only keys are remapped.
            net.Copy(
                self.input_record.values(),
                self.output_schema.values()
            )
        else:
            raise NotImplementedError()

        if self.use_hashing:
            net.IndexHash(
                input_blob, output_blob, seed=self.seed, modulo=self.modulo
            )
        else:
            if self.use_divide_mod:
                # First divide ids by the divisor, then take the modulo.
                quotient = net.Div([input_blob, self.divisor], [net.NextScopedBlob('quotient')])
                net.Mod(
                    quotient, output_blob, divisor=self.modulo, sign_follow_divisor=True
                )
            else:
                net.Mod(
                    input_blob, output_blob, divisor=self.modulo, sign_follow_divisor=True
                )
|
pytorch-master
|
caffe2/python/layers/sparse_feature_hash.py
|
## @package fc_with_bootstrap
# Module caffe2.python.layers.fc_with_bootstrap
import math
import numpy as np
from caffe2.python import core, schema
from caffe2.python.helpers.arg_scope import get_current_scope
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
def get_fc_predictor_version(fc_version):
    """
    Validate that *fc_version* names a supported predictor-net FC precision
    and return it unchanged. Only "fp32" is currently supported.
    """
    supported = ["fp32"]
    message = (
        "Only support fp32 for the fully connected layer "
        "in the predictor net, the provided FC precision is {}".format(fc_version)
    )
    assert fc_version in supported, message
    return fc_version
class FCWithBootstrap(SamplingTrainableMixin, ModelLayer):
    """
    num_bootstrap fully-connected replicas sharing one input. During training
    each replica consumes a bootstrap resample of the batch (indices drawn
    uniformly with replacement); at eval/predict time every replica scores
    the raw input and dummy indices are emitted alongside the predictions.
    """

    def __init__(
        self,
        model,
        input_record,
        output_dims,
        num_bootstrap,
        weight_init=None,
        bias_init=None,
        weight_optim=None,
        bias_optim=None,
        name="fc_with_bootstrap",
        weight_reg=None,
        bias_reg=None,
        clip_param=None,
        axis=1,
        **kwargs
    ):
        super(FCWithBootstrap, self).__init__(model, name, input_record, **kwargs)
        assert isinstance(
            input_record, schema.Scalar
        ), "Incorrect input type {}".format(input_record)
        assert (
            len(input_record.field_types()[0].shape) > 0
        ), "FC expects limited dimensions of the input tensor"
        assert axis >= 1, "axis {} should >= 1.".format(axis)
        self.axis = axis
        input_dims = np.prod(input_record.field_types()[0].shape[axis - 1 :])
        assert input_dims > 0, "FC expects input dimensions > 0, got {}".format(
            input_dims
        )

        # Optional Clip-op kwargs applied to the params before the FC.
        self.clip_args = None

        # attributes for bootstrapping below
        self.num_bootstrap = num_bootstrap

        # input dim shape
        self.input_dims = input_dims

        # flat [w_0, b_0, ..., w_b, b_b] params; also used at eval time
        self.bootstrapped_FCs = []

        # scalar containing batch_size blob so that we don't need to recompute
        self.batch_size = None

        # we want this to be the last FC, so the output_dim should be 1, set to None
        self.output_dim_vec = None

        # lower bound when creating random indices
        self.lower_bound = None

        # upper bound when creating random indices
        self.upper_bound = None

        if clip_param is not None:
            assert len(clip_param) == 2, (
                "clip_param must be a tuple / list "
                "of length 2 and in the form of (clip_min, clip max)"
            )
            clip_min, clip_max = clip_param
            assert (
                clip_min is not None or clip_max is not None
            ), "clip_min, and clip_max in clip_param cannot both be None"
            assert (
                clip_min is None or clip_max is None
            ) or clip_min < clip_max, (
                "clip_param = [clip_min, clip_max] must have clip_min < clip_max"
            )
            self.clip_args = {}
            if clip_min is not None:
                self.clip_args["min"] = clip_min
            if clip_max is not None:
                self.clip_args["max"] = clip_max

        # Default init: uniform scaled by 1/sqrt(fan_in).
        scale = math.sqrt(1.0 / input_dims)
        weight_init = (
            weight_init
            if weight_init
            else ("UniformFill", {"min": -scale, "max": scale})
        )
        bias_init = (
            bias_init if bias_init else ("UniformFill", {"min": -scale, "max": scale})
        )

        """
        bootstrapped FCs:
            Ex: [
                bootstrapped_weights_blob_1, bootstrapped_bias_blob_1,
                ...,
                ...,
                bootstrapped_weights_blob_b, bootstrapped_bias_blob_b
            ]

        output_schema:
            Note: indices will always be on even indices.
            Ex: Struct(
                indices_0_blob,
                preds_0_blob,
                ...
                ...
                indices_b_blob,
                preds_b_blob
            )
        """
        # NOTE(review): a dead local `bootstrapped_FCs = []` (shadowing the
        # attribute) and an unused trailing `output_shape` computation were
        # removed from the original; neither value was ever read.
        output_schema = schema.Struct()
        for i in range(num_bootstrap):
            output_schema += schema.Struct(
                (
                    "bootstrap_iteration_{}/indices".format(i),
                    self.get_next_blob_reference(
                        "bootstrap_iteration_{}/indices".format(i)
                    ),
                ),
                (
                    "bootstrap_iteration_{}/preds".format(i),
                    self.get_next_blob_reference(
                        "bootstrap_iteration_{}/preds".format(i)
                    ),
                ),
            )

            self.bootstrapped_FCs.extend(
                [
                    self.create_param(
                        param_name="bootstrap_iteration_{}/w".format(i),
                        shape=[output_dims, input_dims],
                        initializer=weight_init,
                        optimizer=weight_optim,
                        regularizer=weight_reg,
                    ),
                    self.create_param(
                        param_name="bootstrap_iteration_{}/b".format(i),
                        shape=[output_dims],
                        initializer=bias_init,
                        optimizer=bias_optim,
                        regularizer=bias_reg,
                    ),
                ]
            )

        self.output_schema = output_schema

    def _generate_bootstrapped_indices(self, net, copied_cur_layer, iteration):
        """
        Args:
            net: the caffe2 net to insert operator
            copied_cur_layer: blob of the bootstrapped features (make sure this
                blob has a stop_gradient on)
            iteration: the bootstrap iteration to generate for. Used to correctly
                populate the output_schema
        Return:
            A blob containing the generated indices of shape: (batch_size,)
        """
        with core.NameScope("bootstrap_iteration_{}".format(iteration)):
            if iteration == 0:
                # capture batch_size / index bounds once for efficiency and
                # reuse them on every later iteration
                input_shape = net.Shape(copied_cur_layer, "input_shape")
                batch_size_index = net.Const(np.array([0]), "batch_size_index")
                batch_size = net.Gather([input_shape, batch_size_index], "batch_size")
                self.batch_size = batch_size

                lower_bound = net.Const(np.array([0]), "lower_bound", dtype=np.int32)
                offset = net.Const(np.array([1]), "offset", dtype=np.int32)
                int_batch_size = net.Cast(
                    [self.batch_size], "int_batch_size", to=core.DataType.INT32
                )
                upper_bound = net.Sub([int_batch_size, offset], "upper_bound")

                self.lower_bound = lower_bound
                self.upper_bound = upper_bound

            # Uniform draw with replacement in [lower_bound, upper_bound];
            # output written directly into the schema's indices blob.
            indices = net.UniformIntFill(
                [self.batch_size, self.lower_bound, self.upper_bound],
                self.output_schema[iteration * 2].field_blobs()[0],
                input_as_shape=1,
            )

            return indices

    def _bootstrap_ops(self, net, copied_cur_layer, indices, iteration):
        """
        This method contains all the bootstrapping logic used to bootstrap
        the features. Only used by the train_net.

        Args:
            net: the caffe2 net to insert bootstrapping operators
            copied_cur_layer: the blob representing the current features.
                Note, this layer should have a stop_gradient on it.
        Returns:
            bootstrapped_features: blob of bootstrapped version of cur_layer
                with same dimensions
        """
        # draw features based upon the bootstrapped indices
        bootstrapped_features = net.Gather(
            [copied_cur_layer, indices],
            net.NextScopedBlob("bootstrapped_features_{}".format(iteration)),
        )

        # re-wrap as a Scalar so _insert_fc_ops can consume it uniformly
        bootstrapped_features = schema.Scalar(
            (np.float32, self.input_dims), bootstrapped_features
        )

        return bootstrapped_features

    def _insert_fc_ops(self, net, features, params, outputs, version):
        """
        Args:
            net: the caffe2 net to insert operator
            features: Scalar containing blob of the bootstrapped features or
                actual cur_layer features
            params: weight and bias for FC
            outputs: the output blobs
            version: support fp32 for now.
        """
        if version == "fp32":
            pred_blob = net.FC(
                features.field_blobs() + params, outputs, axis=self.axis, **self.kwargs
            )
            return pred_blob
        else:
            raise Exception("unsupported FC type version {}".format(version))

    def _add_ops(self, net, features, iteration, params, version):
        """
        Args:
            params: the weight and bias, passed by either add_ops or
                add_train_ops function
            features: feature blobs to predict on. Can be the actual cur_layer
                or the bootstrapped_feature blobs.
            version: currently fp32 support only
        """
        if self.clip_args is not None:
            # Clip the params (not gradients) before the FC consumes them.
            clipped_params = [net.NextScopedBlob("clipped_%s" % str(p)) for p in params]
            for p, cp in zip(params, clipped_params):
                net.Clip([p], [cp], **self.clip_args)
            params = clipped_params

        if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
            self._insert_fc_ops(
                net=net,
                features=features,
                params=params,
                outputs=[self.output_schema.field_blobs()[(iteration * 2) + 1]],
                version=version,
            )

    def add_ops(self, net):
        """
        Both the predict net and the eval net will call this function.

        For bootstrapping approach, the goal is to pass the cur_layer feature
        inputs through all the bootstrapped FCs that are stored under
        self.bootstrapped_FCs. Return the preds in the same output_schema
        with dummy indices (because they are not needed).
        """
        version_info = get_current_scope().get(
            get_fc_predictor_version.__name__, {"fc_version": "fp32"}
        )
        predictor_fc_fp_version = version_info["fc_version"]

        for i in range(self.num_bootstrap):
            # these are dummy indices, not to be used anywhere
            indices = self._generate_bootstrapped_indices(
                net=net,
                copied_cur_layer=self.input_record.field_blobs()[0],
                iteration=i,
            )

            params = self.bootstrapped_FCs[i * 2 : (i * 2) + 2]

            self._add_ops(
                net=net,
                features=self.input_record,
                params=params,
                iteration=i,
                version=predictor_fc_fp_version,
            )

    def add_train_ops(self, net):
        # use the train_param_blobs to be consistent with the SamplingTrain unittest
        # obtain features
        for i in range(self.num_bootstrap):
            indices = self._generate_bootstrapped_indices(
                net=net,
                copied_cur_layer=self.input_record.field_blobs()[0],
                iteration=i,
            )
            bootstrapped_features = self._bootstrap_ops(
                net=net,
                copied_cur_layer=self.input_record.field_blobs()[0],
                indices=indices,
                iteration=i,
            )
            self._add_ops(
                net,
                features=bootstrapped_features,
                iteration=i,
                params=self.train_param_blobs[i * 2 : (i * 2) + 2],
                version="fp32",
            )

    def get_fp16_compatible_parameters(self):
        # Only the weight blobs (even indices) are fp16-convertible.
        if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
            return [
                blob for idx, blob in enumerate(self.bootstrapped_FCs) if idx % 2 == 0
            ]
        else:
            raise Exception(
                "Currently only supports functionality for output_dim_vec == 1"
            )

    @property
    def param_blobs(self):
        if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
            return self.bootstrapped_FCs
        else:
            raise Exception("FCWithBootstrap layer only supports output_dim_vec==1")
|
pytorch-master
|
caffe2/python/layers/fc_with_bootstrap.py
|
# Module caffe2.python.layers.dropout
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
class Dropout(ModelLayer):
    """
    Dropout layer over a scalar input record.

    ratio is the drop probability used in training; dropout_for_eval keeps
    dropout active in the eval/predict nets as well.
    """

    def __init__(
            self,
            model,
            input_record,
            name='dropout',
            ratio=0.5,
            dropout_for_eval=False,
            **kwargs):
        super(Dropout, self).__init__(model, name, input_record, **kwargs)
        assert isinstance(input_record, schema.Scalar), "Incorrect input type"
        assert 0 <= ratio < 1.0, \
            "Expected 0 <= ratio < 1, but got ratio of %s" % ratio

        self.dropout_for_eval = dropout_for_eval
        self.ratio = ratio

        self.output_schema = input_record.clone_schema()
        self.output_schema.set_value(self.get_next_blob_reference('output'))

    def _add_ops(self, net, is_test):
        # The op emits the output plus a mask blob; the mask is discarded.
        mask_blob = net.NextScopedBlob('d_mask')
        net.Dropout(
            self.input_record.field_blobs(),
            self.output_schema.field_blobs() + [mask_blob],
            ratio=self.ratio,
            is_test=is_test,
        )

    def add_train_ops(self, net):
        # Training always applies dropout.
        self._add_ops(net, is_test=False)

    def add_eval_ops(self, net):
        # Eval applies dropout only when dropout_for_eval is set.
        self._add_ops(net, is_test=(not self.dropout_for_eval))

    def add_ops(self, net):
        self.add_eval_ops(net)
|
pytorch-master
|
caffe2/python/layers/dropout.py
|
## @package conv
# Module caffe2.python.layers.conv
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
import numpy as np
class Conv(ModelLayer):
    """
    Convolutional layer
    Input:
    - input_record: at least has the shape info of C (num_channels)
    - output_dim: number of convolutional filters
    - kernel_h, kernel_w: kernel size for h and w
    - stride_h, stride_w: stride for h and w
    - pad_b, pad_l, pad_r, pad_t: padding sizes, if stride == 1,
      'None' value will do auto padding
    - order: either 'NHWC' or 'NCHW'
    """

    def __init__(self, model, input_record, output_dim, kernel_h, kernel_w,
                 stride_h, stride_w, pad_b=None, pad_l=None, pad_r=None,
                 pad_t=None, order='NHWC', kernel_init=None, bias_init=None,
                 kernel_optim=None, bias_optim=None,
                 name='conv', **kwargs):
        super(Conv, self).__init__(model, name, input_record, **kwargs)
        assert isinstance(input_record, schema.Scalar), "Incorrect input type"
        # input num_channels (C) is needed
        input_dims = input_record.field_type().shape

        assert (kernel_h > 0 and isinstance(kernel_h, int)), (
            "kernel_h should be positive integer")
        assert (kernel_w > 0 and isinstance(kernel_w, int)), (
            "kernel_w should be positive integer")
        self.kernel_h = kernel_h
        self.kernel_w = kernel_w

        assert (stride_h > 0 and isinstance(stride_h, int)), (
            "stride_h should be positive integer")
        assert (stride_w > 0 and isinstance(stride_w, int)), (
            "stride_w should be positive integer")
        self.stride_h = stride_h
        self.stride_w = stride_w

        # output_dim calculation (http://cs231n.github.io/convolutional-networks/)
        # output_dim_w = (input_dim_w - kernel_w + pad_r + pad_l) / stride_w + 1
        # so, do auto_padding requires
        # pad_r, pad_l = [(input_dim_w - 1) * stride_w - input_dim_w + kernel_w] / 2
        # similair for pad_t and pad_b to auto pad kernel_h
        # here we only do auto padding for stride = 1 case
        if stride_h == 1:
            # "same"-style auto padding keeps spatial size when stride is 1
            pad_t = int((kernel_h - 1) / 2) if pad_t is None else pad_t
            pad_b = int((kernel_h - 1) / 2) if pad_b is None else pad_b
        else:
            pad_t = 0 if pad_t is None else pad_t
            pad_b = 0 if pad_b is None else pad_b

        if stride_w == 1:
            pad_r = int((kernel_w - 1) / 2) if pad_r is None else pad_r
            pad_l = int((kernel_w - 1) / 2) if pad_l is None else pad_l
        else:
            pad_r = 0 if pad_r is None else pad_r
            pad_l = 0 if pad_l is None else pad_l

        assert (pad_t >= 0 and isinstance(pad_t, int)), "pad_t should be int >= 0"
        assert (pad_b >= 0 and isinstance(pad_b, int)), "pad_b should be int >= 0"
        assert (pad_r >= 0 and isinstance(pad_r, int)), "pad_r should be int >= 0"
        assert (pad_l >= 0 and isinstance(pad_l, int)), "pad_l should be int >= 0"
        self.pad_t = pad_t
        self.pad_b = pad_b
        self.pad_r = pad_r
        self.pad_l = pad_l

        assert order in ['NHWC', 'NCHW'], "order should either 'NHWC' or 'NCHW'"
        self.order = order

        # Kernel layout follows the data order: channels last for NHWC,
        # channels after the filter count for NCHW.
        if order == 'NHWC':
            input_c = input_dims[-1]
            kernel_shape = [output_dim, kernel_h, kernel_w, input_c]
        elif order == 'NCHW':
            input_c = input_dims[0]
            kernel_shape = [output_dim, input_c, kernel_h, kernel_w]
        assert input_c > 0, (
            "Number of input channels in conv parameters should be positive")

        kernel_init = kernel_init if kernel_init else (
            'XavierFill', {}
        )
        bias_init = bias_init if bias_init else (
            'ConstantFill', {'value': 0.0}
        )

        self.kernel = self.create_param(
            param_name='conv_kernel',
            shape=kernel_shape,
            initializer=kernel_init,
            optimizer=kernel_optim,
        )
        self.bias = self.create_param(
            param_name='conv_bias',
            shape=[output_dim],
            initializer=bias_init,
            optimizer=bias_optim,
        )

        # the output_schema only has the num of output channels
        # output_h and output_w would be inferred internally
        self.output_schema = schema.Scalar(
            (np.float32, (output_dim,)),
            self.get_next_blob_reference('output')
        )

    def add_ops(self, net):
        # Single Conv op consuming the input plus learned kernel/bias blobs.
        net.Conv(
            self.input_record.field_blobs() + [self.kernel, self.bias],
            self.output_schema.field_blobs(),
            kernel_h=self.kernel_h,
            kernel_w=self.kernel_w,
            stride_h=self.stride_h,
            stride_w=self.stride_w,
            pad_t=self.pad_t,
            pad_l=self.pad_l,
            pad_b=self.pad_b,
            pad_r=self.pad_r,
            order=self.order
        )
|
pytorch-master
|
caffe2/python/layers/conv.py
|
## @package layers
# Module caffe2.python.layers.layers
import logging
from collections import namedtuple
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, schema, scope, utils, workspace
from caffe2.python.layers.tags import TagContext
# Module-level logger for the layer infrastructure.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Some types to simplify descriptions of things traveling between ops
# Canonical schemas for sparse features: a list of int64 ids and a map of
# int64 ids to float32 scores. The *WithEvicted variants are the schema
# module's eviction-aware counterparts (see schema.ListWithEvicted).
IdList = schema.List(np.int64)
IdScoreList = schema.Map(np.int64, np.float32)
IdListWithEvicted = schema.ListWithEvicted(np.int64)
IdScoreListWithEvicted = schema.MapWithEvicted(np.int64, np.float32)
def almost_equal_schemas(
    record,
    original_schema,
    check_field_names=True,
    check_field_types=True,
    check_field_metas=False,
):
    """
    Like schema.equal_schemas, except that an IdList / IdScoreList schema
    also matches its *WithEvicted counterpart. For any other schema this
    falls back to a plain equal_schemas comparison with default flags.
    """
    flags = dict(
        check_field_names=check_field_names,
        check_field_types=check_field_types,
        check_field_metas=check_field_metas,
    )
    if original_schema == IdList:
        candidates = (IdList, IdListWithEvicted)
    elif original_schema == IdScoreList:
        candidates = (IdScoreList, IdScoreListWithEvicted)
    else:
        return schema.equal_schemas(record, original_schema)
    return any(
        schema.equal_schemas(record, candidate, **flags)
        for candidate in candidates
    )
def get_key(record):
    """
    Return the id field of an IdList ("values") or IdScoreList
    ("values:keys") record; the field must carry metadata.
    """
    if almost_equal_schemas(record, IdList):
        field = "values"
    elif almost_equal_schemas(
        record, IdScoreList, check_field_types=False
    ):
        field = "values:keys"
    else:
        raise NotImplementedError("Not implemented for {}".format(record))
    key = record[field]
    assert key.metadata is not None, "Blob {} doesn't have metadata".format(
        str(key())
    )
    return key
def get_categorical_limit(record):
    """Return the categorical_limit recorded on the record's id field."""
    return get_key(record).metadata.categorical_limit
def get_avg_length(record):
    """Return the expected (average) value from the lengths field metadata."""
    lengths_field = record["lengths"]
    return lengths_field.metadata.expected_value
def set_request_only(field):
    """
    Mark every scalar in *field* as request-only by rewriting its metadata.

    Existing categorical_limit / expected_value / feature-spec details are
    preserved; only feature_is_request_only is forced to True.
    """
    for f in field.all_scalars():
        old_meta = f.metadata
        if old_meta:
            categorical_limit = old_meta.categorical_limit
            expected_value = old_meta.expected_value
        else:
            categorical_limit = None
            expected_value = None

        old_specs = old_meta.feature_specs if old_meta else None
        if old_specs:
            # Carry over every spec field, flipping only the request-only bit.
            feature_specs = schema.FeatureSpec(
                feature_type=old_specs.feature_type,
                feature_names=old_specs.feature_names,
                feature_ids=old_specs.feature_ids,
                feature_is_request_only=True,
                desired_hash_size=old_specs.desired_hash_size,
            )
        else:
            feature_specs = schema.FeatureSpec(feature_is_request_only=True)

        # make sure not to set categorical_limit for a non-integer field
        if not np.issubdtype(f.field_type(), np.integer):
            assert (
                categorical_limit is None
            ), "categorical_limit shouldn't be set for no-integer field"

        f.set_metadata(
            schema.Metadata(
                categorical_limit=categorical_limit,
                expected_value=expected_value,
                feature_specs=feature_specs,
            )
        )
class InstantiationContext(object):
    """
    List of contexts where layer could be instantiated
    """
    # The layers support this context will accumulate predictions, labels,
    # weights. The accumulated data can later be used to compute
    # calibration or for other
    # purpose.
    ACCUMULATE_PRED = "accumulate_pred"
    # Evaluation-net instantiation.
    EVAL = "eval"
    # Predict/serving-net instantiation.
    PREDICTION = "prediction"
    # Training-net instantiation (the default in add_operators).
    TRAINING = "training"
# Global registry mapping layer name -> layer class; populated via
# register_layer and consulted by the factory helpers below.
_LAYER_REGISTRY = {}


def register_layer(name, layer):
    """Register *layer* under *name*; duplicate names are rejected."""
    assert name not in _LAYER_REGISTRY, "{0} already exists".format(name)
    _LAYER_REGISTRY[name] = layer


def layer_exists(name):
    """Return True iff a layer was registered under *name*."""
    return name in _LAYER_REGISTRY


def get_layer_class(name):
    """Return the layer class registered under *name* (KeyError if absent)."""
    return _LAYER_REGISTRY[name]


def create_layer(layer_name, *args, **kwargs):
    """Instantiate the layer registered under *layer_name* with given args."""
    return _LAYER_REGISTRY[layer_name](*args, **kwargs)
# Parameter-server metadata for a sparse parameter: the blob holding the
# sparse keys and their average length per example.
LayerPsParam = namedtuple("LayerPsParam", ["sparse_key", "average_length"])
class LayerParameter(object):
    """
    Bundles everything the trainer needs to know about one layer parameter:
    the blob, its optimizer, the initializer operator (from which the shape
    is inferred), optional parameter-server info and an optional regularizer.
    """

    def __init__(
        self,
        parameter=None,
        optimizer=None,
        initializer=None,
        ps_param=None,
        regularizer=None,
    ):
        assert isinstance(
            parameter, core.BlobReference
        ), "expect {0} to be a blob reference".format(str(parameter))
        # need to put the following line (shape) before initializer
        # shape will be updated once initializer is (re)set
        self._shape = None
        self.parameter = parameter
        self.optimizer = optimizer
        self.initializer = initializer
        self.ps_param = ps_param
        self.regularizer = regularizer

    @property
    def initializer(self):
        return self._initializer

    @initializer.setter
    def initializer(self, op):
        # Accept None or a valid operator; setting a real initializer also
        # (re)derives the parameter shape.
        assert op is None or core.IsOperator(
            getattr(op, "type", None)
        ), "initializer expects an operator, got type: {}".format(type(op))
        self._initializer = op
        if op is not None:
            self.shape = self._infer_shape_from_initializer()

    @property
    def shape(self):
        return self._shape

    @shape.setter
    def shape(self, shape):
        # Once set, the shape may only be re-assigned to an equal value.
        assert self.shape is None or self.shape == shape, (
            "inconsistent shape for layer parameter:"
            " {}, expect: {}, but got {}".format(self, self.shape, shape)
        )
        self._shape = shape

    def _infer_shape_from_initializer(self):
        """Derive the parameter shape from the initializer op: prefer its
        'shape' argument, else run the op in a scratch workspace and read the
        produced blob's shape; returns None if that fails."""
        for arg in self.initializer.arg:
            if arg.name == "shape":
                return list(arg.ints)
        with workspace.WorkspaceGuard("model_init_by_loading_params"):
            try:
                net = core.Net("shape_checker")
                net._net.op.extend([self.initializer])
                shape_blob = net.NextScopedBlob(self.parameter + "_shape")
                net.Shape([self.parameter], shape_blob)
                workspace.RunNetOnce(net)
                shape = workspace.FetchBlob(shape_blob).tolist()
                # ResetWorkspace to save memory
                workspace.ResetWorkspace()
                return shape
            except RuntimeError as exp:
                logger.warning(
                    "Cannot infer the shape of blob {} from operator {}: {}".format(
                        self.parameter, self.initializer.type, exp
                    )
                )
                workspace.ResetWorkspace()
                return None

    def __str__(self):
        return str(self.parameter)
def is_request_only_scalar(scalar):
    """
    Return True iff the scalar has at least one metadata entry and every
    entry marks the feature as request-only via its feature_specs.
    """
    metadata_list = scalar.field_metadata()
    if not metadata_list:
        return False
    return all(
        bool(
            meta
            and meta.feature_specs
            and getattr(meta.feature_specs, "feature_is_request_only", False)
        )
        for meta in metadata_list
    )
# Contains features accessed in a model layer of a given type.
# `type`: a string naming the kind of feature, consistent with FeatureSpec.
# `ids`: a set of feature IDs that are accessed in the model layer.
AccessedFeatures = namedtuple("AccessedFeatures", ["type", "ids"])
class ModelLayer(object):
def __init__(
self,
model,
prefix,
input_record,
predict_input_record_fields=None,
tags=None,
**kwargs
):
"""
Base class for model layers. Layer is an abstraction that allows to
provide model description in terms of meta-operators, where each of the
meta-operators can have different implementations for training,
evaluation and prediction, that are instantiated later. As an example
SampledSoftmax can do something related to sampling depending on
supervision during the training and just apply softmax if it's used for
prediction/evaluation.
All inputs/outputs from layers are represented as a record (instance of
schema bounded to blobs) and are accessible through input_record and
output_schema. If Layer needs to have only a subset of inputs/provides
subset of outputs during the inference - it should provide
predict_input_record and predict_output_schema correspondingly (those
records are expected to be a subset of input_record/output_schema).
Each layer has a list of Tags associated with it, that depends on
current context and arguments. It's possible to use those tags during
the instantiation time.
"""
self.name = model.next_layer_name(prefix)
self.model = model
self.kwargs = kwargs
self._input_record = input_record
if predict_input_record_fields:
if not isinstance(predict_input_record_fields, list):
predict_input_record_fields = [predict_input_record_fields]
self._predict_input_record = self._input_record[predict_input_record_fields]
else:
self._predict_input_record = None
self.request_only = True
if len(input_record.all_scalars()) == 0:
self.request_only = False
for scalar in input_record.all_scalars():
if not is_request_only_scalar(scalar):
self.request_only = False
break
self.precomputation_request_only = False
self.precomputation_object_only = False
self._output_schema = None
self._predict_output_schema = None
self.eval_output_schema = None
self.tags = set(tags or [])
self.tags.update(TagContext.current().tags)
self.params = []
self._export_output_for_metrics = False
self._export_params_for_metrics = False
def get_type(self):
return self.__class__.__name__
def _check_output_schema(self):
assert self._output_schema is not None, "Schema is not initialized"
assert self._predict_output_schema is None or schema.is_schema_subset(
self._predict_output_schema, self._output_schema
), "predict_output_schema is not a subset of the output_schema"
@property
def predict_input_record(self):
return self._predict_input_record or self._input_record
@property
def input_record(self):
return self._input_record
@property
def predict_output_schema(self):
self._check_output_schema()
return self._predict_output_schema or self._output_schema
@predict_output_schema.setter
def predict_output_schema(self, output_schema):
assert self._predict_output_schema is None
self._predict_output_schema = output_schema
@property
def output_schema(self):
if self.request_only:
set_request_only(self._output_schema)
self._check_output_schema()
return self._output_schema
@output_schema.setter
def output_schema(self, output_schema):
assert self._output_schema is None
self._output_schema = output_schema
def get_parameters(self):
return self.params
def get_fp16_compatible_parameters(self):
"""Return a subset of parameters which can be converted to fp16"""
return []
def get_memory_usage(self):
return 0
def get_accessed_features(self):
"""
Return a map from field to list of AccessedFeatures, the map should
contain all features accessed in the model layer
"""
return {}
def add_init_params(self, init_net):
"""
Adds layer initialization operators to passed net.
"""
for param in self.params:
# TODO(amalevich): Either return back to lambdas, that add
# all params (looks a bit safer and breaking less
# abstractions) or extend Net interface to this type of
# operations better
# TODO(xlwang) init_net._net.op has type google.protobuf.\
# internal.containers.RepeatedCompositeFieldContainer, but
# the version of protobuf in fbcode does not support append
# so extend is used
init_op = param.initializer
current_device_scope = scope.CurrentDeviceScope()
if not init_op:
continue
if not init_op.HasField("device_option") and current_device_scope:
init_op = caffe2_pb2.OperatorDef()
init_op.CopyFrom(param.initializer)
init_op.device_option.CopyFrom(current_device_scope)
# do not add duplicated init ops
if any(
utils.OpAlmostEqual(op, init_op, "debug_info")
for op in init_net._net.op
):
continue
init_net._net.op.extend([init_op])
def create_param(
self, param_name, shape, initializer, optimizer, ps_param=None, regularizer=None
):
with scope.NameScope(self.name, reset=True):
param = self.model.create_param(
param_name=param_name,
shape=shape,
initializer=initializer,
optimizer=optimizer,
ps_param=ps_param,
regularizer=regularizer,
)
# make sure we don't share parameters in the same layer
assert all(param.parameter != p.parameter for p in self.params)
self.params.append(param)
return param.parameter
def get_next_blob_reference(self, name):
with scope.NameScope(self.name, reset=True):
return self.model.net.NextScopedBlob(name)
def add_operators(self, net, init_net=None, context=InstantiationContext.TRAINING):
"""
Adds layer trainig or initialization operators to the passed in net.
init_net can be None and can be called independently from add_init_params
"""
# Namescope below should warranty that all intermediate blobs will be
# assiciated with the layer that produces them
with scope.NameScope(self.name):
if context not in {
InstantiationContext.PREDICTION,
InstantiationContext.EVAL,
InstantiationContext.ACCUMULATE_PRED,
}:
assert init_net, "Only prediction and eval context don't need init_net"
if init_net:
self.add_init_params(init_net)
if context == InstantiationContext.TRAINING:
self.add_train_ops(net)
elif context == InstantiationContext.EVAL:
self.add_eval_ops(net)
elif context == InstantiationContext.ACCUMULATE_PRED:
self.add_ops_to_accumulate_pred(net)
else:
self.add_ops(net)
if (
context in {InstantiationContext.TRAINING, InstantiationContext.EVAL}
and self._export_params_for_metrics
):
self.add_param_copy_operators(net)
    def add_ops(self, net):
        # Predict layer implementation. Subclasses must override; this is the
        # fallback that all other contexts ultimately delegate to.
        raise NotImplementedError
    def add_eval_ops(self, net):
        # Default eval layer implementation is completely matching
        # predict layer implementation.
        self.add_ops(net)
    def add_train_ops(self, net):
        # Default train layer implementation is completely matching
        # eval layer implementation.
        self.add_eval_ops(net)
    def add_ops_to_accumulate_pred(self, net):
        # This adds operators to accumulate predictions/labels/weights. The
        # accumulated data can later be used to compute calibration or for other
        # purpose. Default layer implementation is completely matching eval
        # layer implementation.
        self.add_eval_ops(net)
    def add_param_copy_operators(self, net):
        """Copy each registered parameter into its metrics-schema blob."""
        for param in self.params:
            # The destination blob was registered via export_params_for_metrics.
            param_copy_ref = self.model.metrics_schema[str(param.parameter)]
            net.Copy([param.parameter], param_copy_ref.field_blobs())
    def export_output_for_metrics(self):
        """Expose this layer's output record in the model's metrics schema."""
        self._export_output_for_metrics = True

        # Export output of the layer directly
        export_name = self.name + "/output"
        self.model.add_metric_field(export_name, self.output_schema)
    def export_params_for_metrics(self):
        """Register metric blobs to receive copies of this layer's params.

        The actual Copy ops are emitted later by add_param_copy_operators.
        """
        self._export_params_for_metrics = True

        # Export copies of parameters
        for param in self.params:
            # Name the copy after the last path component of the param blob.
            param_copy_ref = self.get_next_blob_reference(
                str(param).split("/")[-1] + "_copy"
            )
            self.model.add_metric_field(str(param.parameter), param_copy_ref)
|
pytorch-master
|
caffe2/python/layers/layers.py
|
from caffe2.python import schema
from caffe2.python.layers.layers import (
IdList,
ModelLayer,
)
# Model layer for implementing probabilistic replacement of elements in
# IdLists. Takes probabilities for train, eval and predict nets as input, as
# well as the replacement value when dropout happens. For features we may have
# available to us in train net but not in predict net, we'd set dropout
# probability for predict net to be 1.0 and set the feature to the replacement
# value given here. This way, the value is tied to the particular model and not
# to any specific logic in feature processing in serving.
# Consider the following example where X is the values in the IdList and Lengths
# is the number of values corresponding to each example.
# X: [1, 2, 3, 4, 5]
# Lengths: [2, 3]
# This IdList contains 2 items of lengths 2, 3. Let's assume we used a ratio of
# 0.5 and ended up dropping out 2nd example, and used a replacement value of -1.
# We will end up with the following IdList.
#
# Y: [1, 2, -1]
# OutputLengths: [2, 1]
# where the 2nd item values [3,4,5] were replaced with [-1] and the length got
# set to 1.
class SparseDropoutWithReplacement(ModelLayer):
    """Probabilistic per-example dropout on an IdList input.

    Dropped examples have their values replaced by a single
    `replacement_value`, with an independent dropout ratio for the
    train, eval and predict nets (see the module comment above for a
    worked example).
    """

    def __init__(
        self,
        model,
        input_record,
        dropout_prob_train,
        dropout_prob_eval,
        dropout_prob_predict,
        replacement_value,
        name='sparse_dropout',
        **kwargs):

        super(SparseDropoutWithReplacement, self).__init__(model, name, input_record, **kwargs)
        assert schema.equal_schemas(input_record, IdList), "Incorrect input type"

        self.dropout_prob_train = float(dropout_prob_train)
        self.dropout_prob_eval = float(dropout_prob_eval)
        self.dropout_prob_predict = float(dropout_prob_predict)
        self.replacement_value = int(replacement_value)
        # Each ratio must be a valid probability.
        assert (self.dropout_prob_train >= 0 and
                self.dropout_prob_train <= 1.0), \
            "Expected 0 <= dropout_prob_train <= 1, but got %s" \
            % self.dropout_prob_train
        assert (self.dropout_prob_eval >= 0 and
                self.dropout_prob_eval <= 1.0), \
            "Expected 0 <= dropout_prob_eval <= 1, but got %s" \
            % dropout_prob_eval
        assert (self.dropout_prob_predict >= 0 and
                self.dropout_prob_predict <= 1.0), \
            "Expected 0 <= dropout_prob_predict <= 1, but got %s" \
            % dropout_prob_predict
        # All-zero ratios would make the layer a no-op, which is almost
        # certainly a configuration mistake.
        assert(self.dropout_prob_train > 0 or
               self.dropout_prob_eval > 0 or
               self.dropout_prob_predict > 0), \
            "Ratios all set to 0.0 for train, eval and predict"

        self.output_schema = schema.NewRecord(model.net, IdList)
        # Preserve any metadata attached to the input record's fields.
        if input_record.lengths.metadata:
            self.output_schema.lengths.set_metadata(
                input_record.lengths.metadata)
        if input_record.items.metadata:
            self.output_schema.items.set_metadata(
                input_record.items.metadata)

    def _add_ops(self, net, ratio):
        """Emit the SparseDropoutWithReplacement op with the given ratio."""
        input_values_blob = self.input_record.items()
        input_lengths_blob = self.input_record.lengths()
        output_lengths_blob = self.output_schema.lengths()
        output_values_blob = self.output_schema.items()
        net.SparseDropoutWithReplacement([input_values_blob,
                                          input_lengths_blob],
                                         [output_values_blob,
                                          output_lengths_blob],
                                         ratio=ratio,
                                         replacement_value=self.replacement_value)

    def add_train_ops(self, net):
        self._add_ops(net, self.dropout_prob_train)

    def add_eval_ops(self, net):
        self._add_ops(net, self.dropout_prob_eval)

    def add_ops(self, net):
        # Prediction net uses the predict-time ratio.
        self._add_ops(net, self.dropout_prob_predict)
|
pytorch-master
|
caffe2/python/layers/sparse_dropout_with_replacement.py
|
## @package batch_mse_loss
# Module caffe2.python.layers.batch_mse_loss
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchMSELoss(ModelLayer):
    """Mean squared error between the input record's 'prediction' and
    'label' fields, optionally scaled by a per-example 'weight' field.
    """

    def __init__(self, model, input_record, name='batch_mse_loss', **kwargs):
        super(BatchMSELoss, self).__init__(model, name, input_record, **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar())
            ),
            input_record
        )
        # Loss layers are train/eval only; keep them out of the predict net.
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output'))

    def add_ops(self, net):
        prediction = self.input_record.prediction()
        label = self.input_record.label.field_blobs()
        # Cast label to the prediction's dtype when they differ so that
        # SquaredL2Distance sees matching types.
        if self.input_record.label.field_type().base != (
                self.input_record.prediction.field_type().base):
            label = net.Cast(
                label,
                net.NextScopedBlob('cast_label'),
                to=schema.data_type_for_dtype(
                    self.input_record.prediction.field_type()
                )
            )

        label = net.ExpandDims(label, 1, dims=[1])
        # No gradients should flow into the label.
        label = net.StopGradient(
            label,
            net.NextScopedBlob('stopped_label')
        )

        l2dist = net.SquaredL2Distance(
            [label, prediction],
            net.NextScopedBlob('l2')
        )

        if 'weight' in self.input_record.fields:
            weight_blob = self.input_record.weight()
            if self.input_record.weight.field_type().base != np.float32:
                weight_blob = net.Cast(
                    weight_blob,
                    weight_blob + '_float32',
                    to=core.DataType.FLOAT
                )
            # Weights are inputs, not trainable values.
            weight_blob = net.StopGradient(
                [weight_blob],
                [net.NextScopedBlob('weight_stop_gradient')],
            )
            l2dist = net.Mul(
                [l2dist, weight_blob],
                net.NextScopedBlob('weighted_l2_distance'),
            )

        net.AveragedLoss(l2dist, self.output_schema.field_blobs())
|
pytorch-master
|
caffe2/python/layers/batch_mse_loss.py
|
# @package constant_weight
# Module caffe2.fb.python.layers.constant_weight
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class ConstantWeight(ModelLayer):
    """Weighted sum of the input blobs using fixed (non-learnable) scalar
    weights registered as global constants; defaults to a uniform average.
    """

    def __init__(
        self,
        model,
        input_record,
        weights=None,
        name='constant_weight',
        **kwargs
    ):
        super(ConstantWeight, self).__init__(
            model, name, input_record, **kwargs
        )
        self.output_schema = schema.Scalar(
            np.float32, self.get_next_blob_reference('constant_weight')
        )
        self.data = self.input_record.field_blobs()
        self.num = len(self.data)
        # Fall back to a uniform average when no weights are supplied.
        if weights is None:
            weights = [1. / self.num for _ in range(self.num)]
        assert len(weights) == self.num
        self.weights = [
            self.model.add_global_constant(
                '%s_weight_%d' % (self.name, idx), float(w)
            )
            for idx, w in enumerate(weights)
        ]

    def add_ops(self, net):
        # WeightedSum expects an interleaved [x_0, w_0, x_1, w_1, ...] input.
        interleaved = []
        for blob, weight in zip(self.data, self.weights):
            interleaved.append(blob)
            interleaved.append(weight)
        net.WeightedSum(interleaved, self.output_schema())
|
pytorch-master
|
caffe2/python/layers/constant_weight.py
|
## @package uniform_sampling
# Module caffe2.python.layers.uniform_sampling
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class UniformSampling(ModelLayer):
    """
    Uniform sampling `num_samples - len(input_record)` unique elements from the
    range [0, num_elements). `samples` is the concatenation of input_record and
    the samples. input_record is expected to be unique.
    """

    def __init__(
        self,
        model,
        input_record,
        num_samples,
        num_elements,
        name='uniform_sampling',
        **kwargs
    ):
        super(UniformSampling, self).__init__(
            model, name, input_record, **kwargs
        )

        assert num_elements > num_samples > 0
        assert isinstance(input_record, schema.Scalar)

        self.num_elements = num_elements

        # Total sample count, stored as a non-trainable 1-element blob.
        num_examples_init = ('GivenTensorInt64Fill',
                             {'values': [num_samples]})
        self.num_samples = self.create_param(param_name='num_examples',
                                             shape=(1,),
                                             initializer=num_examples_init,
                                             optimizer=model.NoOptim)

        # Constant per-sample inclusion probability num_samples/num_elements.
        sampling_blob_init = ('ConstantFill',
                              {'value': float(num_samples) / num_elements,
                               'dtype': core.DataType.FLOAT})
        self.sampling_prob = self.create_param(param_name='prob',
                                               shape=(num_samples,),
                                               initializer=sampling_blob_init,
                                               optimizer=model.NoOptim)

        self.output_schema = schema.Struct(
            (
                'samples', schema.Scalar(
                    np.int32, self.get_next_blob_reference("samples")
                )
            ),
            ('sampling_prob', schema.Scalar(np.float32, self.sampling_prob)),
        )

    def add_ops(self, net):
        net.StopGradient(self.sampling_prob, self.sampling_prob)

        # shape = num_samples - len(input_record): how many extra samples to draw.
        shape = net.Shape([self.input_record()], net.NextScopedBlob("shape"))
        shape = net.Sub([self.num_samples, shape], shape)
        # Draw unique values not already present in the input record.
        samples = net.UniqueUniformFill(
            [shape, self.input_record()],
            net.NextScopedBlob("samples_before_concat"),
            min=0,
            max=self.num_elements - 1,
            input_as_shape=True
        )

        # Output = input ids followed by the freshly drawn samples.
        net.Concat(
            [self.input_record(), samples],
            [self.output_schema.samples(), net.NextScopedBlob("split_info")],
            axis=0
        )
        net.StopGradient(
            self.output_schema.samples(), self.output_schema.samples()
        )
|
pytorch-master
|
caffe2/python/layers/uniform_sampling.py
|
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
import unittest
class DoOpTest(TestCase):
    """Tests for the Do operator, which runs a subnet inside a scoped
    sub-workspace, mapping inner blob names to outer blobs."""

    def test_operator(self):
        """Do maps outer_X/outer_Y into the subnet's X/Y and returns Z."""
        def make_net():
            subnet = core.Net('subnet')
            subnet.Add(["X", "Y"], "Z")

            net = core.Net("net")
            net.CreateScope([], "W")
            # outer_blobs_idx pairs inner_blobs with Do's input/output slots.
            net.Do(
                ["outer_X", "outer_Y", "W"],
                ["outer_Z", "W"],
                net=subnet.Proto(),
                inner_blobs=["X", "Y", "Z"],
                outer_blobs_idx=[0, 1, 2],
            )

            return net

        net = make_net()

        workspace.ResetWorkspace()
        workspace.FeedBlob("outer_X", np.asarray([1, 2]))
        workspace.FeedBlob("outer_Y", np.asarray([3, 4]))

        workspace.RunNetOnce(net)
        outer_Z_val = workspace.FetchBlob("outer_Z")
        self.assertTrue(np.all(outer_Z_val == np.asarray([4, 6])))

    def test_reuse_workspace(self):
        """With reuse_workspace=True a second Do sees blobs created by the
        first Do's subnet (X and Y persist inside scope W)."""
        def make_net():
            # First subnet initializes X and Y inside the scope.
            param_init_subnet = core.Net('param_init_subnet')
            param_init_subnet.ConstantFill([], "X", shape=[1], value=1)
            param_init_subnet.ConstantFill([], "Y", shape=[1], value=2)

            subnet = core.Net("subnet")
            subnet.Add(["X", "Y"], "Z")

            net = core.Net("net")
            net.CreateScope([], "W")
            net.Do(
                "W", "W",
                net=param_init_subnet.Proto(),
                inner_blobs=[],
                outer_blobs_idx=[],
            )
            net.Do(
                "W", ["outer_Z", "W"],
                net=subnet.Proto(),
                inner_blobs=["Z"],
                outer_blobs_idx=[0],
                reuse_workspace=True,
            )

            return net

        net = make_net()

        workspace.ResetWorkspace()
        workspace.RunNetOnce(net)
        outer_Z_val = workspace.FetchBlob("outer_Z")
        self.assertTrue(np.all(outer_Z_val == np.asarray([3])))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
pytorch-master
|
caffe2/python/test/do_op_test.py
|
from caffe2.python import (
brew, cnn, core, workspace, data_parallel_model,
timeout_guard, model_helper, optimizer)
from caffe2.python.test_util import TestCase
import caffe2.python.models.resnet as resnet
from caffe2.python.modeling.initializers import Initializer
from caffe2.python import convnet_benchmarks as cb
from caffe2.python import hypothesis_test_util as hu
import time
import numpy as np
CI_MAX_EXAMPLES = 2
CI_TIMEOUT = 600
def executor_test_settings(func):
    """Decorator that caps hypothesis example count and deadline when running
    on CI hosts (sandcastle/travis); a no-op everywhere else."""
    running_on_ci = hu.is_sandcastle() or hu.is_travis()
    if not running_on_ci:
        return func
    # hypothesis deadlines are expressed in milliseconds.
    ci_settings = hu.settings(
        max_examples=CI_MAX_EXAMPLES,
        deadline=CI_TIMEOUT * 1000
    )
    return ci_settings(func)
def gen_test_resnet50(_order, _cudnn_ws):
    """Build a ResNet-50 test model; returns (model, input_size).

    The `_order` and `_cudnn_ws` arguments are accepted only to match the
    builder signature used in conv_model_generators — the model is always
    built with NCHW order and exhaustive cudnn search.
    """
    model = cnn.CNNModelHelper(
        order="NCHW",
        name="resnet_50_test",
        cudnn_exhaustive_search=True,
    )
    data = model.net.AddExternalInput("data")
    label = model.net.AddExternalInput("label")
    (_softmax, loss) = resnet.create_resnet50(
        model,
        data,
        num_input_channels=3,
        num_labels=1000,
        label=label,
        is_test=False,
    )
    # 227 is the spatial input size expected by the benchmark harness.
    return model, 227
def conv_model_generators():
    """Return a name -> builder map; each builder takes (order, cudnn_ws)
    and returns (model, input_size)."""
    generators = {}
    generators['AlexNet'] = cb.AlexNet
    generators['OverFeat'] = cb.OverFeat
    generators['VGGA'] = cb.VGGA
    generators['Inception'] = cb.Inception
    generators['MLP'] = cb.MLP
    generators['Resnet50'] = gen_test_resnet50
    return generators
def executor_test_model_names():
    """Model names to exercise; only the cheap MLP on CI hosts."""
    if hu.is_sandcastle() or hu.is_travis():
        return ["MLP"]
    # Iterating a dict yields its keys, so .keys() is implicit here.
    return sorted(conv_model_generators())
def build_conv_model(model_name, batch_size):
    """Build a named conv benchmark model with random data/label inputs and
    a hand-rolled SGD update, ready to run under different executors."""
    model_gen_map = conv_model_generators()
    assert model_name in model_gen_map, "Model " + model_name + " not found"
    model, input_size = model_gen_map[model_name]("NCHW", None)

    input_shape = [batch_size, 3, input_size, input_size]
    # MLP takes flat vectors rather than images.
    if model_name == "MLP":
        input_shape = [batch_size, input_size]

    model.param_init_net.GaussianFill(
        [],
        "data",
        shape=input_shape,
        mean=0.0,
        std=1.0
    )
    model.param_init_net.UniformIntFill(
        [],
        "label",
        shape=[batch_size, ],
        min=0,
        max=999
    )

    model.AddGradientOperators(["loss"])

    # Manual SGD: param <- param + LR * grad (LR is negative: base_lr=-1e-8).
    ITER = brew.iter(model, "iter")
    LR = model.net.LearningRate(
        ITER, "LR", base_lr=-1e-8, policy="step", stepsize=10000, gamma=0.999)
    ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
    for param in model.params:
        param_grad = model.param_to_grad[param]
        model.net.WeightedSum([param, ONE, param_grad, LR], param)

    return model
def build_resnet50_dataparallel_model(
    num_gpus,
    batch_size,
    epoch_size,
    cudnn_workspace_limit_mb=64,
    num_channels=3,
    num_labels=1000,
    weight_decay=1e-4,
    base_learning_rate=0.1,
    image_size=227,
    use_cpu=False):
    """Build a data-parallel ResNet-50 training model with synthetic inputs.

    Splits `batch_size` evenly across `num_gpus` devices (or CPU when
    `use_cpu`), wires up forward pass, multi-precision SGD and fake
    data/label fills, and returns the parallelized ModelHelper.
    """

    batch_per_device = batch_size // num_gpus

    train_arg_scope = {
        'order': 'NCHW',
        'use_cudnn': True,
        'cudnn_exhaustive_search': False,
        'ws_nbytes_limit': (cudnn_workspace_limit_mb * 1024 * 1024),
        'deterministic': True,
    }
    train_model = model_helper.ModelHelper(
        name="test_resnet50", arg_scope=train_arg_scope
    )

    def create_resnet50_model_ops(model, loss_scale):
        # Forward pass + softmax loss + accuracy metric for one device.
        with brew.arg_scope([brew.conv, brew.fc],
                            WeightInitializer=Initializer,
                            BiasInitializer=Initializer,
                            enable_tensor_core=0):
            pred = resnet.create_resnet50(
                model,
                "data",
                num_input_channels=num_channels,
                num_labels=num_labels,
                no_bias=True,
                no_loss=True,
            )

        softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
                                              ['softmax', 'loss'])
        loss = model.Scale(loss, scale=loss_scale)
        brew.accuracy(model, [softmax, "label"], "accuracy")
        return [loss]

    def add_optimizer(model):
        # Step LR decay every 30 epochs' worth of iterations.
        stepsz = int(30 * epoch_size / batch_size)
        optimizer.add_weight_decay(model, weight_decay)
        opt = optimizer.build_multi_precision_sgd(
            model,
            base_learning_rate,
            momentum=0.9,
            nesterov=1,
            policy="step",
            stepsize=stepsz,
            gamma=0.1
        )
        return opt

    def add_image_input(model):
        # Synthetic inputs: random images, constant label 1.
        model.param_init_net.GaussianFill(
            [],
            ["data"],
            shape=[batch_per_device, 3, image_size, image_size],
            dtype='float',
        )
        model.param_init_net.ConstantFill(
            [],
            ["label"],
            shape=[batch_per_device],
            value=1,
            dtype=core.DataType.INT32,
        )

    def add_post_sync_ops(model):
        # Refresh fp32 master copies for multi-precision params after sync.
        for param_info in model.GetOptimizationParamInfo(model.GetParams()):
            if param_info.blob_copy is not None:
                model.param_init_net.HalfToFloat(
                    param_info.blob,
                    param_info.blob_copy[core.DataType.FLOAT])

    # Create parallelized model
    data_parallel_model.Parallelize(
        train_model,
        input_builder_fun=add_image_input,
        forward_pass_builder_fun=create_resnet50_model_ops,
        optimizer_builder_fun=add_optimizer,
        post_sync_builder_fun=add_post_sync_ops,
        devices=list(range(num_gpus)),
        rendezvous=None,
        optimize_gradient_memory=True,
        cpu_device=use_cpu,
        shared_model=use_cpu,
    )

    return train_model
def run_resnet50_epoch(train_model, batch_size, epoch_size, skip_first_n_iter=0):
    """Run one epoch of the parallelized model and return
    (train_examples, train_time, accuracy, loss).

    The first `skip_first_n_iter` iterations are excluded from the timing
    totals (warm-up).
    """
    epoch_iters = int(epoch_size / batch_size)
    # Metric blobs are read from the first device's name scope.
    prefix = "{}_{}".format(
        train_model._device_prefix,
        train_model._devices[0])
    train_time = 0.0
    train_examples = 0
    for i in range(epoch_iters):
        # First iteration includes lazy initialization; allow a longer timeout.
        timeout = 600.0 if i == 0 else 60.0
        with timeout_guard.CompleteInTimeOrDie(timeout):
            t1 = time.time()
            workspace.RunNet(train_model.net.Proto().name)
            t2 = time.time()
            dt = t2 - t1
            if i >= skip_first_n_iter:
                train_time += dt
                train_examples += batch_size

        fmt = "Finished iteration {}/{} ({:.2f} images/sec)"
        print(fmt.format(i + 1, epoch_iters, batch_size / dt))

        accuracy = workspace.FetchBlob(prefix + '/accuracy')
        loss = workspace.FetchBlob(prefix + '/loss')

        assert loss < 40, "Exploded gradients"

    return (
        train_examples,
        train_time,
        accuracy, loss)
class ExecutorTestBase(TestCase):
    """Base class that runs the same model under a reference and a test
    executor and asserts every ndarray blob ends up identical."""

    def compare_executors(self, model, ref_executor, test_executor, model_run_func):
        model.Proto().type = ref_executor
        # Fixed seeds so both runs draw identical random values.
        model.param_init_net.set_rand_seed(seed=0xCAFFE2)
        model.net.set_rand_seed(seed=0xCAFFE2)

        workspace.ResetWorkspace()
        workspace.RunNetOnce(model.param_init_net)

        workspace.CreateNet(model.net)
        model_run_func()
        # Snapshot all tensor blobs produced by the reference executor.
        ref_ws = {str(k): workspace.FetchBlob(k) for k in workspace.Blobs()}
        ref_ws = {k: v for k, v in ref_ws.items() if type(v) is np.ndarray}

        workspace.ResetWorkspace()
        workspace.RunNetOnce(model.param_init_net)

        # Re-run the same net under the executor being tested.
        model.Proto().type = test_executor
        workspace.CreateNet(model.net, overwrite=True)
        model_run_func()
        test_ws = {str(k): workspace.FetchBlob(k) for k in workspace.Blobs()}
        test_ws = {k: v for k, v in test_ws.items() if type(v) is np.ndarray}

        for blob_name, ref_val in ref_ws.items():
            self.assertTrue(
                blob_name in test_ws,
                "Blob {} not found in {} run".format(blob_name, test_executor))
            val = test_ws[blob_name]
            np.testing.assert_array_equal(
                val, ref_val,
                "Blob {} differs in {} run".format(blob_name, test_executor))
|
pytorch-master
|
caffe2/python/test/executor_test_util.py
|
import unittest
import torch
from caffe2.python import core, workspace
# This is a standalone test that doesn't use test_util as we're testing
# initialization and thus we should be the ones calling GlobalInit
@unittest.skipIf(not workspace.has_cuda_support,
                 "THC pool testing is obscure and doesn't work on HIP yet")
class TestGPUInit(unittest.TestCase):
    """Verifies the THC memory-pool flag routes Caffe2 GPU allocations
    through the Torch caching allocator."""

    def testTHCAllocator(self):
        cuda_or_hip = 'hip' if workspace.has_hip_support else 'cuda'
        flag = '--caffe2_{}_memory_pool=thc'.format(cuda_or_hip)
        core.GlobalInit(['caffe2', flag])
        # just run one operator
        # it's importantant to not call anything here from Torch API
        # even torch.cuda.memory_allocated would initialize CUDA context
        workspace.RunOperatorOnce(core.CreateOperator(
            'ConstantFill', [], ["x"], shape=[5, 5], value=1.0,
            device_option=core.DeviceOption(workspace.GpuDeviceType)
        ))
        # make sure we actually used THC allocator
        self.assertGreater(torch.cuda.memory_allocated(), 0)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
pytorch-master
|
caffe2/python/test/gpu_context_test.py
|
from caffe2.python import core, workspace
from caffe2.python.test.executor_test_util import (
build_conv_model,
build_resnet50_dataparallel_model,
run_resnet50_epoch,
ExecutorTestBase,
executor_test_settings,
executor_test_model_names)
from caffe2.python.test_util import TestCase
from hypothesis import given
import hypothesis.strategies as st
import unittest
EXECUTORS = ["parallel", "async_scheduling"]
ITERATIONS = 1
class ExecutorCPUConvNetTest(ExecutorTestBase):
    """Compares each non-simple executor against the 'simple' executor on
    CPU conv benchmark models."""

    @given(executor=st.sampled_from(EXECUTORS),
           model_name=st.sampled_from(executor_test_model_names()),
           batch_size=st.sampled_from([1]),
           num_workers=st.sampled_from([8]))
    @executor_test_settings
    def test_executor(self, executor, model_name, batch_size, num_workers):
        model = build_conv_model(model_name, batch_size)
        model.Proto().num_workers = num_workers

        def run_model():
            iterations = ITERATIONS
            if model_name == "MLP":
                iterations = 1  # avoid numeric instability with MLP gradients
            workspace.RunNet(model.net, iterations)

        self.compare_executors(
            model,
            ref_executor="simple",
            test_executor=executor,
            model_run_func=run_model,
        )
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class ExecutorGPUResNetTest(ExecutorTestBase):
    """Compares executors on a data-parallel GPU ResNet-50 epoch."""

    @given(executor=st.sampled_from(EXECUTORS),
           num_workers=st.sampled_from([8]))
    @executor_test_settings
    def test_executor(self, executor, num_workers):
        model = build_resnet50_dataparallel_model(
            num_gpus=workspace.NumGpuDevices(), batch_size=8, epoch_size=8)
        model.Proto().num_workers = num_workers

        def run_model():
            run_resnet50_epoch(model, batch_size=8, epoch_size=8)

        self.compare_executors(
            model,
            ref_executor="simple",
            test_executor=executor,
            model_run_func=run_model,
        )
class ExecutorFailingOpTest(TestCase):
    """Checks async_scheduling's error propagation for ops that throw
    exceptions versus ops that merely return failure."""

    def test_failing_op(self):
        def create_failing_net(throw_exception):
            net = core.Net("failing_net")
            if throw_exception:
                net.ThrowException([], [])
            else:
                net.Fail([], [])
            net.Proto().type = "async_scheduling"
            return net

        # A thrown exception always surfaces, even with allow_fail=True.
        workspace.ResetWorkspace()
        net = create_failing_net(throw_exception=True)
        workspace.CreateNet(net)
        with self.assertRaises(RuntimeError):
            workspace.RunNet(net)
        with self.assertRaises(RuntimeError):
            workspace.RunNet(net, allow_fail=True)

        # A failing (non-throwing) op raises by default, but with
        # allow_fail=True it just returns False.
        workspace.ResetWorkspace()
        net = create_failing_net(throw_exception=False)
        workspace.CreateNet(net)
        with self.assertRaises(RuntimeError):
            workspace.RunNet(net)

        res = workspace.RunNet(net, allow_fail=True)
        self.assertFalse(res)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
pytorch-master
|
caffe2/python/test/executor_test.py
|
#!/usr/bin/env python3
import hypothesis.strategies as st
import numpy as np
import torch
from caffe2.python import core
from caffe2.python.test_util import TestCase
from hypothesis import given, settings
from torch import nn
class TestC2LSTM(TestCase):
    """Checks that the Caffe2 InferenceLSTM op matches torch.nn.LSTM on
    random inputs across layer counts, directions, bias settings and
    batch layouts."""

    @given(
        bsz=st.integers(1, 5),
        seq_lens=st.integers(1, 6),
        emb_lens=st.integers(5, 10),
        hidden_size=st.integers(3, 7),
        num_layers=st.integers(1, 4),
        has_biases=st.booleans(),
        is_bidirectional=st.booleans(),
        batch_first=st.booleans(),
    )
    @settings(deadline=10000)
    def test_c2_lstm(
        self,
        bsz,
        seq_lens,
        emb_lens,
        hidden_size,
        num_layers,
        has_biases,
        is_bidirectional,
        batch_first,
    ):
        # NOTE: removed an unused `net = core.Net("test_net")` local — the
        # test runs entirely through torch.ops and never used the net.
        num_directions = 2 if is_bidirectional else 1
        py_lstm = nn.LSTM(
            emb_lens,
            hidden_size,
            batch_first=batch_first,
            bidirectional=is_bidirectional,
            bias=has_biases,
            num_layers=num_layers,
        )

        # Zero initial hidden/cell states, one per (layer, direction) pair.
        hx = np.zeros((num_layers * num_directions, bsz, hidden_size), dtype=np.float32)

        if batch_first:
            inputs = np.random.randn(bsz, seq_lens, emb_lens).astype(np.float32)
        else:
            inputs = np.random.randn(seq_lens, bsz, emb_lens).astype(np.float32)

        py_results = py_lstm(torch.from_numpy(inputs))
        # InferenceLSTM takes [input, h0, c0, *flat_weights].
        lstm_in = [
            torch.from_numpy(inputs),
            torch.from_numpy(hx),
            torch.from_numpy(hx),
        ] + [param.detach() for param in py_lstm._flat_weights]

        c2_results = torch.ops._caffe2.InferenceLSTM(
            lstm_in, num_layers, has_biases, batch_first, is_bidirectional
        )

        # Compare output sequence, final hidden state, and final cell state.
        np.testing.assert_array_almost_equal(
            py_results[0].detach().numpy(), c2_results[0].detach().numpy()
        )
        np.testing.assert_array_almost_equal(
            py_results[1][0].detach().numpy(), c2_results[1].detach().numpy()
        )
        np.testing.assert_array_almost_equal(
            py_results[1][1].detach().numpy(), c2_results[2].detach().numpy()
        )
|
pytorch-master
|
caffe2/python/test/inference_lstm_op_test.py
|
from caffe2.python import core, workspace
import unittest
core.GlobalInit(['python'])
class BlobDeallocationTest(unittest.TestCase):
    """Runs the same net across two workspace resets to verify blobs are
    cleanly deallocated and recreated."""

    def test(self):
        net = core.Net('net')

        first = net.GivenTensorStringFill(
            [], ['x'], shape=[3], values=['a', 'b', 'c'])
        second = net.GivenTensorStringFill(
            [], ['y'], shape=[3], values=['d', 'e', 'f'])
        net.Concat([first, second], ['concated', '_'], axis=0)

        # Two reset/run cycles; the second exercises re-running after the
        # blobs from the first run were deallocated.
        for _ in range(2):
            workspace.ResetWorkspace()
            workspace.RunNetOnce(net)

        self.assertTrue(True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
pytorch-master
|
caffe2/python/test/blob_deallocation_test.py
|
pytorch-master
|
caffe2/python/test/__init__.py
|
|
import unittest
from caffe2.python.fakefp16_transform_lib import fakeFp16FuseOps
from caffe2.python import core
class Transformer(unittest.TestCase):
    """Tests fakeFp16FuseOps on an NNPI int8 dequantize/swish/quantize chain."""

    def test_fuse(self):
        """The Dequantize -> Swish -> Quantize sequence should fuse to one op."""
        net_swish = core.Net("test_swish")
        net_swish_init = core.Net("test_swish_init")
        deq = core.CreateOperator("Int8DequantizeNNPI", ["Xq"], ["X"])
        swish = core.CreateOperator("SwishFakeFp16NNPI", ["X"], ["Y"])
        quant = core.CreateOperator("Int8QuantizeNNPI", ["Y"], ["Y_q"])
        net_swish.Proto().op.extend(
            [
                deq, swish, quant
            ]
        )
        print(net_swish.Proto())
        out_net = fakeFp16FuseOps(net_swish.Proto())
        # The three ops must have been fused into a single operator.
        assert(len(out_net.op) == 1)
|
pytorch-master
|
caffe2/python/test/fakefp16_transform_test.py
|
# make sure we use cpp implementation of protobuf
import os
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
# then import protobuf
from caffe2.proto import caffe2_pb2, metanet_pb2
import unittest
class TestCrossProtoCalls(unittest.TestCase):
    """Sanity check that caffe2_pb2 and metanet_pb2 share one protobuf
    runtime (the cpp implementation forced above)."""

    def testSimple(self):
        net = caffe2_pb2.NetDef()
        meta = metanet_pb2.MetaNetDef()
        # if metanet_pb2 wasn't initialized properly the following fails with a
        # cryptic message: "Parameter to MergeFrom() must be instance of same
        # class: expected caffe2.NetDef got caffe2.NetDef."
        meta.nets.add(key="foo", value=net)
|
pytorch-master
|
caffe2/python/test/python_protobuf_test.py
|
## @package onnx
# Module caffe2.python.onnx.backend_rep_cpp
from onnx.backend.base import BackendRep, namedtupledict
# This is a wrapper around C++ Caffe2BackendRep,
# mainly to handle the different input and output types for convenience of Python
class Caffe2CppRep(BackendRep):
    """Wrapper around the C++ Caffe2BackendRep that normalizes the accepted
    input forms (dict, list/tuple, or a single value) before running."""

    def __init__(self, cpp_rep):
        super(Caffe2CppRep, self).__init__()
        self.__core = cpp_rep
        # Cache the graph's interface blobs queried from the C++ rep.
        self.__external_outputs = cpp_rep.external_outputs()
        self.__external_inputs = cpp_rep.external_inputs()
        self.__uninitialized_inputs = cpp_rep.uninitialized_inputs()

    def init_net(self):
        return self.__core.init_net()

    def pred_net(self):
        return self.__core.pred_net()

    def external_outputs(self):
        return self.__core.external_outputs()

    def external_inputs(self):
        return self.__core.external_inputs()

    def run(self, inputs):
        """Run the model and return its outputs as a namedtuple.

        `inputs` may be a dict keyed by blob name, a list/tuple matching
        the graph's uninitialized inputs in order, or a single value.
        """
        output_values = None
        if isinstance(inputs, dict):
            output_values = self.__core.run(inputs)
        elif isinstance(inputs, list) or isinstance(inputs, tuple):
            if len(inputs) != len(self.__uninitialized_inputs):
                raise RuntimeError('Expected {} values for uninitialized '
                                   'graph inputs ({}), but got {}.'.format(
                                       len(self.__uninitialized_inputs),
                                       ', '.join(self.__uninitialized_inputs),
                                       len(inputs)))
            # Zip positional values onto the uninitialized input names.
            input_map = {}
            for k, v in zip(self.__uninitialized_inputs, inputs):
                input_map[k] = v
            output_values = self.__core.run(input_map)
        else:
            # single input
            output_values = self.__core.run([inputs])
        return namedtupledict('Outputs', self.__external_outputs)(*output_values)
|
pytorch-master
|
caffe2/python/onnx/backend_cpp_rep.py
|
## @package onnx
# Module caffe2.python.onnx.backend
"""Backend for running ONNX on Caffe2
To run this, you will need to have Caffe2 installed as well.
"""
import collections
import sys
import zipfile
import itertools
# When onnx is built against a version of protobuf that is older than
# that which is vendored with caffe2, onnx will crash if caffe2's
# vendored protobuf is loaded first. We can work around this by
# importing onnx first, which will cause it to go out and pick up the
# system protobuf.
import onnx.backend
from caffe2.python import core, workspace, rnn_cell, gru_cell
from caffe2.python.model_helper import ModelHelper
from caffe2.proto import caffe2_pb2
import caffe2.python.utils
import numpy as np
import onnx
from onnx import TensorProto
import onnx.numpy_helper
import onnx.defs
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict
from caffe2.python.onnx.workspace import Workspace
from caffe2.python.onnx.backend_rep import Caffe2Rep
import caffe2.python._import_c_extension as C
import warnings
def force_unicode(s):
    """Return ``s`` as text: bytes-like values are decoded as UTF-8, while
    objects without a ``decode`` attribute (already-text values, numbers,
    etc.) are returned unchanged."""
    decode = getattr(s, 'decode', None)
    if decode is None:
        return s
    return decode('utf-8')
def get_device_option(device):
    """Translate an ONNX backend Device into a Caffe2 DeviceOption."""
    # Map ONNX device types onto Caffe2 device identifiers.
    m = {DeviceType.CPU: caffe2_pb2.CPU,
         DeviceType.CUDA: workspace.GpuDeviceType}
    return core.DeviceOption(m[device.type], device.device_id)
class OnnxAttributes(dict):
    """
    This is a more convenient way to work with ONNX/Caffe2 attributes
    that is not the protobuf representation.
    """
    @staticmethod
    def from_onnx(args):
        """Build an OnnxAttributes dict from repeated AttributeProto."""
        d = OnnxAttributes()
        for arg in args:
            d[arg.name] = convertAttributeProto(arg)
        return d

    def caffe2(self, kmap=lambda k: k):
        """Yield Caffe2 Argument protos; `kmap` renames attribute keys, and
        mapping a key to '' drops that attribute."""
        for k, v in self.items():
            if kmap(k) != '':
                yield caffe2.python.utils.MakeArgument(kmap(k), v)
# TODO: Move this into ONNX main library
def convertAttributeProto(onnx_arg):
"""
Convert an ONNX AttributeProto into an appropriate Python object
for the type.
NB: Tensor attribute gets returned as the straight proto.
"""
if onnx_arg.HasField('f'):
return onnx_arg.f
elif onnx_arg.HasField('i'):
return onnx_arg.i
elif onnx_arg.HasField('s'):
return onnx_arg.s
elif onnx_arg.HasField('t'):
return onnx_arg.t # this is a proto!
elif onnx_arg.HasField('g'):
return Caffe2Backend._graph_to_net(onnx_arg.g, Caffe2Backend._known_opset_version)
elif len(onnx_arg.floats):
return list(onnx_arg.floats)
elif len(onnx_arg.ints):
return list(onnx_arg.ints)
elif len(onnx_arg.strings):
return list(onnx_arg.strings)
elif len(onnx_arg.graphs):
retval = []
# TODO: this doesn't work with RNN ops
for g in onnx_arg.graphs:
retval.append(Caffe2Backend._graph_to_net(g, Caffe2Backend._known_opset_version))
return retval
else:
raise ValueError("Unsupported ONNX attribute: {}".format(onnx_arg))
# TODO: Move this into ONNX main library
class OnnxNode(object):
    """
    Reimplementation of NodeProto from ONNX, but in a form
    more convenient to work with from Python.

    We may temporarily edit these nodes to get them into Caffe2 form,
    before actually translating into the Caffe2 protobuf, since this
    is easier than decomposing everything, and putting it back together
    when we're ready.
    """
    def __init__(self, node):
        # Copy the proto's fields into plain Python containers so they can
        # be freely mutated during translation.
        self.name = str(node.name)
        self.op_type = str(node.op_type)
        self.attrs = OnnxAttributes.from_onnx(node.attribute)
        self.inputs = list(node.input)
        self.outputs = list(node.output)
# (ops, init_ops, interface_blobs): converted run-time operators, parameter
# initialization operators, and blobs crossing the net boundary.
Caffe2Ops = collections.namedtuple('Caffe2Ops', ['ops', 'init_ops', 'interface_blobs'])
class Caffe2Backend(Backend):
# The greatest version of the ONNX operator set which we are aware of.
# Models whose version is larger than this will cause us to emit a warning
# that we are attempting to translate on a "best effort" basis.
#
# If you increase this, make SURE you cross-reference all BC-breaking
# changes from one version to the next, and any that you did not
# implement, mark as broken in _broken_operators
_known_opset_version = 9
# This dictionary will record operators which are KNOWN to be
# broken, so we give a good error message rather than do something
# bogus and then fail.
_broken_operators = {
# 'BrokenOp': version_it_was_broken_in
}
# Operators that are different between Caffe2 and
# ONNX but only in their name.
# In most cases, this should be empty - as the effort of ONNX is
# to unify the operator definitions.
_renamed_operators = {
'GlobalMaxPool': 'MaxPool',
'GlobalAveragePool': 'AveragePool',
'Pad': 'PadImage',
'Neg': 'Negative',
'BatchNormalization': 'SpatialBN',
'InstanceNormalization': 'InstanceNorm',
'MatMul': 'BatchMatMul',
'Upsample': 'ResizeNearest',
'Identity': 'Copy',
'InstanceNormalization': 'InstanceNorm',
'Equal': 'EQ',
'Less': 'LT',
'Greater': 'GT',
'Unsqueeze': 'ExpandDims',
'Loop': 'ONNXWhile',
'Tile': 'NumpyTile',
'RandomNormal': 'GaussianFill',
'RandomUniform': 'UniformFill',
}
_global_renamed_attrs = {'kernel_shape': 'kernels'}
_per_op_renamed_attrs = {
'Squeeze': {'axes': 'dims'},
'Unsqueeze': {'axes': 'dims'},
'Transpose': {'perm': 'axes'},
'Upsample': {'mode': '',
'scales': ''},
'ConvTranspose': {'output_padding': 'adjs'},
'Selu': {'gamma': 'scale'},
'If': {'then_branch': 'then_net',
'else_branch': 'else_net'},
'RandomUniform': {'low': 'min',
'high': 'max'}
}
# operators whose behavior is different beyond renaming
# the value is an attribute of this class that is a
# function from ToffeIR node_def to caffe2 op_def
_special_operators = {
'LSTM': '_create_rnn_variant',
'GRU': '_create_rnn_variant',
'RNN': '_create_rnn_variant',
'Loop': '_create_loop',
'If': '_create_if',
'Upsample': '_create_upsample',
'RandomNormal': '_create_gaussian_fill'
}
# Dummy name generator
_dummy_name = C.DummyName()
    @classmethod
    def dummy_name(cls):
        """Return a fresh unique blob name from the shared generator."""
        return cls._dummy_name.new_dummy_name()
# NB: By default, you will use the LATEST definition of the operator,
# so this interface MAY make BC-breaking changes. Specify an
# opset_version if you don't want this to version.
    @classmethod
    def run_node(cls, node, inputs, device='CPU', opset_version=_known_opset_version, outputs_info=None):
        """Convert a single ONNX node to Caffe2 ops and execute it.

        `inputs` may be a dict of name->ndarray or a list aligned with
        node.input. Returns the outputs as a namedtuple keyed by
        node.output names.
        """
        super(Caffe2Backend, cls).run_node(node, inputs, device=device,
                                           outputs_info=outputs_info, opset_version=opset_version)

        value_infos = []
        device_option = get_device_option(Device(device))
        ws = Workspace()
        with core.DeviceScope(device_option):  # temporary!
            if isinstance(inputs, dict):
                for key, value in inputs.items():
                    ws.FeedBlob(key, value)
                    # Serialized value_infos give the converter dtype/shape hints.
                    value_infos.append(onnx.helper.make_tensor_value_info(
                        name=key,
                        elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
                        shape=value.shape).SerializeToString())
            else:
                assert len(node.input) == len(inputs), "{}: expected {} but got {}".format(
                    node.op_type, len(node.input), len(inputs))
                for key, value in zip(node.input, inputs):
                    ws.FeedBlob(key, value)
                    value_infos.append(onnx.helper.make_tensor_value_info(
                        name=key,
                        elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
                        shape=value.shape).SerializeToString())

            ops = []
            cbackend = C.Caffe2Backend(cls._dummy_name)
            # convert_node returns (init_ops, ops) as serialized OperatorDefs.
            ops_str = cbackend.convert_node(node.SerializeToString(), value_infos, opset_version)
            for s in ops_str[0] + ops_str[1]:
                op = caffe2_pb2.OperatorDef()
                op.ParseFromString(s)
                op.device_option.CopyFrom(device_option)
                ops.append(op)
            ws.RunOperatorsOnce(ops)
            output_values = [ws.FetchBlob(name) for name in node.output]
            return namedtupledict('Outputs', node.output)(*output_values)
@classmethod
def _create_tensor_filling_op(cls, onnx_tensor, name=None):
    """
    Given an Onnx TensorProto, translate it into a Caffe2 operator
    which produces the given tensor filling op.
    """
    assert name or onnx_tensor.name
    name = name or onnx_tensor.name
    c2_op = caffe2_pb2.OperatorDef()
    c2_values = c2_op.arg.add()
    c2_values.name = "values"
    def tensor2list(onnx_tensor):
        # Use the onnx.numpy_helper because the data may be raw
        return onnx.numpy_helper.to_array(onnx_tensor).flatten().tolist()
    if onnx_tensor.data_type in [TensorProto.FLOAT]:
        c2_op.type = 'GivenTensorFill'
        c2_values.floats.extend(tensor2list(onnx_tensor))
    elif onnx_tensor.data_type in [TensorProto.DOUBLE]:
        c2_op.type = 'GivenTensorDoubleFill'
        # NOTE(review): double values are stored in the 'floats' arg
        # field here — presumably GivenTensorDoubleFill reads them from
        # there; confirm against the operator schema.
        c2_values.floats.extend(tensor2list(onnx_tensor))
    elif onnx_tensor.data_type in [TensorProto.INT64,
                                   TensorProto.UINT32]:
        # UINT32 values may not fit in a signed 32-bit int, so both
        # types are widened to int64.
        c2_op.type = 'GivenTensorInt64Fill'
        c2_values.ints.extend(tensor2list(onnx_tensor))
    elif onnx_tensor.data_type in [TensorProto.UINT8,
                                   TensorProto.INT8,
                                   TensorProto.UINT16,
                                   TensorProto.INT16,
                                   TensorProto.INT32]:
        c2_op.type = 'GivenTensorIntFill'
        c2_values.ints.extend(tensor2list(onnx_tensor))
    elif onnx_tensor.data_type == TensorProto.BOOL:
        c2_op.type = 'GivenTensorBoolFill'
        c2_values.ints.extend(tensor2list(onnx_tensor))
    elif onnx_tensor.data_type == TensorProto.STRING:
        c2_op.type = 'GivenTensorStringFill'
        # Strings are copied straight from string_data (no numpy round-trip).
        c2_values.strings.extend(onnx_tensor.string_data)
    else:
        raise RuntimeError(
            "unrecognized tensor type {}".format(onnx_tensor.data_type))
    c2_shape = c2_op.arg.add()
    c2_shape.name = "shape"
    c2_shape.ints.extend(onnx_tensor.dims)
    c2_op.output.append(name)
    return c2_op
@classmethod
def _rnn_reform_weights(cls, reforms, name, hidden_size, init_net, gates, reorder_indices):
    """Slice per-gate weight/bias blobs out of fused ONNX blobs and,
    optionally, re-concatenate them in Caffe2's gate order.

    Each entry of `reforms` is (source_blob, dest_suffix, do_concat,
    extra_dims): the source is sliced into len(gates) chunks of
    `hidden_size` rows along dim 0 (extra_dims supplies slice bounds
    for any additional dimensions); `reorder_indices` is the
    permutation from ONNX gate order to Caffe2 gate order.
    """
    for name_from, name_to, do_concat, extra_dims in reforms:
        gate_blobs = ['%s/%s_%s' % (name, prefix, name_to) for prefix in gates]
        for i, x in enumerate(gate_blobs):
            # Gate i occupies rows [i*hidden_size, (i+1)*hidden_size).
            dim0 = i * hidden_size, (i+1) * hidden_size
            starts, ends = zip(dim0, *extra_dims)
            init_net.Slice(name_from, x, starts=starts, ends=ends)
        if do_concat:
            reordered_gate_blobs = [gate_blobs[i] for i in reorder_indices]
            init_net.Concat(reordered_gate_blobs, ['%s/%s' % (name, name_to), cls.dummy_name()], axis=0)
@classmethod
def _make_rnn_direction(cls, input_blob, B, W, R, initial_states_and_names, sequence_lens,
                        pred_mh, init_net,
                        input_size, hidden_size, num_gates, direction_offset,
                        Bi, Br, W_, R_,
                        reform, make_cell, keep_outputs):
    """Build one direction of a recurrent net (RNN/GRU/LSTM).

    `direction_offset` selects the direction slice of the fused ONNX
    weights/biases/states (0 = forward, 1 = backward).  For the
    backward direction the input is reversed with ReversePackedSegs
    before the cell runs, and the primary output is reversed back
    afterwards.  Returns the cell outputs filtered by `keep_outputs`.
    """
    name = cls.dummy_name()
    # input and recurrence biases are squashed together in onnx
    # but not in caffe2
    gates_hidden_size = num_gates * hidden_size
    bias_offset = 2 * direction_offset * gates_hidden_size
    weight_offset = direction_offset * gates_hidden_size
    Bi = init_net.Slice(B, name + Bi,
                        starts=[bias_offset + 0 * gates_hidden_size],
                        ends =[bias_offset + 1 * gates_hidden_size])
    Br = init_net.Slice(B, name + Br,
                        starts=[bias_offset + 1 * gates_hidden_size],
                        ends =[bias_offset + 2 * gates_hidden_size])
    W_ = init_net.Slice(W, name + W_,
                        starts=[weight_offset + 0 * gates_hidden_size, 0],
                        ends =[weight_offset + 1 * gates_hidden_size,-1])
    R_ = init_net.Slice(R, name + R_,
                        starts=[weight_offset + 0 * gates_hidden_size, 0],
                        ends =[weight_offset + 1 * gates_hidden_size,-1])
    initial_states_sliced = []
    for initial_state, name_suffix in initial_states_and_names:
        # Slice out this direction's row of each initial state.
        initial_states_sliced.append(
            pred_mh.net.Slice(initial_state, name + name_suffix,
                              starts=[direction_offset + 0, 0, 0],
                              ends =[direction_offset + 1,-1,-1]))
    if direction_offset == 1:
        if sequence_lens is not None:
            seq_lens_for_reverse = sequence_lens
        else:
            # No explicit lengths were given: tile the full sequence
            # length once per batch element so ReversePackedSegs can run.
            input_shape = pred_mh.net.Shape(input_blob, name + '/input_shape')
            batch_size = pred_mh.net.Slice(input_shape, name + '/batch_size_slice', starts=[1], ends=[2])
            seq_len = pred_mh.net.Slice(input_shape, name + '/seq_len_slice', starts=[0], ends=[1])
            dummy_sequence_lens = pred_mh.net.Tile([seq_len, batch_size], name + '/dummy_sequence_lens', axis=0)
            pred_mh.net.Reshape(dummy_sequence_lens, [dummy_sequence_lens, cls.dummy_name()], shape=[-1])
            seq_lens_for_reverse = pred_mh.net.Cast(dummy_sequence_lens, name + '/seq_lens_for_reverse', to=core.DataType.INT32)
    # Let the caller rearrange the sliced weights into Caffe2 gate order.
    reform(Bi, Br, W_, R_, name, hidden_size, init_net)
    if direction_offset == 1:
        input = pred_mh.net.ReversePackedSegs(
            [input_blob, seq_lens_for_reverse], name + "/input-reversed")
    else:
        input = input_blob
    outputs = keep_outputs(list(make_cell(
        pred_mh,
        input,
        sequence_lens,
        initial_states_sliced,
        input_size,
        hidden_size,
        name,
        drop_states=False,
        forward_only=True,
    )))
    if direction_offset == 1:
        outputs[0] = pred_mh.net.ReversePackedSegs(
            [outputs[0], seq_lens_for_reverse], name + "/output-reversed")
    return outputs
@classmethod
def _create_rnn_variant(cls, init_model, pred_model, n, opset_version):
    """Translate an ONNX RNN/GRU/LSTM node into Caffe2 recurrent nets.

    Needs both init and predict models so it can find the weight shape
    (for input_size) and decide where the weight-reshaping ops belong.
    Supports 'forward' and 'bidirectional' directions; fused ONNX
    weights are sliced and reordered into Caffe2's per-gate layout.
    """
    assert init_model is not None, "cannot convert RNNs without access to the full model"
    assert pred_model is not None, "cannot convert RNNs without access to the full model"
    attrs = dict(n.attrs)  # make a copy, which is safe to mutate
    hidden_size = attrs.pop('hidden_size')
    direction = force_unicode(attrs.pop('direction', 'forward'))
    if n.op_type == 'RNN':
        activation = force_unicode(attrs.pop('activations', ('tanh',))[0].lower())
    elif n.op_type == 'GRU':
        linear_before_reset = attrs.pop('linear_before_reset', 0)
    # Anything left over is an attribute we do not handle.
    assert not attrs, "unsupported RNN attributes: " + str(attrs.keys())
    assert direction in ['forward', 'bidirectional'], "unsupported backwards RNN/GRU/LSTM"
    if n.op_type in ['RNN', 'GRU']:
        input_blob, W, R, B, sequence_lens, initial_h = n.inputs
    elif n.op_type == 'LSTM':
        input_blob, W, R, B, sequence_lens, initial_h, initial_c = n.inputs
    if sequence_lens == "":
        sequence_lens = None
    # Best-effort shape inference: locate W's value_info to read input_size.
    for x in itertools.chain(init_model.graph.input,
                             init_model.graph.value_info,
                             pred_model.graph.input,
                             pred_model.graph.value_info):
        if x.name == W:
            input_size = x.type.tensor_type.shape.dim[2].dim_value
            break
    else:
        raise RuntimeError("best-effort shape inference for RNN/GRU/LSTM failed")
    pred_mh = ModelHelper()
    init_net = core.Net("init-net")
    # Squash the leading num_directions axis out of W, R and B.
    init_net.Reshape(W, [W, cls.dummy_name()], shape=[1,-1,0])
    init_net.Squeeze(W, W, dims=[0])
    init_net.Reshape(R, [R, cls.dummy_name()], shape=[1,-1,0])
    init_net.Squeeze(R, R, dims=[0])
    init_net.Reshape(B, [B, cls.dummy_name()], shape=[1,-1])
    init_net.Squeeze(B, B, dims=[0])
    if n.op_type == 'RNN':
        def reform(*args):
            # Basic RNN has a single gate: nothing to rearrange.
            pass
        def make_cell(*args, **kwargs):
            return rnn_cell.BasicRNN(*args, activation=activation, **kwargs)
        def make_rnn(direction_offset):
            return cls._make_rnn_direction(
                input_blob, B, W, R, [(initial_h, '/initial_h')], sequence_lens,
                pred_mh, init_net, input_size, hidden_size, 1, direction_offset,
                "/i2h_b", "/gates_t_b", "/i2h_w", "/gates_t_w",
                reform, make_cell, lambda x: x)
    elif n.op_type == 'GRU':
        def reform(Bi, Br, W_, R_, name, hidden_size, init_net):
            # caffe2 has a different order from onnx. We need to rearrange
            # z r h -> r z h
            reforms = ((W_, 'i2h_w', True, [(0,-1)]),
                       (R_, 'gate_t_w', False, [(0,-1)]),
                       (Bi, 'i2h_b', True, []),
                       (Br, 'gate_t_b', False, []))
            cls._rnn_reform_weights(reforms, name, hidden_size, init_net,
                                    ['update', 'reset', 'output'], [1, 0, 2])
        def make_cell(*args, **kwargs):
            return gru_cell.GRU(*args, linear_before_reset=linear_before_reset, **kwargs)
        def make_rnn(direction_offset):
            return cls._make_rnn_direction(
                input_blob, B, W, R, [(initial_h, '/initial_h')], sequence_lens,
                pred_mh, init_net, input_size, hidden_size, 3, direction_offset,
                "_bias_i2h", "_bias_gates", "/i2h_w_pre", "/gates_t_w_pre",
                reform, make_cell, lambda x: x)
    elif n.op_type == 'LSTM':
        def reform(Bi, Br, W_, R_, name, hidden_size, init_net):
            # caffe2 has a different order from onnx. We need to rearrange
            # i o f c -> i f o c
            reforms = ((W_, 'i2h_w', True, [(0, -1)]),
                       (R_, 'gates_t_w', True, [(0, -1)]),
                       (Bi, 'i2h_b' , True, []),
                       (Br, 'gates_t_b', True, []))
            cls._rnn_reform_weights(reforms, name, hidden_size, init_net,
                                    ['input', 'output', 'forget', 'cell'], [0, 2, 1, 3])
        def make_cell(*args, **kwargs):
            return rnn_cell.LSTM(*args, **kwargs)
        def make_rnn(direction_offset):
            return cls._make_rnn_direction(
                input_blob, B, W, R, [(initial_h, '/initial_h'), (initial_c, '/initial_c')], sequence_lens,
                pred_mh, init_net, input_size, hidden_size, 4, direction_offset,
                "/i2h_b", "/gates_t_b", "/i2h_w", "/gates_t_w",
                reform, make_cell, lambda x: [x[0], x[1], x[3]])
    if direction == 'forward':
        outputs = make_rnn(0)
        # in the forward case, storage is shared between the
        # last outputs. We need to decouple them so that the
        # VariableLengthSequencePadding only mutates
        # n.outputs[0]
        for i in range(1, len(outputs)):
            pred_mh.net.Copy(outputs[i], n.outputs[i])
        if sequence_lens is not None:
            pred_mh.net.VariableLengthSequencePadding(
                [outputs[0], sequence_lens], [outputs[0]])
        pred_mh.net.ExpandDims([outputs[0]], [n.outputs[0]], dims=[1])
    elif direction == 'bidirectional':
        outputs_f = make_rnn(0)
        outputs_b = make_rnn(1)
        concatted_output, _ = pred_mh.net.Concat(
            [outputs_f[0], outputs_b[0]], [cls.dummy_name(), cls.dummy_name()], axis=2)
        if sequence_lens is not None:
            pred_mh.net.VariableLengthSequencePadding(
                [concatted_output, sequence_lens], [concatted_output])
        # NOTE(review): the reshape/transpose pair rearranges the fused
        # forward/backward columns into the ONNX output layout — verify
        # the axis order against the ONNX RNN spec.
        reshaped_output, _ = pred_mh.net.Reshape(concatted_output, [cls.dummy_name(), cls.dummy_name()], shape=[0,0,-1,2])
        pred_mh.net.Transpose(reshaped_output, n.outputs[0], axes=[0,2,1,3])
        for i in range(1, len(n.outputs)):
            pred_mh.net.Concat([outputs_f[i], outputs_b[i]],
                               [n.outputs[i], cls.dummy_name()], axis=0)
    # We want to decide whether to put all of our weight-reshaping
    # operators in the init net or the predict net. We can put
    # them in the init net iff the inputs to those operators are
    # already available, either as graph initializers, or as the
    # output of other operators in the init net. The latter case
    # occurs, for example, when exporting from pytorch to onnx.
    # In most production use, we expect has_initializers to be
    # true.
    initializers = {i.name for i in init_model.graph.initializer}
    outputs = {output for node in init_model.graph.node for output in node.output}
    has_initializers = all(x in initializers or x in outputs for x in (W, R, B))
    pred_ops = []
    init_ops = []
    (init_ops if has_initializers else pred_ops).extend(init_net.Proto().op)
    pred_ops.extend(pred_mh.Proto().op)
    return Caffe2Ops(pred_ops, init_ops, list(pred_mh.Proto().external_input))
@classmethod
def _create_control_op(cls, init_model, pred_model, n, opset_version):
    """Translate a control-flow node via the common path, carrying any
    explicit '__control_inputs' attribute over onto the Caffe2 op."""
    extra_control = []
    if '__control_inputs' in n.attrs:
        extra_control.extend(n.attrs['__control_inputs'])
    op = cls._common_onnx_node_to_caffe2_op(init_model, pred_model, n, opset_version)
    op.control_input.extend(extra_control)
    return Caffe2Ops([op], [], [])
@classmethod
def _remove_ssa(cls, net, remap_dict):
    """Rename blobs in place: rewrite every op output and every external
    output of `net` that appears as a key in `remap_dict`."""
    for operator in net.op:
        for idx, blob in enumerate(operator.output):
            if blob in remap_dict:
                operator.output[idx] = remap_dict[blob]
    for idx, blob in enumerate(net.external_output):
        if blob in remap_dict:
            net.external_output[idx] = remap_dict[blob]
@classmethod
def _create_if(cls, init_model, pred_model, n, opset_version):
    """Translate an ONNX If node into a Caffe2 If op, propagating any
    __control_inputs into both branch subnets' external inputs."""
    ops = cls._create_control_op(init_model, pred_model, n, opset_version)
    assert ops[0][0].type == 'If'
    if_op = ops[0][0]
    then_net = else_net = None
    control_inputs = []
    for arg in if_op.arg:
        if arg.name == 'then_net':
            then_net = arg.n
        if arg.name == 'else_net':
            else_net = arg.n
        if arg.name == '__control_inputs':
            control_inputs = arg.strings
    assert then_net and else_net
    then_net_outs = then_net.external_output
    else_net_outs = else_net.external_output
    op_outputs = if_op.output
    # Both branches must produce the same number of outputs as the If op.
    assert len(then_net_outs) == len(else_net_outs)
    assert len(else_net_outs) == len(op_outputs)
    for arg in if_op.arg:
        if arg.name == 'then_net':
            arg.n.external_input.extend(control_inputs)
        if arg.name == 'else_net':
            arg.n.external_input.extend(control_inputs)
    return ops
@classmethod
def _create_loop(cls, init_model, pred_model, n, opset_version):
    """Translate an ONNX Loop node into a Caffe2 ONNXWhile op.

    Marks the op as having a trip count and a stop condition, disables
    scoping inside the body, and derives the number of loop-carried
    dependencies from the body net's external inputs.
    """
    ops = cls._create_control_op(init_model, pred_model, n, opset_version)
    assert ops[0][0].type == 'ONNXWhile'
    while_op = ops[0][0]
    while_op.arg.extend([caffe2.python.utils.MakeArgument('has_trip_count', True)])
    while_op.arg.extend([caffe2.python.utils.MakeArgument('has_cond', True)])
    while_op.arg.extend([caffe2.python.utils.MakeArgument('disable_scopes', True)])
    control_inputs = []
    for arg in while_op.arg:
        if arg.name == '__control_inputs':
            control_inputs = arg.strings
    num_loop_carried_deps = 0
    for arg in while_op.arg:
        if arg.name == 'body':
            # The first two body inputs are the iteration counter and the
            # condition; the rest are loop-carried dependencies.
            num_loop_carried_deps = len(arg.n.external_input) - 2
            arg.n.external_input.extend(control_inputs)
    while_op.arg.extend([
        caffe2.python.utils.MakeArgument('num_loop_carried_deps',
                                         num_loop_carried_deps)
    ])
    return ops
@classmethod
def _substitute_raw_value(cls, tp, raw_values_dict):
    """Replace the '__EXTERNAL' raw_data placeholder of a TensorProto
    with the real bytes looked up by tensor name; raise if missing."""
    is_external = tp.HasField('raw_data') and tp.raw_data == bytes(b'__EXTERNAL')
    if not is_external:
        return
    if tp.name not in raw_values_dict:
        raise RuntimeError('TensorProto for value {} referenced raw data but it was not found!'.format(tp.name))
    tp.raw_data = raw_values_dict[tp.name]
@classmethod
def _visit_and_substitute_raw_values(cls, nodes, raw_values_dict):
    """Walk every attribute of every node, recursing into nested graphs,
    and substitute externally-stored raw tensor data in place."""
    for node in nodes:
        for attr in node.attribute:
            singular_tensor = [attr.t] if attr.HasField('t') else []
            for tensor in itertools.chain(singular_tensor, attr.tensors):
                cls._substitute_raw_value(tensor, raw_values_dict)
            singular_graph = [attr.g] if attr.HasField('g') else []
            for graph in itertools.chain(singular_graph, attr.graphs):
                cls._visit_and_substitute_raw_values(graph.node, raw_values_dict)
@classmethod
def _external_value_resolution_pass(cls, model, raw_values_dict):
    """Resolve every '__EXTERNAL' raw-data placeholder in the model's
    initializers and node attributes using `raw_values_dict`."""
    for tensor in model.graph.initializer:
        cls._substitute_raw_value(tensor, raw_values_dict)
    cls._visit_and_substitute_raw_values(model.graph.node, raw_values_dict)
@classmethod
def _direct_initialize_parameters(cls, initializer, ws, device_option):
    """Feed each initializer TensorProto straight into the workspace."""
    for tensor_proto in initializer:
        as_array = onnx.numpy_helper.to_array(tensor_proto)
        ws.FeedBlob(tensor_proto.name, as_array, device_option)
@classmethod
def _direct_initialize_inputs(cls, inputs, initialized, ws, device_option):
    """Feed placeholder (all-ones) blobs for graph inputs that were not
    already provided by an initializer."""
    for value_info in inputs:
        if value_info.name in initialized:
            continue
        tensor_type = value_info.type.tensor_type
        shape = [d.dim_value for d in tensor_type.shape.dim]
        np_dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_type.elem_type]
        ws.FeedBlob(value_info.name, np.ones(shape, dtype=np_dtype), device_option)
@staticmethod
def optimize_onnx(input, init=False, predict=False):
    """Run a fixed set of ONNX optimizer passes over `input`, optionally
    adding the split_init / split_predict passes.

    Falls back to the standalone 'onnxoptimizer' package when this onnx
    build no longer bundles the optimizer module (onnx >= 1.9).
    """
    passes = [
        'fuse_consecutive_transposes',
        'eliminate_nop_transpose',
        'fuse_transpose_into_gemm',
        'lift_lexical_references',
    ]
    if init:
        passes.append('split_init')
    if predict:
        passes.append('split_predict')
    try:
        return onnx.optimizer.optimize(input, passes)
    except AttributeError:
        warnings.warn("OptimizerWarning: optimizer module not found in ONNX version {}".format(onnx.__version__))
        # onnx stopped shipping onnx.optimizer from version 1.9 on.
        import onnxoptimizer
        return onnxoptimizer.optimize(input, passes)
@classmethod
def prepare_zip_archive(cls, file, device='CPU', **kwargs):
    """Prepare a model stored as a zip archive.

    The archive holds the serialized ModelProto under the
    '__MODEL_PROTO' entry, plus one entry per externally-stored raw
    tensor keyed by tensor name; those entries are loaded into
    raw_values_dict and resolved by prepare().
    """
    with zipfile.ZipFile(file, mode='r') as z:
        with z.open('__MODEL_PROTO', 'r') as f:
            model = onnx.load(f)
        # Bug fix: set('__MODEL_PROTO') built a set of single characters,
        # so the model-proto entry itself was wrongly kept in blob_names.
        blob_names = set(z.namelist()) - {'__MODEL_PROTO'}
        # TODO: make this more efficient
        raw_values_dict = {}
        for name in blob_names:
            with z.open(name, 'r') as blob_file:
                raw_values_dict[name] = blob_file.read()
        return cls.prepare(model, device, raw_values_dict=raw_values_dict, **kwargs)
@classmethod
def prepare(cls, model, device='CPU', raw_values_dict=None, **kwargs):
    '''
    For Onnx Caffe2Backend, we require that init_graph don't initialize the actual input of the predict_graph,
    for example, if "img" is the input blob for the predict_net, we require that in init_graph and in
    initializer of the predict_graph, "img" is not initialized. We don't have a check for this, since
    there is no way we can know which blob is the input of the predict_graph.
    '''
    if not kwargs.pop('no_check_UNSAFE', False):
        super(Caffe2Backend, cls).prepare(model, device, **kwargs)
    # Determine the opset version from the default-domain opset import.
    opset_version = None
    for imp in model.opset_import:
        if not imp.HasField("domain") or imp.domain == "":
            opset_version = imp.version
            if imp.version > cls._known_opset_version:
                warnings.warn("This version of onnx-caffe2 targets ONNX operator set version {}, but the model we are trying to import uses version {}. We will try to import it anyway, but if the model uses operators which had BC-breaking changes in the intervening versions, import will fail.".format(cls._known_opset_version, imp.version))
        else:
            warnings.warn("Unrecognized operator set {}".format(imp.domain))
    if opset_version is None:
        if model.ir_version >= 0x00000003:
            raise RuntimeError("Model with IR version >= 3 did not specify ONNX operator set version (onnx-caffe2 requires it)")
        else:
            # Pre-IR-3 models carried no opset imports; assume opset 1.
            opset_version = 1
    # Prior to onnx version update to onnx-1.8.0, errors caused by failures in
    # in the onnx shape inference call were being suppressed. Hence a try-catch block
    # is added around the infer_shapes call to avoid these failures and preserve status
    try:
        model = onnx.shape_inference.infer_shapes(model)
    except RuntimeError:
        warnings.warn("ShapeInferenceWarning: Inferred shape and existing shape differ in rank")
    ws = Workspace()
    device_option = get_device_option(Device(device))
    init_net, predict_net = cls._onnx_model_to_caffe2_net(model, device, opset_version, False)
    if raw_values_dict:
        # Resolve '__EXTERNAL' raw-data placeholders (zip-archive models).
        cls._external_value_resolution_pass(model, raw_values_dict)
    # Directly load initializer data into blobs in workspace
    cls._direct_initialize_parameters(
        model.graph.initializer,
        ws,
        device_option,
    )
    initialized = {init.name for init in model.graph.initializer}
    # Feed placeholder values for any graph inputs not covered above.
    cls._direct_initialize_inputs(
        model.graph.input,
        initialized,
        ws,
        device_option,
    )
    uninitialized = [value_info.name for value_info in model.graph.input if value_info.name not in initialized]
    retval = Caffe2Rep(init_net, predict_net, ws, uninitialized)
    return retval
@classmethod
# TODO: This method needs a refactor for clarity
def _onnx_node_to_caffe2_op(cls, init_model, pred_model, node_def, opset_version):
    """Convert one ONNX node into Caffe2 ops (as a Caffe2Ops triple).

    Prefers the C++ converter when it supports the op type; otherwise
    dispatches to a per-op special translator from _special_operators,
    falling back to the generic _common_onnx_node_to_caffe2_op.
    """
    cbackend = C.Caffe2Backend(cls._dummy_name)
    if cbackend.support_onnx_import(node_def.op_type):
        # extract value infos from pred model (value infos of
        # node's inputs that are in init model should be all
        # available in pred model)
        value_infos = []
        for name in node_def.input:
            if pred_model is not None:
                for vi in itertools.chain(pred_model.graph.input,
                                          pred_model.graph.output,
                                          pred_model.graph.value_info):
                    if vi.name == name:
                        value_infos.append(vi.SerializeToString())
        op_strs = cbackend.convert_node(node_def.SerializeToString(), value_infos, opset_version)
        # op_strs is (serialized init ops, serialized run ops).
        init_ops = []
        for s in op_strs[0]:
            op = caffe2_pb2.OperatorDef()
            op.ParseFromString(s)
            init_ops.append(op)
        ops = []
        for s in op_strs[1]:
            op = caffe2_pb2.OperatorDef()
            op.ParseFromString(s)
            ops.append(op)
        return Caffe2Ops(ops, init_ops, [])
    if node_def.op_type in cls._special_operators:
        translator = getattr(cls, cls._special_operators[node_def.op_type])
    else:
        translator = cls._common_onnx_node_to_caffe2_op
    ops = translator(init_model, pred_model, OnnxNode(node_def), opset_version)
    if isinstance(ops, Caffe2Ops):
        return ops
    # Translators may return a single op or an iterable of ops.
    if not isinstance(ops, collections.abc.Iterable):
        ops = [ops]
    return Caffe2Ops(ops, [], [])
# Binary ops that, for pre-opset-7 models, need an explicit
# 'broadcast' argument added (see _common_onnx_node_to_caffe2_op).
_broadcast_operators = {
    'Add',
    'Sub',
}
@classmethod
def _common_onnx_node_to_caffe2_op(cls, init_model, pred_model, onnx_node, opset_version):
    """
    This translator performs the basic translation of ONNX nodes into
    Caffe2 operators. Besides doing a straightforward marshalling from
    one format to another, it also does these extra things:
      - Renames operators based on '_renamed_operators'
      - Renames attributes based on '_global_renamed_attrs' and
        '_per_op_renamed_attrs'
    If you're writing a custom translator, consider calling this first,
    and then fixing things up further.
    """
    c2_op = caffe2_pb2.OperatorDef()
    c2_op.input.extend(onnx_node.inputs)
    c2_op.output.extend(onnx_node.outputs)
    c2_op.name = onnx_node.name
    onnx_op_type = onnx_node.op_type
    # Refuse ops that are known-broken at or before this opset version.
    broken_version = cls._broken_operators.get(onnx_op_type, float('Inf'))
    if broken_version <= opset_version:
        raise ValueError(
            "Don't know how to translate op {} in ONNX operator set v{} (I only support prior to v{})".format(onnx_op_type, opset_version, broken_version))
    c2_op.type = cls._renamed_operators.get(onnx_op_type, onnx_op_type)
    if not core.IsOperator(c2_op.type):
        raise ValueError(
            "Don't know how to translate op {}".format(onnx_op_type))
    def kmap(k):
        # Per-op attribute renames take precedence over global renames.
        if (onnx_op_type in cls._per_op_renamed_attrs and
                k in cls._per_op_renamed_attrs[onnx_op_type]):
            return cls._per_op_renamed_attrs[onnx_op_type][k]
        if k in cls._global_renamed_attrs:
            return cls._global_renamed_attrs[k]
        return k
    c2_op.arg.extend(onnx_node.attrs.caffe2(kmap=kmap))
    if opset_version < 7:
        # onnx opset 7 and newest caffe2 have adopted full onnx broadcast semantics
        # so we don't need this hack anymore
        if c2_op.type in cls._broadcast_operators:
            already_broadcast = False
            for arg in c2_op.arg:
                if arg.name == 'broadcast':
                    already_broadcast = True
            if not already_broadcast:
                c2_op.arg.extend([caffe2.python.utils.MakeArgument('broadcast', 1)])
    return c2_op
@staticmethod
def _all_names_in_graph(graph):
    """Collect every blob name a graph mentions: its inputs, outputs,
    and all node inputs/outputs. Returns an empty set for None."""
    if graph is None:
        return set()
    collected = set()
    for value_info in graph.input:
        collected.add(value_info.name)
    for value_info in graph.output:
        collected.add(value_info.name)
    for node in graph.node:
        collected.update(node.input)
        collected.update(node.output)
    return collected
@classmethod
def _graph_to_net(cls, onnx_graph, opset_version):
    """Convert a bare ONNX GraphProto to a single Caffe2 NetDef.

    Conversion is best-effort: a node that fails to convert is logged
    ('ONNX FATAL') and skipped.  Used where there is no surrounding
    init/predict model split (e.g. nested control-flow graphs).
    """
    net = caffe2_pb2.NetDef()
    for node in onnx_graph.node:
        try:
            c2ops = cls._onnx_node_to_caffe2_op(
                None, None, node, opset_version)
        except Exception as e:
            print('ONNX FATAL:', e)
            continue
        # Init ops and run ops both land in this single net.
        net.op.extend(c2ops.init_ops)
        net.op.extend(c2ops.ops)
        net.external_input.extend(c2ops.interface_blobs)
    net.external_output.extend(
        value_info.name for value_info in onnx_graph.output)
    net.external_input.extend(
        value_info.name for value_info in onnx_graph.input)
    return net
@classmethod
def _onnx_model_to_caffe2_net(cls, onnx_model, device, opset_version, include_initializers):
    """Convert a whole ONNX ModelProto into (init_net, pred_net).

    The model is polished/optimized when the relevant onnx modules are
    available, split into init and predict models, and every node is
    converted via _onnx_node_to_caffe2_op.  Raises RuntimeError listing
    every node that failed to convert.
    """
    device_option = get_device_option(Device(device))
    # Prior to onnx version update to onnx-1.8.0, errors caused by failures in
    # in the onnx shape inference call were being suppressed. Hence a try-catch block
    # is added around the infer_shapes call to avoid these failures and preserve status
    try:
        onnx_model = onnx.utils.polish_model(onnx_model)
    except RuntimeError:
        warnings.warn("ShapeInferenceWarning: Inferred shape and existing shape differ in rank")
    except AttributeError:
        warnings.warn("ShapeInferenceWarning: utils module not found in ONNX version {}".format(onnx.__version__))
    # Optimizer module has been removed in ONNX-1.9 or later, warn caller if that is the case
    try:
        init_model = cls.optimize_onnx(onnx_model, init=True)
        pred_model = cls.optimize_onnx(onnx_model, predict=True)
    except ModuleNotFoundError:
        warnings.warn("OptimizerWarning: onnxoptimizer module not installed. "
                      "init_model and pred_model models will not be splitted, which can cause a runtime error")
        init_model = onnx_model
        pred_model = onnx_model
    init_net = caffe2_pb2.NetDef()
    pred_net = caffe2_pb2.NetDef()
    init_net.name = onnx_model.graph.name + '_init'
    pred_net.name = onnx_model.graph.name + '_predict'
    if include_initializers:
        init_net.op.extend(cls._create_tensor_filling_op(tp) for tp in onnx_model.graph.initializer)
    # Seed the dummy-name generator with every name already in use so
    # generated intermediate blob names cannot collide.
    cls._dummy_name.reset(cls._all_names_in_graph(init_model.graph) | cls._all_names_in_graph(pred_model.graph))
    errors = []
    for net, model in ( (init_net, init_model), (pred_net, pred_model) ):
        net.device_option.CopyFrom(device_option)
        for node in model.graph.node:
            try:
                c2ops = cls._onnx_node_to_caffe2_op(
                    init_model, pred_model, node, opset_version)
            except Exception as e:
                msg = 'Error while processing node: {}. Exception: {}'.format(node, e)
                errors.append(msg)
                print('ONNX FATAL:', msg, file=sys.stderr)
                continue
            # Init ops always accumulate into init_net, even when the
            # node comes from the predict model.
            init_net.op.extend(c2ops.init_ops)
            net.op.extend(c2ops.ops)
            net.external_input.extend(c2ops.interface_blobs)
        net.external_output.extend(
            value_info.name for value_info in model.graph.output)
        net.external_input.extend(
            value_info.name for value_info in model.graph.input)
    if len(errors) > 0:
        raise RuntimeError(
            "ONNX conversion failed, encountered {} errors:\n\n{}".format(
                len(errors), "\n\n".join(errors)))
    return init_net, pred_net
# wrapper for backwards compatibility
@classmethod
def onnx_graph_to_caffe2_net(cls, model, device="CPU", opset_version=_known_opset_version):
    """Legacy alias: convert an ONNX model to Caffe2 nets, baking the
    graph initializers into the returned init net."""
    return cls._onnx_model_to_caffe2_net(model, device=device, opset_version=opset_version, include_initializers=True)
@classmethod
def supports_device(cls, device_str):
    """Report whether this backend can run on the given device string.

    CPU is always supported; GPU device types are supported only when
    the Caffe2 build has GPU support."""
    parsed = Device(device_str)
    if parsed.type == DeviceType.CPU:
        return True
    if core.IsGPUDeviceType(parsed.type):
        return workspace.has_gpu_support
    return False
@classmethod
def is_compatible(cls, model, device='CPU', **kwargs):
    """Delegate compatibility checking to the base backend when it
    provides an is_compatible hook; otherwise be optimistic."""
    parent_check = getattr(super(Caffe2Backend, cls), 'is_compatible', None)
    if callable(parent_check) and not parent_check(model, device, **kwargs):
        return False
    # TODO: should have an unsupported list of operators, be optimistic for now
    return True
# Module-level convenience aliases so callers can use the functional
# onnx-backend API (e.g. backend.prepare(model)) without referencing
# the Caffe2Backend class directly.
prepare = Caffe2Backend.prepare
prepare_zip_archive = Caffe2Backend.prepare_zip_archive
run_node = Caffe2Backend.run_node
run_model = Caffe2Backend.run_model
supports_device = Caffe2Backend.supports_device # noqa
is_compatible = Caffe2Backend.is_compatible
|
pytorch-master
|
caffe2/python/onnx/backend.py
|
## @package onnx
# Module caffe2.python.onnx.error
# NOTE(review): deliberately named BaseException, shadowing the Python
# builtin within this module; callers import it as
# caffe2.python.onnx.error.BaseException, so renaming would break the API.
class BaseException(Exception): pass
# Subclasses the module-local BaseException defined just above (which
# itself derives from Exception).
class Unsupported(BaseException): pass
|
pytorch-master
|
caffe2/python/onnx/error.py
|
## @package onnx
# Module caffe2.python.onnx.frontend
"""Caffe2 Protobuf to ONNX converter
To run this, you will need to have Caffe2 installed as well.
"""
import collections
import itertools
import logging
import re
from caffe2.python import core as caffe2_core
from onnx import (checker, helper, numpy_helper, mapping,
GraphProto, NodeProto, TensorProto, OperatorSetIdProto)
from onnx.helper import make_tensor_value_info, make_model
import numpy as np
from caffe2.python.onnx.helper import c2_native_run_net
import caffe2.python._import_c_extension as C
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Converter from Caffe2 NetDef protos to ONNX graphs/models.
class Caffe2Frontend(object):
# This number controls the semantics of the operators we target. Whenever
# ONNX makes a BC breaking change to semantics of operators, having this set
# to an accurate number will prevent our models from exporting. However,
# we should strive to keep this up-to-date as much as possible.
target_opset_version = 9
# Caffe2 operator types renamed (only) on the way to ONNX.
_renamed_operators = {
    'SpatialBN': 'BatchNormalization',
    'Conv1D': 'Conv',
    'Conv2D': 'Conv',
    'Conv3D': 'Conv',
    'ConvTranspose1D': 'ConvTranspose',
    'ConvTranspose2D': 'ConvTranspose',
    'ConvTranspose3D': 'ConvTranspose',
    'MaxPool1D': 'MaxPool',
    'MaxPool2D': 'MaxPool',
    'MaxPool3D': 'MaxPool',
    'AveragePool1D': 'AveragePool',
    'AveragePool2D': 'AveragePool',
    'AveragePool3D': 'AveragePool',
}
# caffe2 arguments that are completely removed in onnx
# (maps arg name -> the set of values allowed to be dropped).
_blocklist_caffe2_args = {
    'order': {b'NCHW'},
    'cudnn_exhaustive_search': {0, 1},
    'exhaustive_search': {0, 1},
    'use_cudnn': {0, 1},
}
# Argument renames applied for every operator.
_global_renamed_args = {
    'kernels': 'kernel_shape',
}
# Argument renames applied only for specific operators.
_per_op_renamed_args = {
    'Squeeze': {'dims': 'axes'},
    'Transpose': {'axes': 'perm'},
}
# Per-op custom translators (name of a classmethod); currently none.
_special_operators = {}
# Dummy name generator
_dummy_name = C.DummyName()
@classmethod
def dummy_name(cls):
    """Return a fresh, collision-free dummy blob name from the shared generator."""
    generator = cls._dummy_name
    return generator.new_dummy_name()
@classmethod
def _common_caffe2_arg_to_onnx_attr(cls, op_def, arg):
    """Convert a single Caffe2 Argument proto into an ONNX AttributeProto.

    Applies the global and per-op argument renames, drops arguments in
    _blocklist_caffe2_args (returning None), and otherwise builds the
    attribute from whichever scalar/repeated field the Argument carries.

    Returns None for blocklisted arguments; raises ValueError when the
    Argument has no recognizable data field.
    """
    # name
    op_type = op_def.type
    name = cls._global_renamed_args.get(arg.name, arg.name)
    if op_type in cls._per_op_renamed_args:
        # Per-op attribute renames override the global attribute renames
        name = cls._per_op_renamed_args[op_type].get(arg.name, name)
    # value: singular fields are checked with HasField; repeated fields
    # by truthiness (an unset repeated field is simply empty).
    if arg.HasField('f'):
        value = arg.f
    elif arg.HasField('i'):
        value = arg.i
    elif arg.HasField('s'):
        value = arg.s
    elif arg.floats:
        value = arg.floats
    elif arg.ints:
        value = arg.ints
    elif arg.strings:
        value = arg.strings
    else:
        raise ValueError('Could not find data field in arg: {}'.format(arg))
    if name in cls._blocklist_caffe2_args:
        # Bug fix: membership was tested with the post-rename `name` but
        # the blocklist was indexed with the pre-rename `arg.name`; use
        # the same (renamed) key for both to avoid a possible KeyError.
        assert value in cls._blocklist_caffe2_args[name]
        return None
    return helper.make_attribute(name, value)
@classmethod
def caffe2_arg_to_onnx_attr(cls, op_def, arg):
    """Public hook for argument conversion; delegates to the common path."""
    return cls._common_caffe2_arg_to_onnx_attr(op_def, arg)
@classmethod
def _common_caffe2_op_to_onnx_node(cls, op_def, shapes):
    """Build an ONNX NodeProto from a Caffe2 OperatorDef by copying the
    name/inputs/outputs, renaming the op type per _renamed_operators,
    and converting each argument (dropped arguments are skipped)."""
    node = NodeProto()
    node.name = op_def.name
    node.op_type = cls._renamed_operators.get(op_def.type, op_def.type)
    node.input.extend(op_def.input)
    node.output.extend(op_def.output)
    converted = (cls.caffe2_arg_to_onnx_attr(op_def, arg) for arg in op_def.arg)
    node.attribute.extend(attr for attr in converted if attr)
    return node
@classmethod
def caffe2_op_to_onnx_node(cls, op_def, shapes):
    """Convert one Caffe2 OperatorDef into ONNX nodes plus any constant
    tensors produced by the conversion.

    Prefers the C++ exporter when it supports the op type; otherwise a
    special-case translator or the generic renaming translator is used.
    Returns (nodes, const_tensors).
    """
    if C.support_onnx_export(op_def.type):
        node_strs, tensor_strs = C.export_to_onnx(cls._dummy_name, op_def.SerializeToString(), shapes)
        nodes = []
        for s in node_strs:
            node = NodeProto()
            node.ParseFromString(s)
            nodes.append(node)
        const_tensors = []
        for s in tensor_strs:
            tensor = TensorProto()
            tensor.ParseFromString(s)
            const_tensors.append(tensor)
        return nodes, const_tensors
    elif op_def.type in cls._special_operators:
        translator = getattr(cls, cls._special_operators[op_def.type])
    else:
        translator = cls._common_caffe2_op_to_onnx_node
    nodes = translator(op_def, shapes)
    const_tensors = []
    if isinstance(nodes, tuple):
        # Translators may return (nodes, const_tensors).
        nodes, const_tensors = nodes
    if not isinstance(nodes, collections.abc.Iterable):
        nodes = [nodes]
    return nodes, const_tensors
@staticmethod
def _all_names_in_net(net):
    """Collect every blob name mentioned by a NetDef: external inputs,
    external outputs, and all op inputs/outputs. Empty set for None."""
    if net is None:
        return set()
    collected = set()
    collected.update(net.external_input)
    collected.update(net.external_output)
    for op in net.op:
        collected.update(op.input)
        collected.update(op.output)
    return collected
@staticmethod
def _extract_value_info(tensor):
    """Build an ONNX ValueInfoProto (name/type/shape) from a TensorProto."""
    return make_tensor_value_info(
        name=tensor.name,
        elem_type=tensor.data_type,
        shape=tensor.dims)
@classmethod
def caffe2_net_to_onnx_graph(cls,
                             predict_net,
                             init_net=None,
                             value_info=None):
    """Convert a Caffe2 predict/init net pair into an ONNX GraphProto.

    ``value_info`` maps blob name -> (onnx elem_type, shape) for blobs whose
    type/shape cannot be recovered from ``init_net``; it is extended in place
    with everything discovered during conversion.

    Raises ValueError for a non-dict ``value_info`` and RuntimeError when an
    external input has no type/shape info.
    """
    if value_info is None:
        value_info = {}
    if not isinstance(value_info, dict):
        raise ValueError('Please pass value_info as a '
                         'name -> (type, shape) dictionary')
    # Drop placeholder fill ops for blobs already described in value_info,
    # then rename blobs into SSA form (each blob written exactly once).
    cls._filter_fake_init(init_net, value_info)
    cls._ssa_rewrite(predict_net, init_net, value_info)
    if init_net:
        initializer = cls.caffe2_init_net_to_initializer(init_net)
        value_info.update({init.name: (init.data_type, init.dims)
                           for init in initializer})
    else:
        initializer = []
    # Check if value_info contains the types/shapes of all the blobs, in
    # which case we don't need to infer them by running the net.
    run_native_net = False
    for op in predict_net.op:
        for name in itertools.chain(op.input, op.output):
            if name not in value_info:
                run_native_net = True
                break
    # Check whether we have got type shape info of all input
    missing = (set(list(predict_net.external_input)) -
               set(value_info.keys()))
    if missing:
        raise RuntimeError('Could not find value info of inputs: {}'.format(
            ', '.join(missing)))
    ws = None
    outputs = None
    if run_native_net:
        # Execute the net on random inputs purely to discover the shapes
        # and dtypes of intermediate and output blobs.
        inputs = {}
        for name in predict_net.external_input:
            elem_type, shape = value_info[name]
            inputs[name] = np.random.randn(*shape).astype(
                mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type])
        ws, outputs = c2_native_run_net(
            init_net,
            predict_net,
            inputs)
        for name in predict_net.external_output:
            output = outputs[name]
            elem_type = mapping.NP_TYPE_TO_TENSOR_TYPE[output.dtype]
            shape = output.shape
            value_info[name] = (elem_type, shape)
    graph_def = GraphProto()
    graph_def.name = predict_net.name
    graph_def.initializer.extend(initializer)
    # This is a mapping from Caffe2 names to ONNX names
    graph_def.input.extend(
        make_tensor_value_info(
            name=name,
            elem_type=value_info[name][0],
            shape=value_info[name][1])
        for name in predict_net.external_input)
    # Seed the dummy-name generator with every name already in use so
    # names generated during translation cannot collide.
    cls._dummy_name.reset(cls._all_names_in_net(predict_net) | cls._all_names_in_net(init_net))
    for op in predict_net.op:
        shapes = {}
        for name in itertools.chain(op.input, op.output):
            if ws:
                # Shapes come from the live blobs when we ran the net...
                blob = ws.FetchBlob(name)
                if hasattr(blob, 'shape'):
                    shapes[name] = blob.shape
            else:
                # ...otherwise from the caller-provided value_info.
                shapes[name] = value_info[name][1]
        nodes, const_tensors = cls.caffe2_op_to_onnx_node(op, shapes=shapes)
        graph_def.node.extend(nodes)
        graph_def.initializer.extend(const_tensors)
        graph_def.input.extend([cls._extract_value_info(tensor) for tensor in const_tensors])
    all_output = set(sum((list(node.output) for node in graph_def.node),
                         [init.name for init in graph_def.initializer]))
    # External outputs nothing produces would make the graph invalid;
    # warn and drop them below.
    redundant_output = set(vi.name for vi in graph_def.output) - all_output
    if redundant_output:
        logger.warning(
            'There are graph output not produced by any node or initializer: {}'
            '! Will drop them.'.format(', '.join(redundant_output)))
    graph_def.output.extend(
        make_tensor_value_info(
            name=name,
            elem_type=value_info[name][0],
            shape=value_info[name][1])
        for name in predict_net.external_output
        if name in all_output)
    return graph_def
@classmethod
def caffe2_init_net_to_initializer(cls, init_net):
    """Run ``init_net`` and convert every blob it produces into an ONNX
    TensorProto initializer (sorted by blob name)."""
    ws, _ = c2_native_run_net(init_net=None, predict_net=init_net, inputs=[])
    produced = set()
    for op in init_net.op:
        produced.update(op.output)
    return [numpy_helper.from_array(ws.FetchBlob(name), name=name)
            for name in sorted(produced)]
@classmethod
def _filter_fake_init(cls, init_net, value_info):
    """Remove placeholder fill ops from ``init_net`` (in place) for blobs
    whose real type/shape is already supplied via ``value_info``."""
    if not init_net:
        return
    fill_pattern = re.compile('GivenTensor.*Fill|ConstantFill')
    placeholders = [op for op in init_net.op
                    if len(op.output) == 1
                    and op.output[0] in value_info
                    and fill_pattern.match(op.type)]
    for op in placeholders:
        init_net.op.remove(op)
@classmethod
def ssa_rewrite(cls, net, init_net, value_info):
    # Public wrapper kept for external callers; see _ssa_rewrite for the
    # actual in-place SSA renaming logic.
    return cls._ssa_rewrite(net, init_net, value_info)
@classmethod
def _ssa_rewrite(cls, net, init_net, value_info):
    """Rewrite ``net`` in place into SSA form.

    Each blob that is written more than once is renamed to
    ``<name>_<version>`` for versions > 0; blobs with a single version keep
    their original name. ``init_net`` is only validated here (it must
    contain nothing but single-output weight-fill ops). ``value_info`` is
    accepted for interface compatibility but not used.
    """
    def ssa_name(name, version, version_cnt=None):
        # Version 0 always keeps the original name; so do blobs that only
        # ever have one version (no rename needed, keeps graphs readable).
        if version == 0:
            return name
        if version_cnt and len(version_cnt.get(name, {})) <= 1:
            return name
        return '{}_{}'.format(name, version)
    if init_net:
        for op in init_net.op:
            assert re.match('GivenTensor.*Fill', op.type), "type is {}, \n{}".format(op.type, op)
            assert len(op.output) == 1
    ssa, blob_versions = caffe2_core.get_ssa(net)
    # Count the distinct versions each blob name takes across the net.
    version_cnt = {}
    versioned_blobs = []
    for versioned_input, versioned_output in ssa:
        versioned_blobs += versioned_input
        versioned_blobs += versioned_output
    for (name, version) in versioned_blobs:
        if name not in version_cnt:
            version_cnt[name] = {version}
        else:
            version_cnt[name].add(version)
    # get_ssa returns one (inputs, outputs) pair per op, in op order.
    assert len(net.op) == len(ssa)
    for op, (versioned_inputs, versioned_outputs) in zip(net.op, ssa):
        op.input[:] = [ssa_name(name, version, version_cnt)
                       for name, version in versioned_inputs]
        op.output[:] = [ssa_name(name, version, version_cnt)
                        for name, version in versioned_outputs]
    # External outputs must refer to the final version of each blob.
    net.external_output[:] = [ssa_name(name, blob_versions[name], version_cnt)
                              for name in net.external_output]
@classmethod
def caffe2_net_to_onnx_model(cls, *args, **kwargs):
    """Convert a Caffe2 net into a checked ONNX ModelProto.

    Arguments are forwarded to ``caffe2_net_to_onnx_graph``; the resulting
    graph is wrapped in a model pinned to ``target_opset_version`` and
    validated with the ONNX checker before being returned.
    """
    opset = OperatorSetIdProto()
    opset.domain = ''  # ONNX default domain
    opset.version = cls.target_opset_version
    graph = cls.caffe2_net_to_onnx_graph(*args, **kwargs)
    model = make_model(graph,
                       opset_imports=[opset],  # current supported opset version
                       producer_name='onnx-caffe2',  # producer name
                       )
    checker.check_model(model)
    return model
# Module-level convenience aliases so callers can use the conversion API
# without referencing the Caffe2Frontend class directly.
caffe2_net_to_onnx_graph = Caffe2Frontend.caffe2_net_to_onnx_graph
caffe2_net_to_onnx_model = Caffe2Frontend.caffe2_net_to_onnx_model
caffe2_init_net_to_initializer = Caffe2Frontend.caffe2_init_net_to_initializer
ssa_rewrite = Caffe2Frontend.ssa_rewrite
|
pytorch-master
|
caffe2/python/onnx/frontend.py
|
pytorch-master
|
caffe2/python/onnx/__init__.py
|
|
## @package onnx
# Module caffe2.python.onnx.helper
from caffe2.proto import caffe2_pb2
from onnx.backend.base import namedtupledict
from caffe2.python.onnx.workspace import Workspace
import logging
import time
log = logging.getLogger(__name__)
def c2_native_run_op(op_def, inputs):
    """Run a single Caffe2 operator in a fresh workspace.

    ``inputs`` is either a dict of blob name -> value, or a sequence matched
    positionally against ``op_def.input``. Returns ``(workspace, outputs)``
    where ``outputs`` is a named tuple keyed by the op's output names.
    """
    ws = Workspace()
    if isinstance(inputs, dict):
        feed_items = list(inputs.items())
    else:
        assert len(op_def.input) == len(inputs)
        feed_items = list(zip(op_def.input, inputs))
    for name, value in feed_items:
        ws.FeedBlob(name, value, op_def.device_option)
    ws.RunOperatorOnce(op_def)
    out_names = op_def.output
    out_values = [ws.FetchBlob(name) for name in out_names]
    return ws, namedtupledict('Outputs', out_names)(*out_values)
def c2_native_run_net(init_net, predict_net, inputs, debug_arg=None):
    """Run ``predict_net`` (after optionally running ``init_net``) in a fresh
    workspace and return ``(workspace, outputs)``.

    ``inputs`` may be a dict of name -> value, or a sequence: when its length
    matches the blobs ``init_net`` did not provide, it is matched against
    those; otherwise it fills the first ``len(inputs)`` external inputs.
    ``debug_arg`` is only printed when the positional-input invariant fails.
    """
    ws = Workspace()
    if init_net:
        ws.RunNetOnce(init_net)
    if isinstance(inputs, dict):
        for key, value in inputs.items():
            ws.FeedBlob(key, value, predict_net.device_option)
    else:
        uninitialized = [input_name
                         for input_name in predict_net.external_input
                         if not ws.HasBlob(input_name)]
        if len(uninitialized) == len(inputs):
            # Sequence maps one-to-one onto the blobs init_net left unset.
            for key, value in zip(uninitialized, inputs):
                ws.FeedBlob(key, value, predict_net.device_option)
        else:
            # If everything is initialized,
            # we just initialized the first len(inputs) external_input.
            # Added some extra logging to help debug sporadic sandcastle fails
            if len(inputs) > len(predict_net.external_input):
                print("c2_native_run_net assert. len(inputs)=", len(inputs),
                      "len(predict_net.external_input)=",
                      len(predict_net.external_input))
                print("debug_arg: ", debug_arg)
                print("predict_net ", type(predict_net), ":", predict_net)
                print("inputs ", type(inputs), ":", inputs)
            assert(len(inputs) <= len(predict_net.external_input))
            for i in range(len(inputs)):
                ws.FeedBlob(predict_net.external_input[i], inputs[i],
                            predict_net.device_option)
    ws.RunNetOnce(predict_net)
    output_names = predict_net.external_output
    output_values = [ws.FetchBlob(name) for name in output_names]
    return ws, namedtupledict('Outputs', output_names)(*output_values)
def load_caffe2_net(file):
    """Deserialize a Caffe2 NetDef from the binary protobuf file ``file``."""
    with open(file, "rb") as f:
        serialized = f.read()
    net = caffe2_pb2.NetDef()
    net.ParseFromString(serialized)
    return net
def save_caffe2_net(net, file, output_txt=False):
    """Serialize ``net`` to the binary protobuf file ``file``.

    When ``output_txt`` is true, also write the net's text representation
    to ``file + "txt"``.
    NOTE(review): the text path has no dot before the suffix ("nettxt",
    not "net.txt") — presumably intentional; confirm before changing.
    """
    with open(file, "wb") as f:
        f.write(net.SerializeToString())
    if not output_txt:
        return
    with open(file + "txt", "w") as f:
        f.write(str(net))
def benchmark_caffe2_model(init_net, predict_net, warmup_iters=3, main_iters=10, layer_details=True):
    '''
    Run the benchmark net on the target model.
    Return the execution time per iteration (millisecond).
    '''
    ws = Workspace()
    if init_net:
        ws.RunNetOnce(init_net)
    ws.CreateNet(predict_net)
    # BenchmarkNet returns overall + per-layer stats; index 0 is ms/iter.
    stats = ws.BenchmarkNet(predict_net.name, warmup_iters, main_iters, layer_details)
    del ws
    return stats[0]
def benchmark_pytorch_model(model, inputs, training=False, warmup_iters=3,
                            main_iters=10, verbose=False):
    '''
    Run the model several times, and measure the execution time.
    Return the execution time per iteration (millisecond).

    ``model`` is called as ``model(*inputs)``; it is invoked ``warmup_iters``
    times untimed and then ``main_iters`` times timed.
    NOTE: ``training`` and ``verbose`` are accepted for API compatibility
    but are currently unused.
    '''
    logger = logging.getLogger(__name__)
    for _i in range(warmup_iters):
        model(*inputs)
    total_pytorch_time = 0.0
    for _i in range(main_iters):
        # perf_counter is monotonic and high-resolution; time.time() is
        # wall-clock and can jump (NTP adjustments) or return identical
        # values for fast models, skewing the benchmark.
        ts = time.perf_counter()
        model(*inputs)
        total_pytorch_time += time.perf_counter() - ts
    per_iter_ms = total_pytorch_time * 1000 / main_iters
    logger.info("The PyTorch model execution time per iter is %s milliseconds, "
                "%s iters per second.", per_iter_ms,
                main_iters / total_pytorch_time)
    return per_iter_ms
|
pytorch-master
|
caffe2/python/onnx/helper.py
|
## @package onnx
# Module caffe2.python.onnx.workspace
import uuid
from caffe2.python import workspace
# Separating out the context manager part so that users won't
# (mis-)use Workspace instances as context managers
class _WorkspaceCtx(object):
    """Reentrant context manager that switches the global Caffe2 workspace
    to ``workspace_id`` on entry and restores the previous one on exit."""

    def __init__(self, workspace_id):
        self.workspace_id = workspace_id
        # A stack, so that the context manager is reentrant.
        self.workspace_stack = []

    def __enter__(self):
        self.workspace_stack.append(workspace.CurrentWorkspace())
        workspace.SwitchWorkspace(self.workspace_id, create_if_missing=True)

    def __exit__(self, exc_type, exc_value, traceback):
        previous = self.workspace_stack.pop()
        # During abnormal interpreter shutdown Caffe2 may already have
        # destroyed the workspace we are returning to; create_if_missing
        # harmlessly recreates it so this final switch cannot fail.
        workspace.SwitchWorkspace(previous, create_if_missing=True)
class Workspace(object):
    """
    An object representing a Caffe2 workspace. It is a context manager,
    so you can say 'with workspace:' to use the represented workspace
    as your global workspace. It also supports every method supported
    by caffe2.python.workspace, but instead of running these operations
    in the global workspace, it runs them in the workspace represented
    by this object. When this object goes dead, the workspace (and all
    nets and blobs within it) are freed.

    Caffe2's workspace model is global-state oriented; this class makes it
    possible to work with workspaces locally without forgetting to
    deallocate everything at the end.
    """

    def __init__(self):
        # Caffe2 exposes no native way to mint a fresh, unused workspace,
        # so fall back to a random UUID and assume it does not collide.
        self._ctx = _WorkspaceCtx(str(uuid.uuid4()))

    def __getattr__(self, attr):
        # Proxy any caffe2.python.workspace function, executing it with
        # this object's workspace temporarily switched in.
        def proxied(*args, **kwargs):
            with self._ctx:
                return getattr(workspace, attr)(*args, **kwargs)
        return proxied

    def __del__(self):
        # Goes through the proxy (a 'self' call) so we switch into this
        # workspace before resetting; calling workspace.ResetWorkspace()
        # directly would reset whatever workspace happens to be ambient.
        self.ResetWorkspace()
|
pytorch-master
|
caffe2/python/onnx/workspace.py
|
# @package onnx
# Module caffe2.python.onnx.backend_rep
from caffe2.python import core
from caffe2.proto import caffe2_pb2
from onnx.backend.base import BackendRep, namedtupledict
class Caffe2Rep(BackendRep):
    """A prepared Caffe2 model implementing the ONNX ``BackendRep`` interface.

    Holds the init/predict nets plus the workspace they run in; net creation
    and init-net execution are deferred to the first call to ``run``.
    """

    def __init__(self, init_net, predict_net, workspace, uninitialized):
        super(Caffe2Rep, self).__init__()
        self.init_net = init_net
        self.predict_net = predict_net
        self.workspace = workspace
        # The list of uninitialized external_inputs in workspace, we need this to
        # pair the name with given sequence inputs.
        self.uninitialized = uninitialized
        # Lazy-initialization flags consumed by run().
        self.nets_created = False
        self.ran_init_net = False

    @property
    def _name_scope(self):
        # Blob names are namespaced per device when running on CUDA.
        if self.predict_net.device_option.device_type == caffe2_pb2.CUDA:
            return 'gpu_{}'.format(self.predict_net.device_option.device_id)
        return ''

    def run(self, inputs, **kwargs):
        """Feed ``inputs`` (dict, list/tuple, or a single value), execute the
        predict net, and return a named tuple of its external outputs.

        Raises RuntimeError when a sequence input does not match the number
        of uninitialized graph inputs.
        """
        super(Caffe2Rep, self).run(inputs, **kwargs)
        with core.DeviceScope(self.predict_net.device_option):
            if isinstance(inputs, dict):
                with core.NameScope(self._name_scope):
                    for key, value in inputs.items():
                        self.workspace.FeedBlob(key, value)
            elif isinstance(inputs, list) or isinstance(inputs, tuple):
                if len(self.uninitialized) != len(inputs):
                    raise RuntimeError('Expected {} values for uninitialized '
                                       'graph inputs ({}), but got {}.'.format(
                                           len(self.uninitialized),
                                           ', '.join(self.uninitialized),
                                           len(inputs)))
                for i, value in enumerate(inputs):
                    # namescope already baked into protobuf
                    self.workspace.FeedBlob(self.uninitialized[i], value)
            else:
                # single input
                self.workspace.FeedBlob(self.uninitialized[0], inputs)
            # Create nets and run the init net only once per rep.
            if not self.nets_created:
                self.workspace.CreateNet(self.init_net)
                self.workspace.CreateNet(self.predict_net)
                self.nets_created = True
            if not self.ran_init_net:
                self.workspace.RunNet(self.init_net.name)
                self.ran_init_net = True
            self.workspace.RunNet(self.predict_net.name)
            output_values = []
            for name in self.predict_net.external_output:
                try:
                    output_values.append(self.workspace.FetchBlob(name))
                except Exception:
                    # Fall back for blobs the regular fetcher rejects
                    # (int8/quantized blobs need the dedicated call).
                    output_values.append(self.workspace.FetchInt8Blob(name))
            return namedtupledict('Outputs',
                                  self.predict_net.external_output)(*output_values)
|
pytorch-master
|
caffe2/python/onnx/backend_rep.py
|
## @package onnx
#Module caffe2.python.onnx.onnxifi
"""
ONNXIFI a Caffe2 net
"""
from caffe2.proto import caffe2_pb2
import caffe2.python._import_c_extension as C
def onnxifi_set_option(option_name, option_value):
    """
    Set onnxifi option

    The value is stringified before being passed to the C extension.
    """
    return C.onnxifi_set_option(option_name, str(option_value))
def onnxifi_get_option(option_name):
    """
    Get onnxifi option

    Returns whatever the C extension reports for ``option_name``.
    """
    return C.onnxifi_get_option(option_name)
def onnxifi_caffe2_net(
        pred_net,
        input_shapes,
        max_batch_size=1,
        max_seq_size=1,
        debug=False,
        use_onnx=True,
        merge_fp32_inputs_into_fp16=False,
        adjust_batch=True,
        block_list=None,
        weight_names=None,
        net_ssa_rewritten=False,
        timeout=0):
    """
    Transform the caffe2_net by collapsing ONNXIFI-runnable nodes into Onnxifi c2 ops

    ``input_shapes`` is either a ``TensorBoundShapes`` proto (used as-is) or
    a dict of blob name -> dims; dict entries are treated as CONSTANT dims
    except the leading dimension, which is marked BATCH.
    Returns the transformed NetDef.
    """
    shape_hints = caffe2_pb2.TensorBoundShapes()
    # isinstance instead of exact type() comparison: also accepts
    # subclasses of the expected types (backward compatible broadening).
    if isinstance(input_shapes, caffe2_pb2.TensorBoundShapes):
        shape_hints = input_shapes
    elif isinstance(input_shapes, dict):
        for name, dims in input_shapes.items():
            tbs = caffe2_pb2.TensorBoundShape()
            tbs.name = name
            tbs.shape.dims.extend(dims)
            tbs.dim_type.extend([caffe2_pb2.TensorBoundShape.CONSTANT] * len(tbs.shape.dims))
            # Guard against an empty shape entry, which previously raised
            # IndexError; only mark a batch dim when one exists.
            if tbs.dim_type:
                tbs.dim_type[0] = caffe2_pb2.TensorBoundShape.BATCH
            shape_hints.shapes.extend([tbs])
        shape_hints.max_batch_size = max_batch_size
        shape_hints.max_feature_len = max_seq_size
    pred_net_str = C.onnxifi(pred_net.SerializeToString(),
                             shape_hints.SerializeToString(),
                             block_list if block_list else [],
                             weight_names if weight_names is not None else [],
                             max_batch_size,
                             max_seq_size,
                             timeout,
                             adjust_batch,
                             debug,
                             merge_fp32_inputs_into_fp16,
                             net_ssa_rewritten,
                             use_onnx)
    pred_net_cut = caffe2_pb2.NetDef()
    pred_net_cut.ParseFromString(pred_net_str)
    return pred_net_cut
|
pytorch-master
|
caffe2/python/onnx/onnxifi.py
|
import numpy as np
import time
import unittest
import onnx
import onnx.defs
from onnx.backend.base import namedtupledict
from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.models.download import ModelDownloader
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.onnx.tests.test_utils import TestCase
ONNXIFI_DATATYPE_FLOAT32 = 1
def _print_net(net):
    """Debug helper: print a net's external I/O and each op's inputs/outputs."""
    for name in net.external_input:
        print("Input: {}".format(name))
    for name in net.external_output:
        print("Output: {}".format(name))
    for op in net.op:
        print("Op {}".format(op.type))
        for blob in op.input:
            print(" input: {}".format(blob))
        for blob in op.output:
            print(" output: {}".format(blob))
class OnnxifiTest(TestCase):
    """Op-level tests running single ONNX graphs through the Onnxifi c2 op.

    Both tests are skipped until an ONNXIFI backend is available in CI.
    """

    @unittest.skip("Need ONNXIFI backend support")
    def test_relu_graph(self):
        """Onnxifi-run Relu graph must match np.maximum(X, 0)."""
        batch_size = 1
        X = np.random.randn(batch_size, 1, 3, 2).astype(np.float32)
        graph_def = make_graph(
            [make_node("Relu", ["X"], ["Y"])],
            name="test",
            inputs=[make_tensor_value_info("X", onnx.TensorProto.FLOAT,
                                           [batch_size, 1, 3, 2])],
            outputs=[make_tensor_value_info("Y", onnx.TensorProto.FLOAT,
                                            [batch_size, 1, 3, 2])])
        model_def = make_model(graph_def, producer_name='relu-test')
        op = core.CreateOperator(
            "Onnxifi",
            ["X"],
            ["Y"],
            onnx_model=model_def.SerializeToString(),
            input_names=["X"],
            output_names=["Y"],
            output_shape_hint_0=[ONNXIFI_DATATYPE_FLOAT32, batch_size, 1, 3, 2])
        workspace.FeedBlob("X", X)
        workspace.RunOperatorOnce(op)
        Y = workspace.FetchBlob("Y")
        np.testing.assert_almost_equal(Y, np.maximum(X, 0))

    @unittest.skip("Need ONNXIFI backend support")
    def test_conv_graph(self):
        """3x3 unpadded Conv with an all-ones kernel against a precomputed
        reference output; also checks that c2 op I/O binding is positional."""
        X = np.array([[[[0., 1., 2., 3., 4.],  # (1, 1, 5, 5) input tensor
                        [5., 6., 7., 8., 9.],
                        [10., 11., 12., 13., 14.],
                        [15., 16., 17., 18., 19.],
                        [20., 21., 22., 23., 24.]]]]).astype(np.float32)
        W = np.array([[[[1., 1., 1.],  # (1, 1, 3, 3) tensor for convolution weights
                        [1., 1., 1.],
                        [1., 1., 1.]]]]).astype(np.float32)
        Y_without_padding = np.array([[[[54., 63., 72.],  # (1, 1, 3, 3) output tensor
                                        [99., 108., 117.],
                                        [144., 153., 162.]]]]).astype(np.float32)
        graph_def = make_graph(
            [make_node(
                'Conv',
                inputs=['X', 'W'],
                outputs=['Y'],
                kernel_shape=[3, 3],
                # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
                pads=[0, 0, 0, 0],
            )],
            name="test",
            inputs=[make_tensor_value_info("X", onnx.TensorProto.FLOAT, [1, 1, 5, 5]),
                    make_tensor_value_info("W", onnx.TensorProto.FLOAT, [1, 1, 3, 3]),
                    ],
            outputs=[make_tensor_value_info("Y", onnx.TensorProto.FLOAT,
                                            [1, 1, 3, 3])])
        model_def = make_model(graph_def, producer_name='conv-test')
        # We intentional rewrite the input/output name so test that the
        # input/output binding of c2 op is positional
        op = core.CreateOperator(
            "Onnxifi",
            ["X0"],
            ["Y0"],
            onnx_model=model_def.SerializeToString(),
            initializers=["W", "W0"],
            input_names=["X"],
            output_names=["Y"],
            output_shape_hint_0=[ONNXIFI_DATATYPE_FLOAT32, 1, 1, 3, 3])
        workspace.FeedBlob("X0", X)
        workspace.FeedBlob("W0", W)
        workspace.RunOperatorOnce(op)
        Y = workspace.FetchBlob("Y0")
        np.testing.assert_almost_equal(Y, Y_without_padding)
class OnnxifiTransformTest(TestCase):
    """End-to-end test: run resnet50 natively, then through
    ``onnxifi_caffe2_net``, and compare outputs (skipped without a backend).
    """

    def setUp(self):
        self.model_downloader = ModelDownloader()

    def _add_head_tail(self, pred_net, new_head, new_tail):
        """Wrap ``pred_net`` with Copy ops so its external input/output get
        the names ``new_head`` / ``new_tail``."""
        orig_head = pred_net.external_input[0]
        orig_tail = pred_net.external_output[0]
        # Add head
        head = caffe2_pb2.OperatorDef()
        head.type = "Copy"
        head.input.append(new_head)
        head.output.append(orig_head)
        dummy = caffe2_pb2.NetDef()
        dummy.op.extend(pred_net.op)
        del pred_net.op[:]
        pred_net.op.extend([head])
        pred_net.op.extend(dummy.op)
        pred_net.external_input[0] = new_head
        # Add tail
        tail = caffe2_pb2.OperatorDef()
        tail.type = "Copy"
        tail.input.append(orig_tail)
        tail.output.append(new_tail)
        pred_net.op.extend([tail])
        pred_net.external_output[0] = new_tail

    @unittest.skip("Need ONNXIFI backend support")
    def test_resnet50_core(self):
        N = 1
        repeat = 1
        print("Batch size: {}, repeat inference {} times".format(N, repeat))
        init_net, pred_net, _ = self.model_downloader.get_c2_model('resnet50')
        self._add_head_tail(pred_net, 'real_data', 'real_softmax')
        input_blob_dims = (N, 3, 224, 224)
        input_name = "real_data"
        device_option = core.DeviceOption(caffe2_pb2.CPU, 0)
        init_net.device_option.CopyFrom(device_option)
        pred_net.device_option.CopyFrom(device_option)
        for op in pred_net.op:
            op.device_option.CopyFrom(device_option)
        net_outputs = pred_net.external_output
        Y_c2 = None
        data = np.random.randn(*input_blob_dims).astype(np.float32)
        c2_time = 1
        workspace.SwitchWorkspace("onnxifi_test", True)
        with core.DeviceScope(device_option):
            workspace.FeedBlob(input_name, data)
            workspace.RunNetOnce(init_net)
            workspace.CreateNet(pred_net)
            start = time.time()
            for _ in range(repeat):
                workspace.RunNet(pred_net.name)
            end = time.time()
            c2_time = end - start
            output_values = [workspace.FetchBlob(name) for name in net_outputs]
            Y_c2 = namedtupledict('Outputs', net_outputs)(*output_values)
            workspace.ResetWorkspace()
        # Fill the workspace with the weights
        with core.DeviceScope(device_option):
            workspace.RunNetOnce(init_net)
        # Cut the graph
        start = time.time()
        # BUG FIX: onnxifi_caffe2_net takes no `infer_shapes` keyword, so
        # passing infer_shapes=True raised TypeError; shape info is already
        # conveyed through the shape-hint dict.
        pred_net_cut = onnxifi_caffe2_net(pred_net,
                                          {input_name: input_blob_dims})
        del init_net, pred_net
        #_print_net(pred_net_cut)
        Y_trt = None
        input_name = pred_net_cut.external_input[0]
        print("C2 runtime: {}s".format(c2_time))
        with core.DeviceScope(device_option):
            workspace.FeedBlob(input_name, data)
            workspace.CreateNet(pred_net_cut)
            end = time.time()
            print("Conversion time: {:.2f}s".format(end - start))
            start = time.time()
            for _ in range(repeat):
                workspace.RunNet(pred_net_cut.name)
            end = time.time()
            trt_time = end - start
            print("Onnxifi runtime: {}s, improvement: {}%".format(trt_time, (c2_time - trt_time) / c2_time * 100))
            output_values = [workspace.FetchBlob(name) for name in net_outputs]
            Y_trt = namedtupledict('Outputs', net_outputs)(*output_values)
        np.testing.assert_allclose(Y_c2, Y_trt, rtol=1e-3)
|
pytorch-master
|
caffe2/python/onnx/test_onnxifi.py
|
## @package onnx
# Module caffe2.python.onnx.bin.conversion
import json
from caffe2.proto import caffe2_pb2
import click
from onnx import ModelProto
from caffe2.python.onnx.backend import Caffe2Backend as c2
import caffe2.python.onnx.frontend as c2_onnx
@click.command(
    help='convert caffe2 net to onnx model',
    context_settings={
        'help_option_names': ['-h', '--help']
    }
)
@click.argument('caffe2_net', type=click.File('rb'))
@click.option('--caffe2-net-name',
              type=str,
              help="Name of the caffe2 net")
@click.option('--caffe2-init-net',
              type=click.File('rb'),
              help="Path of the caffe2 init net pb file")
@click.option('--value-info',
              type=str,
              help='A json string providing the '
                   'type and shape information of the inputs')
@click.option('-o', '--output', required=True,
              type=click.File('wb'),
              help='Output path for the onnx model pb file')
def caffe2_to_onnx(caffe2_net,
                   caffe2_net_name,
                   caffe2_init_net,
                   value_info,
                   output):
    """CLI entry point: read predict/init NetDef protos, convert them to an
    ONNX ModelProto, and write it to ``output``."""
    c2_net_proto = caffe2_pb2.NetDef()
    c2_net_proto.ParseFromString(caffe2_net.read())
    # Conversion requires a net name; take it from the proto or the flag.
    if not c2_net_proto.name and not caffe2_net_name:
        raise click.BadParameter(
            'The input caffe2 net does not have name, '
            '--caffe2-net-name must be provided')
    c2_net_proto.name = caffe2_net_name or c2_net_proto.name
    if caffe2_init_net:
        c2_init_net_proto = caffe2_pb2.NetDef()
        c2_init_net_proto.ParseFromString(caffe2_init_net.read())
        c2_init_net_proto.name = '{}_init'.format(caffe2_net_name)
    else:
        c2_init_net_proto = None
    if value_info:
        value_info = json.loads(value_info)
    onnx_model = c2_onnx.caffe2_net_to_onnx_model(
        predict_net=c2_net_proto,
        init_net=c2_init_net_proto,
        value_info=value_info)
    output.write(onnx_model.SerializeToString())
@click.command(
    help='convert onnx model to caffe2 net',
    context_settings={
        'help_option_names': ['-h', '--help']
    }
)
@click.argument('onnx_model', type=click.File('rb'))
@click.option('-o', '--output', required=True,
              type=click.File('wb'),
              help='Output path for the caffe2 net file')
@click.option('--init-net-output',
              required=True,
              type=click.File('wb'),
              help='Output path for the caffe2 init net file')
def onnx_to_caffe2(onnx_model, output, init_net_output):
    """CLI entry point: read an ONNX ModelProto and write the resulting
    Caffe2 predict and init nets to their respective files."""
    onnx_model_proto = ModelProto()
    onnx_model_proto.ParseFromString(onnx_model.read())
    init_net, predict_net = c2.onnx_graph_to_caffe2_net(onnx_model_proto)
    init_net_output.write(init_net.SerializeToString())
    output.write(predict_net.SerializeToString())
|
pytorch-master
|
caffe2/python/onnx/bin/conversion.py
|
pytorch-master
|
caffe2/python/onnx/bin/__init__.py
|
|
# @package onnx
# Module caffe2.python.onnx.tests.onnx_backend_test
import os
import unittest
import onnx.backend.test
import caffe2.python.onnx.backend as c2
from caffe2.python import core
# Clear any global engine preferences so ops run with default engines.
core.SetEnginePref({}, {})
# This is a pytest magic variable to load extra plugins
pytest_plugins = 'onnx.backend.test.report',
backend_test = onnx.backend.test.BackendTest(c2, __name__)
backend_test.exclude(r'(test_hardsigmoid' # Does not support Hardsigmoid.
'|test_hardmax' # Does not support Hardmax.
'|test_.*FLOAT16.*' # Does not support Cast on Float16.
'|test_depthtospace.*' # Does not support DepthToSpace.
'|test_reduce_l1.*' # Does not support ReduceL1.
'|test_reduce_l2.*' # Does not support ReduceL2.
'|test_reduce_log_sum.*' # Does not support ReduceLogSum.
'|test_reduce_prod.*' # Does not support ReduceProd.
'|test_reduce_sum_square.*' # Does not support ReduceSumSquare
'|test_det.*' # Does not support Det
'|test_range.*' # Does not support Range
'|test_tile.*' # Tile's Caffe2 implementation needs some tweak
'|test_lstm.*' # Seems LSTM case has some problem
'|test_simple_rnn.*' # Seems simple RNN case has some problem
'|test_gru.*' # Seems GRU case has some problem
'|test_prelu.*' # PRelu is not compliant with ONNX yet
'|test_operator_repeat.*' # Tile is not compliant with ONNX yet
'|test_.*pool_.*same.*' # Does not support pool same.
'|test_.*pool_.*ceil.*' # Does not support pool same.
'|test_maxpool_with_argmax.*' # MaxPool outputs indices in different format.
'|test_maxpool.*dilation.*' # MaxPool doesn't support dilation yet.
'|test_maxpool.*uint8.*' # MaxPool doesn't support uint8 yet.
'|test_convtranspose.*' # ConvTranspose needs some more complicated translation
'|test_mvn.*' # MeanVarianceNormalization is experimental and not supported.
'|test_dynamic_slice.*' # MeanVarianceNormalization is experimental and not supported.
'|test_eyelike.*' # Needs implementation
'|test_maxunpool.*' # Needs implementation
'|test_acosh.*' # Needs implementation
'|test_asinh.*' # Needs implementation
'|test_atanh.*' # Needs implementation
'|test_onehot.*' # Needs implementation
'|test_scan.*' # Needs implementation
'|test_isnan.*' # Needs implementation
'|test_scatter.*' # Should be similar to ScatterAssign
'|test_constantofshape_int.*' # Needs implementation
'|test_shrink.*' # Needs implementation
'|test_strnorm.*' # Needs implementation
'|test_nonzero.*' # Needs implementation
'|test_tfidfvectorizer.*' # Needs implementation
'|test_top_k.*' # opset 10 is not supported yet
'|test_resize.*' # opset 10 is not supported yet
'|test_slice.*' # opset 10 is not supported yet
'|test_.*qlinear.*' # Skip quantized op test
'|test_.*quantize.*' # Skip quantized op test
'|test_.*matmulinteger.*' # Skip quantized op test
'|test_.*convinteger.*' # Skip quantized op test
'|test_isinf.*' # Needs implementation
'|test_mod.*' # Needs implementation
'|test_nonmaxsuppression.*' # Needs implementation
'|test_reversesequence.*' # Needs implementation
'|test_roialign.*' # Needs implementation
'|test_bitshift.*' # Needs implementation
'|test_round.*' # Needs implementation
'|test_cumsum.*' # Needs implementation
'|test_clip.*' # opset 11 is not supported yet
'|test_gather_elements.*' # opset 11 is not supported yet
'|test_scatter.*' # opset 11 is not supported yet
'|test_unique.*' # opset 11 is not supported yet
'|test_gathernd.*' # opset 11 is not supported yet
'|test_dropout_random.*' # opset 12 is not supported
'|test_dropout_default.*' # opset 12 is not supported
'|test_einsum.*' # opset 12 is not supported
'|test_.*training.*' # training is not supported
'|test_.*_loss.*' # training is not supported
'|test_split_zero_size.*' # unsupported case
'|test_constantofshape_int_shape_zero.*' # unsupported case
'|test_constant_pad.*' # 1d pad is not supported
'|test_edge_pad.*' # 1d pad is not supported
'|test_reflect_pad.*' # 1d pad is not supported
'|test_gemm_default_no_bias.*' # no bias is not supported
'|test_gemm_default_scalar_bias.*' # incorrect type
'|test_sequence_.*' # type sequence is not supported yet
'|test_.*negative_ax.*' # negative axis is not supported yet
'|test_.*negative_ind.*' # negative axis is not supported yet
'|test_argmax_.*select_last_index.*' # unsupported case
'|test_argmin_.*select_last_index_.*' # unsupported case
'|test_celu.*' # unsupported case
'|test_gathernd.*' # unsupported case
'|test_greater_equal.*' # unsupported case
'|test_less_equal.*' # unsupported case
'|test_max_.*' # unsupported case
'|test_min_.*' # unsupported case
'|test_.*momentum_.*' # unsupported case
'|test_sce.*' # unsupported case
'|test_nllloss.*' # unsupported case
'|test_unfoldtodepth.*' # unsupported case
'|test_.*gradient.*' # no support for gradient op in c2-onnx
'|test_.*adagrad.*' # no support for gradient op in c2-onnx
'|test_.*loss.*' # no support for loss op in c2-onnx
'|test_.*adam.*' # no support for adam op
'|test_.*identity.*' # no support for adam op
')')
# Quick patch to unbreak master CI, is working on the debugging.
backend_test.exclude('(test_cast_.*'
'|test_compress_.*'
'|test_Conv1d_.*cuda'
'|test_Conv3d_groups_cuda'
'|test_rnn_seq_length'
'|test_operator_add.*_cuda'
'|test_operator_lstm_cuda'
'|test_operator_rnn.*_cuda'
'|test_lrn_default_cuda)')
# Temporarily skip some ONNX backend tests with broadcasting.
backend_test.exclude('(test_pow_bcast'
'|test_pow_types.*'
')')
# Temporarily skip some ONNX backend tests due to updates in opset 13.
backend_test.exclude('(test_if_.*' # added support for sequence type inputs
'|test_if_seq_.*' # added support for sequence type inputs
'|test_logsoftmax_.*' # axis attr default value changed from 1 to -1
'|test_loop11_.*' # seg fault issue
'|test_loop16_.*' # seg fault issue
'|test_loop13_seq_.*' # no support for sequence inputs for scan input
'|test_reduce_sum_.*' # axes is now an input (not attr), added noop_with_empty_axes
'|test_softmax_.*' # axis attr default value changed from 1 to -1
'|test_split_variable_parts_.*' # axes is now an input (not attr)
'|test_squeeze_.*' # axes is now an input (not attr)
'|test_unsqueeze_.*' # axes is now an input (not attr)
'|test_MaxPool1d_stride_padding_dilation_.*'
'|test_MaxPool2d_stride_padding_dilation_.*'
')')
# Temporarily skip some ONNX backend tests due to updates in opset 14.
backend_test.exclude('(test_add_uint8_.*' # uint8 dtype added
'|test_div_uint8_.*' # uint8 dtype added
'|test_hardswish_.*' # new operator added
'|test_mul_uint8_.*' # uint8 dtype added
'|test_sub_uint8_.*' # uint8 dtype added
'|test_tril_.*' # new operator added
'|test_triu_.*' # new operator added
'|test_identity_sequence_.*' # new operator added
'|test_reshape_allowzero_reordered_.*'
'|test_conv_with_autopad_same_.*'
')')
# Unsupported ops in opset 15
backend_test.exclude('(test_bernoulli_.*'
'|test_castlike_.*'
'|test_optional_.*'
'|test_shape_end_.*'
'|test_shape_start_.*'
'|test_identity_opt_*'
'|test_loop16_seq_none_*'
'|test_if_opt_*'
')')
# Unsupported ops in opset 16
backend_test.exclude('(test_gridsample_.*'
'|test_spacetodepth_.*'
')')
# Unsupported ops in opset 17
backend_test.exclude('(test_layer_normalization_.*'
'|test_blackmanwindow_.*'
'|test_dft_.*'
'|test_hammingwindow_.*'
'|test_hannwindow_.*'
'|test_melweightmatrix_.*'
'|test_stft_.*'
'|test_sequencemap_.*'
')')
# Skip vgg to speed up CI
if 'JENKINS_URL' in os.environ:
    backend_test.exclude(r'(test_vgg19|test_vgg)')
# import all test cases at global scope to make them visible to python.unittest
globals().update(backend_test
                 .enable_report()
                 .test_cases)
if __name__ == '__main__':
    unittest.main()
|
pytorch-master
|
caffe2/python/onnx/tests/onnx_backend_test.py
|
## @package onnx
# Module caffe2.python.onnx.tests.test_utils
import unittest
import numpy as np
class TestCase(unittest.TestCase):
    """Base test case: fixes the numpy RNG seed per test and provides
    helpers for comparing model outputs and attaching dynamic tests."""

    def setUp(self):
        np.random.seed(seed=0)

    def assertSameOutputs(self, outputs1, outputs2, decimal=7):
        """Assert two output sequences match pairwise in dtype and value."""
        self.assertEqual(len(outputs1), len(outputs2))
        for expected, actual in zip(outputs1, outputs2):
            self.assertEqual(expected.dtype, actual.dtype)
            np.testing.assert_almost_equal(expected, actual, decimal=decimal)

    def add_test_case(self, name, test_func):
        """Attach ``test_func`` under ``name``; the name must be fresh and
        start with ``test_`` so unittest discovers it."""
        if not name.startswith('test_'):
            raise ValueError('Test name must start with test_: {}'.format(name))
        if hasattr(self, name):
            raise ValueError('Duplicated test name: {}'.format(name))
        setattr(self, name, test_func)
|
pytorch-master
|
caffe2/python/onnx/tests/test_utils.py
|
## @package onnx
# Module caffe2.python.onnx.tests.helper_test
import unittest
from caffe2.python.onnx.tests.test_utils import TestCase
import caffe2.python._import_c_extension as C
class TestCaffe2Basic(TestCase):
    """Sanity checks for the DummyName generator exposed by the C extension."""

    def test_dummy_name(self):
        generator = C.DummyName()

        def take_three():
            # Draw three fresh names from the generator.
            return [generator.new_dummy_name() for _ in range(3)]

        generator.reset()
        first_batch = take_three()
        generator.reset()
        # A plain reset replays the identical name sequence.
        self.assertEqual(first_batch, take_three())
        for _ in range(2):
            # Resetting with a set of used names must avoid colliding with it.
            generator.reset(set(first_batch))
            self.assertFalse(set(first_batch) & set(take_three()))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
pytorch-master
|
caffe2/python/onnx/tests/helper_test.py
|
## @package onnx
# Module caffe2.python.onnx.tests.ssa_test
import copy
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from onnx import TensorProto
import caffe2.python.onnx.frontend as c2_onnx
from caffe2.python.onnx.helper import c2_native_run_net
from caffe2.python.onnx.tests.test_utils import TestCase
class TestFrontendSSAConversion(TestCase):
    """Tests for ``Caffe2Frontend._ssa_rewrite``, which renames re-assigned
    blobs so the predict net is in single-static-assignment (SSA) form."""

    def test_ssa(self):
        """Rewrite renames the twice-written 'Y' blob but preserves outputs."""
        X = np.random.randn(4, 2).astype(np.float32)
        W = np.random.randn(3, 2).astype(np.float32)
        b = np.random.randn(3).astype(np.float32)
        s = np.random.randn(1).astype(np.float32)
        # Reference result computed directly with numpy: FC then broadcast add.
        np_result = X.dot(W.transpose()) + b + s
        net = caffe2_pb2.NetDef()
        net.name = 'test-ssa'
        net.external_input[:] = ['W', 'X', 'b', 's']
        net.op.extend([
            core.CreateOperator(
                'FC',
                ['X', 'W', 'b'],
                ['Y']
            ),
            # 'Y' is written a second time here, so the net is not SSA yet.
            core.CreateOperator(
                'Add',
                ['Y', 's'],
                ['Y'],
                broadcast=True,
            )
        ])
        net.external_output[:] = ['Y']
        init_net = caffe2_pb2.NetDef()
        init_net.name = 'test-ssa-init'
        init_net.op.extend([
            core.CreateOperator(
                'GivenTensorFill',
                [],
                ['W'],
                values=W,
                shape=W.shape,
            ),
            core.CreateOperator(
                'GivenTensorFill',
                [],
                ['b'],
                values=b,
                shape=b.shape,
            ),
            core.CreateOperator(
                'GivenTensorFill',
                [],
                ['s'],
                values=s,
                shape=s.shape,
            )
        ])
        init_net.external_output[:] = ['W', 'b', 's']
        # Run once before the rewrite to capture reference outputs.
        _, orig_output = c2_native_run_net(
            predict_net=net,
            init_net=init_net,
            inputs=[X])
        value_info = {'X': (TensorProto.FLOAT, X.shape)}
        c2_onnx.Caffe2Frontend._ssa_rewrite(
            net,
            init_net,
            value_info)
        # The two writes to 'Y' must have been renamed to 'Y_1' and 'Y_2'.
        self.assertEqual(net.external_input, ['W', 'X', 'b', 's'])
        self.assertEqual(net.op[0].input, ['X', 'W', 'b'])
        self.assertEqual(net.op[0].output, ['Y_1'])
        self.assertEqual(net.op[1].input, ['Y_1', 's'])
        self.assertEqual(net.op[1].output, ['Y_2'])
        self.assertEqual(net.external_output, ['Y_2'])
        # The init net defines each blob only once, so it is left untouched.
        self.assertEqual(init_net.external_input, [])
        self.assertEqual(init_net.op[0].input, [])
        self.assertEqual(init_net.op[0].output, ['W'])
        self.assertEqual(init_net.op[1].input, [])
        self.assertEqual(init_net.op[1].output, ['b'])
        self.assertEqual(init_net.op[2].input, [])
        self.assertEqual(init_net.op[2].output, ['s'])
        self.assertEqual(init_net.external_output, ['W', 'b', 's'])
        self.assertEqual(value_info, {'X': (TensorProto.FLOAT, X.shape)})
        # The rewritten nets must still compute the same values.
        _, ssa_output = c2_native_run_net(
            predict_net=net,
            init_net=init_net,
            inputs=[X])
        self.assertSameOutputs(ssa_output, orig_output)
        self.assertSameOutputs(ssa_output, [np_result])

    def test_idempotence(self):
        """Rewriting a net that is already in SSA form must be a no-op."""
        net = caffe2_pb2.NetDef()
        net.name = 'test-idempotence'
        net.external_input[:] = ['W', 'X', 'b', 's']
        net.op.extend([
            core.CreateOperator(
                'FC',
                ['X', 'W', 'b'],
                ['Y']
            ),
            core.CreateOperator(
                'Add',
                ['Y', 's'],
                ['Z'],
                broadcast=True,
            )
        ])
        net.external_output[:] = ['Z']
        value_info = {'X': (TensorProto.FLOAT, [4, 2])}
        net_copy = copy.deepcopy(net)
        c2_onnx.Caffe2Frontend._ssa_rewrite(
            net_copy,
            None,
            value_info)
        self.assertEqual(net, net_copy)
|
pytorch-master
|
caffe2/python/onnx/tests/ssa_test.py
|
pytorch-master
|
caffe2/python/onnx/tests/__init__.py
|
|
## @package onnx
# Module caffe2.python.onnx.tests.conversion_test
import json
import tempfile
import textwrap
import traceback
import unittest
import zipfile
from caffe2.proto import caffe2_pb2
from caffe2.python import brew, core
from caffe2.python.model_helper import ModelHelper
from click.testing import CliRunner
import numpy as np
from onnx import helper, ModelProto, TensorProto
from caffe2.python.onnx.helper import c2_native_run_net
from caffe2.python.onnx.bin.conversion import caffe2_to_onnx, onnx_to_caffe2
import caffe2.python.onnx.backend as c2
from caffe2.python.onnx.tests.test_utils import TestCase
class TestConversion(TestCase):
    """End-to-end tests for the caffe2<->onnx conversion CLI and backend."""

    def _run_command(self, cmd, *args, **kwargs):
        """Invoke a click command; fail the test unless it exits with code 0."""
        runner = CliRunner()
        result = runner.invoke(cmd, *args, **kwargs)
        self.assertEqual(result.exit_code, 0, textwrap.dedent('''
        Command exited with non-zero exit code:
        output: {}
        exception: {}
        exc_info: {}
        '''.format(result.output,
                   result.exception,
                   traceback.format_exception(*result.exc_info))))
        return result

    def test_caffe2_to_onnx(self):
        """Convert a one-op caffe2 net plus init net; inspect the ONNX graph."""
        caffe2_net = tempfile.NamedTemporaryFile()
        caffe2_init_net = tempfile.NamedTemporaryFile()
        output = tempfile.NamedTemporaryFile()
        model = ModelHelper(name='caffe2-to-onnx-test')
        brew.relu(model, ["X"], "Y")
        caffe2_net.write(model.net.Proto().SerializeToString())
        caffe2_net.flush()
        # The init net supplies 'X', which should become an ONNX initializer.
        init_model = ModelHelper(name='caffe2-to-onnx-init-test')
        init_model.net.GivenTensorFill([], 'X', shape=[2, 2],
                                       values=np.zeros((2, 2)).flatten().astype(float))
        caffe2_init_net.write(init_model.net.Proto().SerializeToString())
        caffe2_init_net.flush()
        self._run_command(
            caffe2_to_onnx, [
                caffe2_net.name,
                '--caffe2-init-net', caffe2_init_net.name,
                '--output', output.name,
            ],
            catch_exceptions=False,
        )
        onnx_model = ModelProto()
        onnx_model.ParseFromString(output.read())
        self.assertEqual(len(onnx_model.graph.node), 1)
        self.assertEqual(onnx_model.graph.node[0].op_type, 'Relu')
        self.assertEqual(len(onnx_model.graph.initializer), 1)
        self.assertEqual(onnx_model.graph.initializer[0].name, onnx_model.graph.input[0].name)

    def test_caffe2_to_onnx_value_info(self):
        """Without an init net, conversion requires explicit --value-info."""
        caffe2_net = tempfile.NamedTemporaryFile()
        output = tempfile.NamedTemporaryFile()
        model = ModelHelper(name='caffe2-to-onnx-test')
        brew.relu(model, ["X"], "Y")
        caffe2_net.write(model.net.Proto().SerializeToString())
        caffe2_net.flush()
        args = [caffe2_net.name, '--output', output.name]
        # Missing value info must be reported as an error.
        self.assertRaisesRegex(Exception,
                               'value info',
                               self._run_command, caffe2_to_onnx, args)
        args.extend([
            '--value-info',
            json.dumps({
                'X': (TensorProto.FLOAT, (2, 2)),
            })])
        self._run_command(caffe2_to_onnx, args)
        onnx_model = ModelProto()
        onnx_model.ParseFromString(output.read())
        self.assertEqual(len(onnx_model.graph.node), 1)
        self.assertEqual(onnx_model.graph.node[0].op_type, 'Relu')
        self.assertEqual(len(onnx_model.graph.initializer), 0)

    @unittest.skip("Disabled due to onnx optimizer deprecation")
    def test_onnx_to_caffe2(self):
        """Convert an ONNX Mul model; the initializer lands in the init net."""
        onnx_model = tempfile.NamedTemporaryFile()
        output = tempfile.NamedTemporaryFile()
        init_net_output = tempfile.NamedTemporaryFile()
        node_def = helper.make_node(
            "Mul", ["X", "W"], ["Y"])
        graph_def = helper.make_graph(
            [node_def],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
             helper.make_tensor_value_info("W", TensorProto.FLOAT, (1, 3))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
            initializer=[helper.make_tensor("W",
                                            TensorProto.FLOAT,
                                            [1, 3],
                                            np.zeros((1, 3)).flatten().astype(float))])
        model_def = helper.make_model(graph_def, producer_name='onnx-to-caffe2-test')
        onnx_model.write(model_def.SerializeToString())
        onnx_model.flush()
        self._run_command(
            onnx_to_caffe2, [
                onnx_model.name,
                '--output', output.name,
                '--init-net-output', init_net_output.name,
            ])
        caffe2_net = caffe2_pb2.NetDef()
        caffe2_net.ParseFromString(output.read())
        self.assertEqual(len(caffe2_net.op), 1)
        self.assertEqual(caffe2_net.op[0].type, 'Mul')
        caffe2_init_net = caffe2_pb2.NetDef()
        caffe2_init_net.ParseFromString(init_net_output.read())
        self.assertEqual(len(caffe2_init_net.op), 1)
        self.assertEqual(set(sum([list(init_op.output)
                                  for init_op in caffe2_init_net.op], [])),
                         {'W'})

    def test_onnx_to_caffe2_zipfile(self):
        """Load an ONNX model stored as a zip archive with external raw data."""
        buf = tempfile.NamedTemporaryFile()
        onnx_model = zipfile.ZipFile(buf, 'w')
        node_def = helper.make_node(
            "MatMul", ["X", "W"], ["Y"])
        X = np.random.rand(2, 3).astype(np.float32)
        W = np.random.rand(3, 2).flatten().astype(np.float32)
        graph_def = helper.make_graph(
            [node_def],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
             helper.make_tensor_value_info("W", TensorProto.FLOAT, (3, 2))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2))],
            initializer=[helper.make_tensor("W",
                                            TensorProto.FLOAT,
                                            [3, 2],
                                            W.tobytes(),
                                            raw=True)])
        model_def = helper.make_model(graph_def, producer_name='onnx-to-caffe2-test')
        # The archive holds the model proto plus the raw bytes of 'W'.
        onnx_model.writestr('__MODEL_PROTO', model_def.SerializeToString())
        onnx_model.writestr('W', W.tobytes())
        onnx_model.close()
        W = W.reshape((3, 2))
        Y_expect = np.matmul(X, W)
        c2_model = c2.prepare_zip_archive(buf)
        Y = c2_model.run(X).Y
        np.testing.assert_allclose(Y, Y_expect)

    def _make_fake_if_op(self, true_nodes, false_nodes, output_types):
        """Wrap the given branches in an If node with a constant-True condition."""
        true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
        true_graph = helper.make_graph(true_nodes, "true_graph", [], [
            helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2)),
        ])
        false_graph = helper.make_graph(false_nodes, "false_graph", [], [
            helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2)),
        ])
        if_inputs = ["condition"]
        if_outputs = [name for _, _, name in output_types]
        retval_nodes = [
            helper.make_node("Constant", [], ["condition"], value=true),
            helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
                             else_branch=false_graph)
        ]
        return retval_nodes

    def test_onnx_to_caffe2_if(self):
        """If with a constant-True condition executes the then-branch (MatMul)."""
        true_nodes = [helper.make_node(
            "MatMul", ["X", "W"], ["Y"])]
        false_nodes = [helper.make_node("Slice", ["X"], ["Y"], axes=[0, 1],
                                        starts=[0, 0], ends=[2, 2])]
        nodes = self._make_fake_if_op(true_nodes, false_nodes, [(TensorProto.FLOAT, (2, 2), "Y")])
        X = np.random.rand(2, 3).astype(np.float32)
        W = np.random.rand(3, 2).flatten().astype(np.float32)
        graph_def = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
             helper.make_tensor_value_info("W", TensorProto.FLOAT, (3, 2))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2))],
            initializer=[helper.make_tensor("W",
                                            TensorProto.FLOAT,
                                            [3, 2],
                                            W.tolist())]
        )
        onnx_id = helper.make_opsetid("", 9)
        model_def = helper.make_model(graph_def, producer_name='onnx-to-caffe2-test',
                                      opset_imports=[onnx_id])
        p = c2.prepare(model_def)
        Y = np.matmul(X, W.reshape(3, 2))
        out = p.run(X)
        np.testing.assert_allclose(out.Y, Y)

    # input_types and output_types are lists of triples of (type, shape, name)
    def _make_fake_loop_op(self, body_nodes, input_types, output_types):
        """Wrap body_nodes in a Loop node with a fixed trip count of 10."""
        ten = helper.make_tensor("trip_count_value", TensorProto.INT64, (1,), [10])
        true = helper.make_tensor("condition", TensorProto.BOOL, (1,), [True])
        # lcd is a dummy loop-carried dependency that only exists because
        # right now the schema checker is broken and assumes a variadic
        # input needs at least one value.
        graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT64, (1,)),
                        helper.make_tensor_value_info("cond", TensorProto.BOOL, (1,))]
        for type, shape, name in input_types:
            graph_inputs.append(helper.make_tensor_value_info("_" + name, type, shape))
        graph_outputs = [helper.make_tensor_value_info("cond", TensorProto.BOOL, (1,))]
        for type, shape, name in output_types:
            graph_outputs.append(helper.make_tensor_value_info("_" + name, type, shape))
        body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
                                       graph_outputs)
        loop_inputs = ["trip_count", "condition"]
        loop_inputs.extend([name for _, _, name in input_types])
        loop_outputs = [name for _, _, name in output_types]
        retval_nodes = [
            helper.make_node("Constant", [], ["trip_count"], value=ten),
            helper.make_node("Constant", [], ["condition"], value=true),
            helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
        ]
        return retval_nodes

    @unittest.skip("Disabled due to onnx optimizer deprecation")
    def test_onnx_to_caffe2_loop(self):
        """A 10-iteration Loop of MatMul equals ten repeated numpy matmuls."""
        body_nodes = [helper.make_node(
            "MatMul", ["_X", "W"], ["_Y"])]
        nodes = self._make_fake_loop_op(body_nodes,
                                        [(TensorProto.FLOAT, (2, 2), "X")],
                                        [(TensorProto.FLOAT, (2, 2), "Y")])
        X = np.random.rand(2, 2).astype(np.float32)
        W = np.random.rand(2, 2).flatten().astype(np.float32)
        graph_def = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 2)),
             helper.make_tensor_value_info("W", TensorProto.FLOAT, (2, 2))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2))],
            initializer=[helper.make_tensor("W",
                                            TensorProto.FLOAT,
                                            [2, 2],
                                            W.tolist())]
        )
        model_def = helper.make_model(graph_def, producer_name='onnx-to-caffe2-test')
        Y = X
        for _ in range(10):
            Y = np.matmul(Y, W.reshape(2, 2))
        p = c2.prepare(model_def)
        out = p.run(X)
        np.testing.assert_allclose(out.Y, Y)

    # TODO investigate why this is failing after changing Reshape
    # operator from taking the new shape as attribute to as input
    @unittest.skip('Start failing after Reshape op change')
    def test_convert_end2end(self):
        """Round-trip caffe2 -> onnx -> caffe2 and check numeric agreement."""
        predict_net_f = tempfile.NamedTemporaryFile()
        init_net_f = tempfile.NamedTemporaryFile()
        onnx_model_f = tempfile.NamedTemporaryFile()
        x = 'X'
        w = 'W'
        b = 'b'
        y = 'Y'
        predict_net = caffe2_pb2.NetDef()
        predict_net.name = 'test-convert-end2end'
        predict_net.external_input[:] = [x, w, b]
        predict_net.external_output[:] = [y]
        predict_net.op.extend([
            core.CreateOperator(
                'FC',
                inputs=[x, w, b],
                outputs=[y],
                axis=2,
            ),
        ])
        predict_net_f.write(predict_net.SerializeToString())
        predict_net_f.flush()
        init_net = caffe2_pb2.NetDef()
        init_net.name = 'test-convert-end2end-init'
        init_net.external_output[:] = [w, b]
        x_val = np.random.randn(1, 3, 2).astype(np.float32)
        w_val = np.random.randn(4, 2).astype(np.float32)
        b_val = np.random.randn(4).astype(np.float32)
        init_net.op.extend([
            core.CreateOperator(
                'GivenTensorFill',
                [],
                [w],
                values=w_val,
                shape=w_val.shape,
            ),
            core.CreateOperator(
                'GivenTensorFill',
                [],
                [b],
                values=b_val,
                shape=b_val.shape,
            ),
        ])
        init_net_f.write(init_net.SerializeToString())
        init_net_f.flush()
        y_val = np.matmul(x_val, w_val.transpose()) + b_val
        # Convert repeatedly to verify conversion is stable across runs.
        for _ in range(5):
            self._run_command(
                caffe2_to_onnx, [
                    predict_net_f.name,
                    '--caffe2-init-net', init_net_f.name,
                    '--output', onnx_model_f.name,
                    '--value-info',
                    json.dumps({
                        x: (TensorProto.FLOAT, (1, 3, 2)),
                    }),
                ],
                catch_exceptions=False,
            )
            onnx_model_f.seek(0)
            onnx_model = ModelProto()
            onnx_model.ParseFromString(onnx_model_f.read())
            np.testing.assert_almost_equal(
                c2.run_model(
                    onnx_model, {onnx_model.graph.input[0].name: x_val}),
                [y_val])
            self._run_command(
                onnx_to_caffe2, [
                    onnx_model_f.name,
                    '--output', predict_net_f.name,
                    '--init-net-output', init_net_f.name,
                ])
            predict_net_f.seek(0)
            predict_net = caffe2_pb2.NetDef()
            predict_net.ParseFromString(predict_net_f.read())
            init_net_f.seek(0)
            init_net = caffe2_pb2.NetDef()
            init_net.ParseFromString(init_net_f.read())
            x = predict_net.external_input[0]
            np.testing.assert_almost_equal(c2_native_run_net(init_net=init_net,
                                                             predict_net=predict_net,
                                                             inputs={x: x_val})[1],
                                           [y_val])
|
pytorch-master
|
caffe2/python/onnx/tests/conversion_test.py
|
# @package onnx
# Module caffe2.python.onnx.tests.c2_ref_test
import os
import unittest
from caffe2.python import core
from caffe2.proto import caffe2_pb2
import onnx
from onnx.helper import make_node, make_graph, make_tensor, make_tensor_value_info, make_model
from caffe2.python.onnx.helper import c2_native_run_net, c2_native_run_op
from onnx import mapping
import caffe2.python.onnx.frontend as c2_onnx
import caffe2.python.onnx.backend as c2
import numpy as np
from caffe2.python.models.download import ModelDownloader
from caffe2.python.onnx.tests.test_utils import TestCase
import caffe2.python._import_c_extension as C
class TestCaffe2Basic(TestCase):
def test_dummy_name(self):
    """Two consecutive draws from DummyName must produce distinct names."""
    generator = C.DummyName()
    first = generator.new_dummy_name()
    second = generator.new_dummy_name()
    assert first != second, "Got same names in different calls: {}".format(first)
def test_check_arguments(self):
    """convert_node accepts known attributes and rejects unexpected ones."""
    backend = C.Caffe2Backend()
    good = make_node("Add", inputs=["X", "Y"], outputs=["Z"])
    # A well-formed node converts without raising.
    backend.convert_node(good.SerializeToString())
    bad = make_node("Add", inputs=["X", "Y"], outputs=["Z"], foo=42, bar=56)
    expected_msg = "Don't know how to map unexpected argument (foo|bar)"
    with self.assertRaisesRegex(RuntimeError, expected_msg):
        backend.convert_node(bad.SerializeToString())
def test_dynamicslice_3inputs_graph(self):
    """DynamicSlice with data/starts/ends inputs (axes defaulted)."""
    node_def = make_node(
        "DynamicSlice", ["X1", "X2", "X3"], ["Y"])
    graph_def = make_graph(
        [node_def],
        name="test",
        inputs=[make_tensor_value_info("X1", onnx.TensorProto.FLOAT, (2, 4)),
                make_tensor_value_info("X2", onnx.TensorProto.INT32, (1, 2)),
                make_tensor_value_info("X3", onnx.TensorProto.INT32, (1, 2))],
        outputs=[make_tensor_value_info("Y", onnx.TensorProto.FLOAT, (1, 2))])
    model_def = make_model(graph_def, producer_name='caffe2-ref-test')
    x = [[1,2,3,4],[5,6,7,8]]
    start = [0, 0]
    end = [-1, 4]
    prepared = c2.prepare(model_def)
    output = prepared.run(inputs=[np.array(x), np.array(start), np.array(end)])
    # Expected: numpy slicing x[0:-1, 0:4].
    self.assertSameOutputs(output[0], np.array(x)[0:-1, 0:4])
def test_dynamicslice_4inputs_graph(self):
    """DynamicSlice with an explicit axes input that reorders the slices."""
    node_def = make_node(
        "DynamicSlice", ["X1", "X2", "X3", "axes"], ["Y"])
    graph_def = make_graph(
        [node_def],
        name="test",
        inputs=[make_tensor_value_info("X1", onnx.TensorProto.FLOAT, (2, 4)),
                make_tensor_value_info("X2", onnx.TensorProto.INT32, (1, 2)),
                make_tensor_value_info("X3", onnx.TensorProto.INT32, (1, 2)),
                make_tensor_value_info("axes", onnx.TensorProto.INT32, (1, 2))],
        outputs=[make_tensor_value_info("Y", onnx.TensorProto.FLOAT, (1, 2))])
    model_def = make_model(graph_def, producer_name='caffe2-ref-test')
    x = [[1,2,3,4],[5,6,7,8]]
    start = [0, 1]
    end = [4, 5]
    axes = [1, 0]
    prepared = c2.prepare(model_def)
    output = prepared.run(inputs=[np.array(x), np.array(start), np.array(end), np.array(axes)])
    # axes=[1, 0] maps (start, end) pairs to axis 1 then axis 0,
    # i.e. 0:4 on axis 1 and 1:5 on axis 0 -> x[1:5, 0:4].
    self.assertSameOutputs(output[0], np.array(x)[1:5, 0:4])
def test_relu_graph(self):
    """Relu matches numpy both via run_node and via a prepared one-op graph."""
    data = np.random.randn(3, 2).astype(np.float32)
    expected = np.clip(data, 0, np.inf)
    relu_node = make_node(
        "Relu", ["X"], ["Y"])
    # Single-node execution path.
    single_out = c2.run_node(
        relu_node, {"X": data})
    np.testing.assert_almost_equal(single_out.Y, expected)

    # Full-graph execution path through prepare().
    def _float_info(blob):
        return make_tensor_value_info(blob, onnx.TensorProto.FLOAT, [3, 2])

    graph = make_graph(
        [relu_node],
        name="test",
        inputs=[_float_info("X")],
        outputs=[_float_info("Y")])
    rep = c2.prepare(make_model(graph, producer_name='caffe2-ref-test'))
    np.testing.assert_almost_equal(rep.run(data).Y, expected)
def test_elementwiselinear(self):
    """ElementwiseLinear (with a non-default axis) survives onnx round-trip."""
    X = np.random.randn(4, 2, 5, 7, 3).astype(np.float32)
    # axis=3 splits X into (4*2*5, 7*3); W and B have 21 = 7*3 elements.
    W = np.random.randn(21).astype(np.float32)
    B = np.random.randn(21).astype(np.float32)
    predict_net = caffe2_pb2.NetDef()
    predict_net.name = 'test-elementwiselinear-net'
    predict_net.external_input[:] = ['X', 'W', 'B']
    predict_net.external_output[:] = ['Y']
    predict_net.op.extend([
        core.CreateOperator(
            'ElementwiseLinear',
            inputs=['X', 'W', 'B'],
            outputs=['Y'],
            axis=3,
        ),
    ])
    # Reference outputs from native caffe2 execution.
    ws, c2_outputs = c2_native_run_net(
        init_net=None,
        predict_net=predict_net,
        inputs=[X, W, B])
    onnx_model = c2_onnx.caffe2_net_to_onnx_model(
        predict_net=predict_net,
        value_info={
            'X': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[X.dtype], X.shape),
            'W': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[W.dtype], W.shape),
            'B': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[B.dtype], B.shape),
        })
    onnx_outputs = c2.run_model(onnx_model, inputs=[X, W, B])
    self.assertSameOutputs(c2_outputs, onnx_outputs)
def test_initializer(self):
    """A graph input backed by an initializer ('weight') is honored by prepare."""
    X = np.array([[1, 2], [3, 4]]).astype(np.float32)
    Y = np.array([[1, 2], [3, 4]]).astype(np.float32)
    weight = np.array([[1, 0], [0, 1]])
    graph_def = make_graph(
        [make_node("Add", ["X", "Y"], ["Z0"]),
         make_node("Cast", ["Z0"], ["Z"], to=onnx.TensorProto.FLOAT),
         make_node("Mul", ["Z", "weight"], ["W0"]),
         make_node("Tanh", ["W0"], ["W1"]),
         make_node("Sigmoid", ["W1"], ["W2"]),
         make_node("Scale", ["W2"], ["W3"], scale=-1.0)],
        name="test_initializer",
        inputs=[
            make_tensor_value_info("X", onnx.TensorProto.FLOAT, (2, 2)),
            make_tensor_value_info("Y", onnx.TensorProto.FLOAT, (2, 2)),
            make_tensor_value_info("weight", onnx.TensorProto.FLOAT, (2, 2)),
        ],
        outputs=[
            make_tensor_value_info("W3", onnx.TensorProto.FLOAT, (2, 2))
        ],
        initializer=[make_tensor("weight",
                                 onnx.TensorProto.FLOAT,
                                 [2, 2],
                                 weight.flatten().astype(float))]
    )

    def sigmoid(x):
        # Logistic function, used only to compute the reference value.
        return 1 / (1 + np.exp(-x))

    # Reference: -sigmoid(tanh((X + Y) * weight)), mirroring the node chain.
    W_ref = -sigmoid(np.tanh((X + Y) * weight))
    c2_rep = c2.prepare(make_model(graph_def, producer_name='caffe2-ref-test'))
    output = c2_rep.run({"X": X, "Y": Y})
    np.testing.assert_almost_equal(output["W3"], W_ref)
def test_reducemean(self):
    """All four caffe2 mean-reduction variants survive the onnx round-trip."""
    X = np.random.randn(4, 6, 10, 5, 3).astype(np.float32)
    predict_net = caffe2_pb2.NetDef()
    predict_net.name = 'test-reducemean-net'
    predict_net.external_input[:] = ['X']
    predict_net.external_output[:] = [
        'reduce_front_mean',
        'reduce_back_mean',
        'reduce_mean_0',
        'reduce_mean_1',
    ]
    predict_net.op.extend([
        # Reduce the first two dims.
        core.CreateOperator(
            'ReduceFrontMean',
            inputs=['X'],
            outputs=['reduce_front_mean'],
            num_reduce_dim=2,
        ),
        # Reduce the last two dims.
        core.CreateOperator(
            'ReduceBackMean',
            inputs=['X'],
            outputs=['reduce_back_mean'],
            num_reduce_dim=2,
        ),
        # Explicit axes, with and without keepdims.
        core.CreateOperator(
            'ReduceMean',
            inputs=['X'],
            outputs=['reduce_mean_0'],
            axes=[1, 3],
            keepdims=0,
        ),
        core.CreateOperator(
            'ReduceMean',
            inputs=['X'],
            outputs=['reduce_mean_1'],
            axes=[1, 3],
            keepdims=1,
        ),
    ])
    ws, c2_outputs = c2_native_run_net(
        init_net=None,
        predict_net=predict_net,
        inputs=[X])
    onnx_model = c2_onnx.caffe2_net_to_onnx_model(
        predict_net=predict_net,
        value_info={
            'X': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[X.dtype], X.shape)
        })
    onnx_outputs = c2.run_model(onnx_model, inputs=[X])
    self.assertSameOutputs(c2_outputs, onnx_outputs)
def test_upsample(self):
    """ResizeNearest with 2x scales matches after the onnx round-trip."""
    X = np.random.randn(1, 1, 2, 2).astype(np.float32)
    width_scale = 2.0
    height_scale = 2.0
    predict_net = caffe2_pb2.NetDef()
    predict_net.name = 'test-upsample-net'
    predict_net.external_input[:] = ['X']
    predict_net.external_output[:] = ['Y']
    predict_net.op.extend([
        core.CreateOperator(
            'ResizeNearest',
            inputs=['X'],
            outputs=['Y'],
            width_scale=width_scale,
            height_scale=height_scale,
        ),
    ])
    ws, c2_outputs = c2_native_run_net(
        init_net=None,
        predict_net=predict_net,
        inputs=[X])
    onnx_model = c2_onnx.caffe2_net_to_onnx_model(
        predict_net=predict_net,
        value_info={
            'X': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[X.dtype], X.shape)
        })
    onnx_outputs = c2.run_model(onnx_model, inputs=[X])
    self.assertSameOutputs(c2_outputs, onnx_outputs)
def test_fc(self):
    """FC with axis=2 round-trips even when the declared batch dims differ.

    The value_info deliberately uses X_fake's shape (leading dims differ
    from the actual input X) to exercise shape-agnostic conversion.
    """
    X_fake = np.zeros((3, 1, 3, 1, 7), dtype=np.float32)
    X = np.random.randn(5, 2, 3, 1, 7).astype(np.float32)
    W = np.random.randn(11, 21).astype(np.float32)
    B = np.random.randn(11).astype(np.float32)
    predict_net = caffe2_pb2.NetDef()
    predict_net.name = 'test-fc-net'
    predict_net.external_input[:] = ['X', 'W', 'B']
    predict_net.external_output[:] = ['Y']
    predict_net.op.extend([
        core.CreateOperator(
            'FC',
            inputs=['X', 'W', 'B'],
            outputs=['Y'],
            axis=2,
        ),
    ])
    ws, c2_outputs = c2_native_run_net(
        init_net=None,
        predict_net=predict_net,
        inputs=[X, W, B])
    onnx_model = c2_onnx.caffe2_net_to_onnx_model(
        predict_net=predict_net,
        value_info={
            'X': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[X.dtype], X_fake.shape),
            'W': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[W.dtype], W.shape),
            'B': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[B.dtype], B.shape),
        })
    onnx_outputs = c2.run_model(onnx_model, inputs=[X, W, B])
    self.assertSameOutputs(c2_outputs, onnx_outputs)
def test_gemm(self):
    """Gemm vs numpy across transA/transB, alpha/beta, and broadcast modes.

    A and B are transposed in place before each trans* case and reverted
    afterwards, so statement order matters throughout.
    """
    # simple
    A = np.random.randn(3, 2).astype(np.float32)
    B = np.random.randn(2, 4).astype(np.float32)
    C = np.random.randn(3, 4).astype(np.float32)
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"])
    output = c2.run_node(node_def, [A, B, C])
    np.testing.assert_almost_equal(output["Y"], np.dot(A, B) + C)
    # transA
    A = np.transpose(A)
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        transA=1)
    output = c2.run_node(node_def, [A, B, C])
    np.testing.assert_almost_equal(
        output["Y"],
        np.dot(np.transpose(A), B) + C)
    # revert A
    A = np.transpose(A)
    # transB
    B = np.transpose(B)
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        transB=1)
    output = c2.run_node(node_def, [A, B, C])
    np.testing.assert_almost_equal(
        output["Y"],
        np.dot(A, np.transpose(B)) + C)
    # revert B
    B = np.transpose(B)
    # scale
    alpha = np.random.random()
    beta = np.random.random()
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=alpha,
        beta=beta)
    output = c2.run_node(node_def, [A, B, C])
    np.testing.assert_almost_equal(
        output["Y"],
        alpha * np.dot(A, B) + beta * C)
    # setup broadcastable C
    C = np.random.randn(4).astype(np.float32)
    # broadcast for opset7
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=alpha,
        beta=beta)
    output = c2.run_node(node_def, [A, B, C], opset_version=7)
    np.testing.assert_almost_equal(
        output["Y"],
        alpha * np.dot(A, B) + beta * C)
    # broadcast for opset3 and 6
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=alpha,
        beta=beta,
        broadcast=1)
    output = c2.run_node(node_def, [A, B, C], opset_version=6)
    np.testing.assert_almost_equal(
        output["Y"],
        alpha * np.dot(A, B) + beta * C)
    # transB
    B = np.transpose(B)
    # transB and broadcast for opset7
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=alpha,
        beta=beta,
        transB=1)
    output = c2.run_node(node_def, [A, B, C], opset_version=7)
    np.testing.assert_almost_equal(
        output["Y"],
        alpha * np.dot(A, np.transpose(B)) + beta * C)
    # transB and broadcast for opset3 and 6
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=alpha,
        beta=beta,
        broadcast=1,
        transB=1)
    output = c2.run_node(node_def, [A, B, C], opset_version=6)
    np.testing.assert_almost_equal(
        output["Y"],
        alpha * np.dot(A, np.transpose(B)) + beta * C)
    # revert B
    B = np.transpose(B)
    # set a scalar to C
    C = np.random.randn(1).astype(np.float32)
    # scalar broadcast for opset7
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=alpha,
        beta=beta)
    output = c2.run_node(node_def, [A, B, C], opset_version=7)
    np.testing.assert_almost_equal(
        output["Y"],
        alpha * np.dot(A, B) + beta * C)
    # scalar broadcast for opset3 and 6
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=alpha,
        beta=beta,
        broadcast=1)
    output = c2.run_node(node_def, [A, B, C], opset_version=6)
    np.testing.assert_almost_equal(
        output["Y"],
        alpha * np.dot(A, B) + beta * C)
def test_gemm_conversion(self):
    """Gemm lowering: FC/FCTransposed vs MatMul+Add, depending on available
    shape info, opset version, and the broadcast/transB attributes."""
    node_def = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=2.,
        beta=3.)
    node_def_broadcast = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=2.,
        beta=3.,
        broadcast=1)
    node_def_transpose_b = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=2.,
        beta=3.,
        transB=1)
    node_def_transpose_b_broadcast = make_node(
        'Gemm',
        ['A', 'B', 'C'],
        ["Y"],
        alpha=2.,
        beta=3.,
        transB=1,
        broadcast=1)
    backend = C.Caffe2Backend()
    # without broadcast and without shape info, gemm will be
    # converted to matmul + add
    _, op_strs = backend.convert_node(node_def.SerializeToString())
    op_names = []
    for s in op_strs:
        op = caffe2_pb2.OperatorDef()
        op.ParseFromString(s)
        op_names.append(op.type)
    self.assertEqual(op_names, ['Scale', 'Scale', 'MatMul', 'Add'])
    # opset7
    # If C is a 1d tensor, gemm will be converted to FC/FCTransposed
    _, op_strs = backend.convert_node(node_def_transpose_b.SerializeToString(
    ), [make_tensor_value_info("C", onnx.TensorProto.FLOAT, (3,)).SerializeToString()],
        7)
    op_names = []
    for s in op_strs:
        op = caffe2_pb2.OperatorDef()
        op.ParseFromString(s)
        op_names.append(op.type)
    self.assertEqual(op_names, ['Scale', 'Scale', 'FC'])
    _, op_strs = backend.convert_node(node_def.SerializeToString(
    ), [make_tensor_value_info("C", onnx.TensorProto.FLOAT, (3,)).SerializeToString()],
        7)
    op_names = []
    for s in op_strs:
        op = caffe2_pb2.OperatorDef()
        op.ParseFromString(s)
        op_names.append(op.type)
    self.assertEqual(op_names, ['Scale', 'Scale', 'FCTransposed'])
    # opset6 without broadcast(C should match A*B's dim)
    # The gemm will be converted to matmul + add, since the FC requires c
    # to be 1d tensor.
    _, op_strs = backend.convert_node(node_def.SerializeToString(
    ), [make_tensor_value_info("A", onnx.TensorProto.FLOAT, (3,2)).SerializeToString(),
        make_tensor_value_info("B", onnx.TensorProto.FLOAT, (2,3)).SerializeToString(),
        make_tensor_value_info("C", onnx.TensorProto.FLOAT, (3,3)).SerializeToString()],
        6)
    op_names = []
    for s in op_strs:
        op = caffe2_pb2.OperatorDef()
        op.ParseFromString(s)
        op_names.append(op.type)
    self.assertEqual(op_names, ['Scale', 'Scale', 'MatMul', 'Add'])
    # opset6 with broadcast
    # If C is a 1d tensor, gemm will be converted to FC/FCTransposed
    _, op_strs = backend.convert_node(node_def_transpose_b_broadcast.SerializeToString(
    ), [make_tensor_value_info("C", onnx.TensorProto.FLOAT, (3,)).SerializeToString()],
        6)
    op_names = []
    for s in op_strs:
        op = caffe2_pb2.OperatorDef()
        op.ParseFromString(s)
        op_names.append(op.type)
    self.assertEqual(op_names, ['Scale', 'Scale', 'FC'])
    _, op_strs = backend.convert_node(node_def_broadcast.SerializeToString(
    ), [make_tensor_value_info("C", onnx.TensorProto.FLOAT, (3,)).SerializeToString()],
        6)
    op_names = []
    for s in op_strs:
        op = caffe2_pb2.OperatorDef()
        op.ParseFromString(s)
        op_names.append(op.type)
    self.assertEqual(op_names, ['Scale', 'Scale', 'FCTransposed'])
    # opset7
    # If C is a scalar and B's last dim is 1, gemm will be converted to FC/FCTransposed
    _, op_strs = backend.convert_node(node_def_transpose_b.SerializeToString(
    ), [make_tensor_value_info("B", onnx.TensorProto.FLOAT, (1,2)).SerializeToString(),
        make_tensor_value_info("C", onnx.TensorProto.FLOAT, (1,)).SerializeToString()],
        7)
    op_names = []
    for s in op_strs:
        op = caffe2_pb2.OperatorDef()
        op.ParseFromString(s)
        op_names.append(op.type)
    self.assertEqual(op_names, ['Scale', 'Scale', 'FC'])
    _, op_strs = backend.convert_node(node_def.SerializeToString(
    ), [make_tensor_value_info("B", onnx.TensorProto.FLOAT, (2,1)).SerializeToString(),
        make_tensor_value_info("C", onnx.TensorProto.FLOAT, (1,)).SerializeToString()],
        7)
    op_names = []
    for s in op_strs:
        op = caffe2_pb2.OperatorDef()
        op.ParseFromString(s)
        op_names.append(op.type)
    self.assertEqual(op_names, ['Scale', 'Scale', 'FCTransposed'])
    # If C is a scalar and B's last dim is not 1, gemm will be converted
    # to matmul + add.
    _, op_strs = backend.convert_node(node_def_transpose_b.SerializeToString(
    ), [make_tensor_value_info("B", onnx.TensorProto.FLOAT, (2,2)).SerializeToString(),
        make_tensor_value_info("C", onnx.TensorProto.FLOAT, (1,)).SerializeToString()],
        7)
    op_names = []
    for s in op_strs:
        op = caffe2_pb2.OperatorDef()
        op.ParseFromString(s)
        op_names.append(op.type)
    self.assertEqual(op_names, ['Scale', 'Scale', 'MatMul', 'Add'])
    # If C is a scalar and B's shape info is not available,
    # gemm will be converted to matmul + add.
    _, op_strs = backend.convert_node(node_def_transpose_b.SerializeToString(
    ), [make_tensor_value_info("C", onnx.TensorProto.FLOAT, (1,)).SerializeToString()],
        7)
    op_names = []
    for s in op_strs:
        op = caffe2_pb2.OperatorDef()
        op.ParseFromString(s)
        op_names.append(op.type)
    self.assertEqual(op_names, ['Scale', 'Scale', 'MatMul', 'Add'])
def test_mergedim(self):
    """MergeDim (collapses the two leading dims) survives the onnx round-trip."""
    X = np.random.randn(2, 3, 1, 5).astype(np.float32)
    predict_net = caffe2_pb2.NetDef()
    predict_net.name = 'test-mergedim-net'
    predict_net.external_input[:] = ['X']
    predict_net.external_output[:] = ['Y']
    predict_net.op.extend([
        core.CreateOperator(
            'MergeDim',
            inputs=['X'],
            outputs=['Y'],
        ),
    ])
    ws, c2_outputs = c2_native_run_net(
        init_net=None,
        predict_net=predict_net,
        inputs=[X])
    onnx_model = c2_onnx.caffe2_net_to_onnx_model(
        predict_net=predict_net,
        value_info={
            'X': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[X.dtype], X.shape),
        })
    onnx_outputs = c2.run_model(onnx_model, inputs=[X])
    self.assertSameOutputs(c2_outputs, onnx_outputs)
def test_tensor_filling_ops(self):
    """_create_tensor_filling_op reproduces tensor values for every dtype."""
    for dtype in [
            onnx.TensorProto.FLOAT,
            onnx.TensorProto.DOUBLE,
            onnx.TensorProto.BOOL,
            onnx.TensorProto.INT8,
            onnx.TensorProto.INT16,
            onnx.TensorProto.INT32,
            onnx.TensorProto.INT64,
            onnx.TensorProto.UINT8,
            onnx.TensorProto.UINT16,
            onnx.TensorProto.UINT32,
    ]:
        shape = (1, 2, 3)
        vals = np.random.randn(*shape)
        if dtype != onnx.TensorProto.BOOL:
            # Widen the value range before casting to integer dtypes.
            vals *= 5
        vals = vals.astype(
            mapping.TENSOR_TYPE_TO_NP_TYPE[dtype])
        tensor = make_tensor(
            name='test-tensor-{}'.format(dtype),
            data_type=dtype,
            dims=[1, 2, 3],
            vals=vals.flatten().tolist(),
        )
        op = c2.Caffe2Backend._create_tensor_filling_op(tensor)
        # A filling op takes no inputs and writes exactly the tensor's name.
        self.assertEqual(len(op.input), 0)
        self.assertEqual(op.output, [tensor.name])
        ws, output = c2_native_run_op(op, inputs=[])
        self.assertEqual(len(output), 1)
        np.testing.assert_almost_equal(output[0], vals)
        np.testing.assert_almost_equal(ws.FetchBlob(op.output[0]), vals)
def test_tensor_filling_ops_c_backend(self):
for dtype in [
onnx.TensorProto.FLOAT,
onnx.TensorProto.DOUBLE,
onnx.TensorProto.BOOL,
onnx.TensorProto.INT8,
onnx.TensorProto.INT16,
onnx.TensorProto.INT32,
onnx.TensorProto.INT64,
onnx.TensorProto.UINT8,
onnx.TensorProto.UINT16,
onnx.TensorProto.UINT32,
]:
shape = (1, 2, 3)
vals = np.random.randn(*shape)
if dtype != onnx.TensorProto.BOOL:
vals *= 5
vals = vals.astype(
mapping.TENSOR_TYPE_TO_NP_TYPE[dtype])
tensor = make_tensor(
name='test-tensor-{}'.format(dtype),
data_type=dtype,
dims=[1, 2, 3],
vals=vals.flatten().tolist(),
)
b = C.Caffe2Backend()
op = caffe2_pb2.OperatorDef()
op.ParseFromString(b._build_tensor_filling_op(tensor.SerializeToString(), ''))
self.assertEqual(len(op.input), 0)
self.assertEqual(op.output, [tensor.name])
ws, output = c2_native_run_op(op, inputs=[])
self.assertEqual(len(output), 1)
np.testing.assert_almost_equal(output[0], vals)
np.testing.assert_almost_equal(ws.FetchBlob(op.output[0]), vals)
def test_concat(self):
I0 = np.random.randn(20, 4).astype(np.float32)
I1 = np.random.randn(20, 4).astype(np.float32)
for i in range(2):
predict_net = caffe2_pb2.NetDef()
predict_net.name = 'test-concat-net'
predict_net.external_input[:] = ['I0', 'I1']
predict_net.external_output[:] = ['Y', 'output_dim']
predict_net.op.extend([
core.CreateOperator(
'Concat',
inputs=['I0', 'I1'],
outputs=['Y', 'output_dim'],
axis=1,
add_axis=(1 if i == 0 else 0),
),
])
ws, c2_outputs = c2_native_run_net(
init_net=None,
predict_net=predict_net,
inputs=[I0, I1])
onnx_model = c2_onnx.caffe2_net_to_onnx_model(
predict_net=predict_net,
value_info={
'I0': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[I0.dtype], I0.shape),
'I1': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[I1.dtype], I1.shape),
})
onnx_outputs = c2.run_model(onnx_model, inputs=[I0, I1])
self.assertSameOutputs(c2_outputs, onnx_outputs)
def test_slice(self):
X = np.random.randn(1, 2, 3).astype(np.float32)
starts = np.array([0, 1, 0], dtype=np.int32)
ends = np.array([-1, 2, 3], dtype=np.int32)
predict_net = caffe2_pb2.NetDef()
predict_net.name = 'test-slice-net'
predict_net.external_input[:] = ['X']
predict_net.external_output[:] = ['Y']
predict_net.op.extend([
core.CreateOperator(
'Slice',
inputs=['X'],
outputs=['Y'],
starts=starts,
ends=ends,
),
])
ws, c2_outputs = c2_native_run_net(
init_net=None,
predict_net=predict_net,
inputs=[X])
onnx_model = c2_onnx.caffe2_net_to_onnx_model(
predict_net=predict_net,
value_info={
'X': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[X.dtype], X.shape)
})
onnx_outputs = c2.run_model(onnx_model, inputs=[X])
self.assertSameOutputs(c2_outputs, onnx_outputs)
def test_cast(self):
X = np.random.randn(1, 2, 3).astype(np.float32)
for to_type in ['INT8', caffe2_pb2.TensorProto.INT8,
'DOUBLE', caffe2_pb2.TensorProto.DOUBLE]:
predict_net = caffe2_pb2.NetDef()
predict_net.name = 'test-cast-net'
predict_net.external_input[:] = ['X']
predict_net.external_output[:] = ['Y']
predict_net.op.extend([
core.CreateOperator(
'Cast',
inputs=['X'],
outputs=['Y'],
to=to_type,
),
])
ws, c2_outputs = c2_native_run_net(
init_net=None,
predict_net=predict_net,
inputs=[X])
onnx_model = c2_onnx.caffe2_net_to_onnx_model(
predict_net=predict_net,
value_info={
'X': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[X.dtype], X.shape)
})
onnx_outputs = c2.run_model(onnx_model, inputs=[X])
self.assertSameOutputs(c2_outputs, onnx_outputs)
class TestCaffe2End2End(TestCase):
    """End-to-end tests: download a caffe2 model-zoo net, convert it to an ONNX
    model, and check that the ONNX-caffe2 backend reproduces the native outputs."""

    def setUp(self):
        # Downloader caches under the directory named by the ONNX_MODELS env var.
        self.model_downloader = ModelDownloader('ONNX_MODELS')

    def _test_net(self,
                  net_name,
                  input_blob_dims=(1, 3, 224, 224),
                  decimal=7):
        """Run *net_name* natively and through ONNX conversion; compare outputs.

        :param net_name: model-zoo name understood by ModelDownloader.
        :param input_blob_dims: NCHW shape of the random input blob.
        :param decimal: precision passed to assertSameOutputs.
        Skips (rather than fails) when the model cannot be downloaded.
        """
        # Fixed seed so failures are reproducible.
        np.random.seed(seed=0)
        try:
            c2_init_net, c2_predict_net, value_info, debug_str = self.model_downloader.get_c2_model_dbg(net_name)
        except Exception as e:
            # catch IOError/OSError that is caused by FileNotFoundError and PermissionError
            # This is helpful because sometimes we get errors due to gfs not available
            # get_c2_model_dbg wraps URLError/HTTPErrors into generic Exception
            # Skip the tests if model can not be downloaded due to the any of the above
            print("\n_test_net exception: ", e)
            self.skipTest(str(e))

        # start to run the model and compare outputs
        n, c, h, w = input_blob_dims
        data = np.random.randn(n, c, h, w).astype(np.float32)
        inputs = [data]
        _, c2_outputs = c2_native_run_net(c2_init_net, c2_predict_net, inputs, debug_str)
        del _

        model = c2_onnx.caffe2_net_to_onnx_model(
            predict_net=c2_predict_net,
            init_net=c2_init_net,
            value_info=value_info,
        )
        c2_ir = c2.prepare(model)
        onnx_outputs = c2_ir.run(inputs)
        self.assertSameOutputs(c2_outputs, onnx_outputs, decimal=decimal)

    @unittest.skipIf(
        os.environ.get('SKIP_IN_FB'),
        'Skip internally!')
    def test_alexnet(self):
        # Looser tolerance for this model.
        self._test_net('bvlc_alexnet', decimal=4)

    @unittest.skipIf(
        os.environ.get('SKIP_IN_FB'),
        'Skip internally!')
    def test_resnet50(self):
        self._test_net('resnet50')

    @unittest.skipIf(
        os.environ.get('JENKINS_URL') or os.environ.get('SKIP_IN_FB'),
        'Taking too long to download!')
    def test_vgg16(self):
        self._test_net('vgg16')

    @unittest.skipIf(
        os.environ.get('JENKINS_URL') or os.environ.get('SKIP_IN_FB'),
        'Taking too long to download!')
    def test_zfnet(self):
        self._test_net('zfnet')

    @unittest.skipIf(
        os.environ.get('SKIP_IN_FB'),
        'Skip internally!')
    def test_inception_v1(self):
        # Looser tolerance for this model.
        self._test_net('inception_v1', decimal=2)

    @unittest.skipIf(
        os.environ.get('SKIP_IN_FB'),
        'Skip internally!')
    def test_inception_v2(self):
        self._test_net('inception_v2')

    @unittest.skipIf(
        os.environ.get('SKIP_IN_FB'),
        'Skip internally!')
    def test_squeezenet(self):
        self._test_net('squeezenet')

    @unittest.skipIf(
        os.environ.get('SKIP_IN_FB'),
        'Skip internally!')
    def test_densenet121(self):
        self._test_net('densenet121')

    @unittest.skipIf(
        os.environ.get('SKIP_IN_FB'),
        'Skip internally!')
    def test_bvlc_googlenet(self):
        self._test_net('bvlc_googlenet')

    @unittest.skipIf(
        os.environ.get('SKIP_IN_FB'),
        'Skip internally!')
    def test_bvlc_reference_caffenet(self):
        self._test_net('bvlc_reference_caffenet')

    @unittest.skipIf(
        os.environ.get('SKIP_IN_FB'),
        'Skip internally!')
    def test_bvlc_reference_rcnn_ilsvrc13(self):
        self._test_net('bvlc_reference_rcnn_ilsvrc13')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
pytorch-master
|
caffe2/python/onnx/tests/c2_ref_test.py
|
###################################################################################################
# ATTENTION! This test will most probably fail if you install TensorRT 6.0.1 only.
# That's because it's shipped with older version of ONNX parser not supporting some
# required features. To make it work please use new version: https://github.com/onnx/onnx-tensorrt
# Just clone it and do something like this:
#
# ~/pt/third_party/onnx-tensorrt$ mkdir build/
# ~/pt/third_party/onnx-tensorrt$ cd build/
# ~/pt/third_party/onnx-tensorrt/build$ cmake ..
# ~/pt/third_party/onnx-tensorrt/build$ make
# ~/pt/third_party/onnx-tensorrt/build$ sudo cp libnvonnxparser.so.6.0.1 /usr/lib/x86_64-linux-gnu
#
# This note is valid for 6.0.1 release only. September 18th, 2019.
###################################################################################################
import os
import unittest
from PIL import Image
import numpy as np
import torch
import torchvision.models as models
import pycuda.driver as cuda
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
# Shared TensorRT logger; WARNING keeps builder/parser output quiet unless something is wrong.
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def allocate_buffers(engine):
    """Allocate pinned host buffers, device buffers, and a CUDA stream.

    Binding 0 is assumed to be the input and binding 1 the output; both are
    sized from the engine and treated as float32.
    Returns (h_input, d_input, h_output, d_output, stream).
    """
    fp32 = trt.nptype(trt.float32)
    host_in = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)),
                                    dtype=fp32)
    host_out = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)),
                                     dtype=fp32)
    # Device buffers mirror the pinned host buffers byte-for-byte.
    dev_in = cuda.mem_alloc(host_in.nbytes)
    dev_out = cuda.mem_alloc(host_out.nbytes)
    return host_in, dev_in, host_out, dev_out, cuda.Stream()
def load_normalized_test_case(input_shape, test_image, pagelocked_buffer, normalization_hint):
    """Load *test_image*, resize to (h, w), transpose to CHW, normalize, and
    copy the flattened result into *pagelocked_buffer*.

    :param input_shape: (c, h, w) target shape.
    :param test_image: path of the image file to load.
    :param pagelocked_buffer: pinned host buffer the normalized pixels go into.
    :param normalization_hint: preprocessing convention:
        0 -> (x / 255 - 0.45) / 0.225  (torchvision-style mean/std)
        1 -> x / 256 - 0.5
    :returns: the image path, so callers can match predictions to filenames.
    :raises ValueError: for an unknown hint.  Previously an unknown hint made
        ``normalize_image`` fall through and return None, which crashed
        obscurely inside ``np.copyto``.
    """
    def normalize_image(image):
        c, h, w = input_shape
        image_arr = np.asarray(image.resize((w, h), Image.ANTIALIAS)).transpose([2, 0, 1])\
                .astype(trt.nptype(trt.float32)).ravel()
        if normalization_hint == 0:
            return (image_arr / 255.0 - 0.45) / 0.225
        elif normalization_hint == 1:
            return (image_arr / 256.0 - 0.5)
        raise ValueError(
            "Unknown normalization_hint: {}".format(normalization_hint))

    np.copyto(pagelocked_buffer, normalize_image(Image.open(test_image)))
    return test_image
class Test_PT_ONNX_TRT(unittest.TestCase):
    """Export torchvision models to ONNX, build TensorRT engines from the
    exported files, and sanity-check classification on a few sample images."""

    # NOTE(review): __enter__ without a matching __exit__ is unusual on a
    # TestCase; presumably vestigial — confirm before using instances in `with`.
    def __enter__(self):
        return self

    def setUp(self):
        # Resolve sample images and the label list relative to this file.
        data_path = os.path.join(os.path.dirname(__file__), 'data')
        self.image_files=["binoculars.jpeg", "reflex_camera.jpeg", "tabby_tiger_cat.jpg"]
        for index, f in enumerate(self.image_files):
            self.image_files[index] = os.path.abspath(os.path.join(data_path, f))
            if not os.path.exists(self.image_files[index]):
                raise FileNotFoundError(self.image_files[index] + " does not exist.")
        with open(os.path.abspath(os.path.join(data_path, "class_labels.txt")), 'r') as f:
            # One label per line; index matches the model's output class index.
            self.labels = f.read().split('\n')

    def build_engine_onnx(self, model_file):
        """Parse an ONNX file and build a TensorRT engine from it.

        flags=1 requests an explicit-batch network; parse errors fail the test.
        """
        with trt.Builder(TRT_LOGGER) as builder, builder.create_network(flags = 1) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
            builder_config = builder.create_builder_config()
            # 1 << 33 bytes (8 GiB) of builder scratch space.
            builder_config.max_workspace_size = 1 << 33
            with open(model_file, 'rb') as model:
                if not parser.parse(model.read()):
                    for error in range(parser.num_errors):
                        self.fail("ERROR: {}".format(parser.get_error(error)))
            return builder.build_engine(network, builder_config)

    def _test_model(self, model_name, input_shape = (3, 224, 224), normalization_hint = 0):
        """Export the named torchvision model, run its TRT engine over the
        sample images, and allow at most one misclassification."""
        model = getattr(models, model_name)(pretrained=True)
        shape = (1,) + input_shape
        dummy_input = (torch.randn(shape),)
        onnx_name = model_name + ".onnx"

        torch.onnx.export(model,
                    dummy_input,
                    onnx_name,
                    input_names = [],
                    output_names = [],
                    verbose=False,
                    export_params=True,
                    opset_version=9)

        with self.build_engine_onnx(onnx_name) as engine:
            h_input, d_input, h_output, d_output, stream = allocate_buffers(engine)
            with engine.create_execution_context() as context:
                err_count = 0
                for index, f in enumerate(self.image_files):
                    test_case = load_normalized_test_case(input_shape, f,\
                            h_input, normalization_hint)
                    # Async H2D copy, inference, D2H copy — then sync the stream
                    # before reading h_output.
                    cuda.memcpy_htod_async(d_input, h_input, stream)
                    context.execute_async_v2(bindings=[d_input, d_output],
                                stream_handle=stream.handle)
                    cuda.memcpy_dtoh_async(h_output, d_output, stream)
                    stream.synchronize()

                    amax = np.argmax(h_output)
                    pred = self.labels[amax]
                    # The expected label is encoded in the image filename
                    # (e.g. tabby_tiger_cat.jpg).
                    if "_".join(pred.split()) not in\
                       os.path.splitext(os.path.basename(test_case))[0]:
                        err_count = err_count + 1
                self.assertLessEqual(err_count, 1, "Too many recognition errors")

    def test_alexnet(self):
        # AlexNet expects 227x227 inputs.
        self._test_model("alexnet", (3, 227, 227))

    def test_resnet18(self):
        self._test_model("resnet18")
    def test_resnet34(self):
        self._test_model("resnet34")
    def test_resnet50(self):
        self._test_model("resnet50")
    def test_resnet101(self):
        self._test_model("resnet101")
    @unittest.skip("Takes 2m")
    def test_resnet152(self):
        self._test_model("resnet152")

    def test_resnet50_2(self):
        self._test_model("wide_resnet50_2")
    @unittest.skip("Takes 2m")
    def test_resnet101_2(self):
        self._test_model("wide_resnet101_2")

    def test_squeezenet1_0(self):
        self._test_model("squeezenet1_0")
    def test_squeezenet1_1(self):
        self._test_model("squeezenet1_1")

    def test_googlenet(self):
        self._test_model("googlenet")
    def test_inception_v3(self):
        self._test_model("inception_v3")

    # The mnasnet/mobilenet family uses the x/256 - 0.5 preprocessing (hint 1).
    def test_mnasnet0_5(self):
        self._test_model("mnasnet0_5", normalization_hint = 1)
    def test_mnasnet1_0(self):
        self._test_model("mnasnet1_0", normalization_hint = 1)

    def test_mobilenet_v2(self):
        self._test_model("mobilenet_v2", normalization_hint = 1)

    def test_shufflenet_v2_x0_5(self):
        self._test_model("shufflenet_v2_x0_5")
    def test_shufflenet_v2_x1_0(self):
        self._test_model("shufflenet_v2_x1_0")

    def test_vgg11(self):
        self._test_model("vgg11")
    def test_vgg11_bn(self):
        self._test_model("vgg11_bn")
    def test_vgg13(self):
        self._test_model("vgg13")
    def test_vgg13_bn(self):
        self._test_model("vgg13_bn")
    def test_vgg16(self):
        self._test_model("vgg16")
    def test_vgg16_bn(self):
        self._test_model("vgg16_bn")
    def test_vgg19(self):
        self._test_model("vgg19")
    def test_vgg19_bn(self):
        self._test_model("vgg19_bn")

    @unittest.skip("Takes 13m")
    def test_densenet121(self):
        self._test_model("densenet121")
    @unittest.skip("Takes 25m")
    def test_densenet161(self):
        self._test_model("densenet161")
    @unittest.skip("Takes 27m")
    def test_densenet169(self):
        self._test_model("densenet169")
    @unittest.skip("Takes 44m")
    def test_densenet201(self):
        self._test_model("densenet201")
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
pytorch-master
|
caffe2/python/trt/test_pt_onnx_trt.py
|
pytorch-master
|
caffe2/python/trt/__init__.py
|
|
## @package onnx
#Module caffe2.python.trt.transform
"""
TensorRT related transformation
Note that ONNX-TRT enforce an NCHW input!
"""
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace
import caffe2.python._import_c_extension as C
import numpy as np
def _dim_values_to_list(dim_values):
return [x.dim_value for x in dim_values]
def _get_output_shapes(output_value_infos):
names = [x.name for x in output_value_infos]
shapes = [_dim_values_to_list(x.type.tensor_type.shape.dim) for x in output_value_infos]
return dict(zip(names, shapes))
def check_gpu_():
    """Raise if this caffe2 build has no CUDA support (required for TensorRT).

    :raises Exception: when ``C.get_cuda_version`` is unavailable or fails.
    """
    try:
        C.get_cuda_version()
    except Exception as e:
        # Chain the original error so the root cause (missing symbol, CPU-only
        # build) stays visible in the traceback instead of being discarded.
        raise Exception("TensorRT related functions require CUDA support") from e
def convert_onnx_model_to_trt_op(onnx_model,
                                 max_batch_size=64,
                                 max_workspace_size=2*1024*1024,
                                 verbosity=1,
                                 debug_builder=False):
    """
    Convert the whole ONNX model to a TensorRT C2 op
    """
    check_gpu_()
    # The C++ side works on serialized protos and returns a serialized
    # OperatorDef wrapping the built TensorRT engine.
    serialized_op = C.onnx_to_trt_op(
        onnx_model.SerializeToString(),
        _get_output_shapes(onnx_model.graph.output),
        max_batch_size,
        max_workspace_size,
        verbosity,
        debug_builder,
    )
    trt_op = caffe2_pb2.OperatorDef()
    trt_op.ParseFromString(serialized_op)
    return trt_op
# Assume the workspace is already filled with init weights
def _infer_shapes(pred_net, inputs):
    """Run *pred_net* once and record the shape of every blob it touches.

    NOTE: ``inputs`` is not read here; blobs are expected to already live in
    the workspace when this is called.
    """
    workspace.RunNetOnce(pred_net)
    hints = {}
    for op in pred_net.op:
        # Per op: outputs first, then inputs — same order as the original
        # two separate loops, so the hint dict is populated identically.
        for blob_name in list(op.output) + list(op.input):
            if blob_name in hints:
                continue
            blob = workspace.FetchBlob(blob_name)
            if hasattr(blob, 'shape'):
                hints[blob_name] = blob.shape
    return hints
def transform_caffe2_net(
    pred_net,
    input_shapes,
    populate_shapes = False,
    max_batch_size=64,
    max_workspace_size=2*1024*1024,
    verbosity=1,
    debug_builder=False,
    build_serializable_op=True):
    """
    Transform the caffe2_net by collapsing TRT-runnable nodes into trt c2 ops
    """
    check_gpu_()

    shape_hints = {}
    if populate_shapes:
        # Hacky way to infer shapes as not all our operators have shape inference function.
        # Normally this is not needed
        random_inputs = {
            name: np.random.randn(*dims).astype(np.float32)
            for name, dims in input_shapes.items()
        }
        shape_hints = _infer_shapes(pred_net, random_inputs)

    # Caller-supplied shapes take precedence over inferred ones.
    shape_hints.update(input_shapes)

    transformed = C.transform_trt(pred_net.SerializeToString(),
                                  shape_hints,
                                  max_batch_size,
                                  max_workspace_size,
                                  verbosity,
                                  debug_builder,
                                  build_serializable_op)
    cut_net = caffe2_pb2.NetDef()
    cut_net.ParseFromString(transformed)
    return cut_net
|
pytorch-master
|
caffe2/python/trt/transform.py
|
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import onnx
import onnx.defs
from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model
from onnx.backend.base import namedtupledict
from caffe2.python.models.download import ModelDownloader
import caffe2.python.onnx.backend as c2
from caffe2.python.onnx.workspace import Workspace
from caffe2.python.trt.transform import convert_onnx_model_to_trt_op, transform_caffe2_net
from caffe2.python.onnx.tests.test_utils import TestCase
import numpy as np
import os.path
import time
import unittest
import tarfile
import tempfile
import shutil
from six.moves.urllib.request import urlretrieve
def _print_net(net):
for i in net.external_input:
print("Input: {}".format(i))
for i in net.external_output:
print("Output: {}".format(i))
for op in net.op:
print("Op {}".format(op.type))
for x in op.input:
print(" input: {}".format(x))
for y in op.output:
print(" output: {}".format(y))
def _base_url(opset_version):
return 'https://s3.amazonaws.com/download.onnx/models/opset_{}'.format(opset_version)
# TODO: This is copied from https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py. Maybe we should
# expose a model retrival API from ONNX
def _download_onnx_model(model_name, opset_version):
    """Download and extract *model_name* for *opset_version* into the local
    ONNX model cache, and return the model directory.

    Cache root: $ONNX_MODELS, else $ONNX_HOME/models, else ~/.onnx/models.
    An existing-but-incomplete model directory (no model.onnx) is renamed to
    <dir>.old.<n> before a fresh download.
    """
    onnx_home = os.path.expanduser(os.getenv('ONNX_HOME', os.path.join('~', '.onnx')))
    models_dir = os.getenv('ONNX_MODELS',
                           os.path.join(onnx_home, 'models'))
    model_dir = os.path.join(models_dir, model_name)
    if not os.path.exists(os.path.join(model_dir, 'model.onnx')):
        if os.path.exists(model_dir):
            # Preserve the stale directory under the first free .old.<n> suffix.
            bi = 0
            while True:
                dest = '{}.old.{}'.format(model_dir, bi)
                if os.path.exists(dest):
                    bi += 1
                    continue
                shutil.move(model_dir, dest)
                break
        os.makedirs(model_dir)

        # On Windows, NamedTemporaryFile can not be opened for a
        # second time
        url = '{}/{}.tar.gz'.format(_base_url(opset_version), model_name)
        download_file = tempfile.NamedTemporaryFile(delete=False)
        try:
            # Close first so urlretrieve can reopen the path (Windows quirk above).
            download_file.close()
            print('Start downloading model {} from {}'.format(
                model_name, url))
            urlretrieve(url, download_file.name)
            print('Done')
            with tarfile.open(download_file.name) as t:
                t.extractall(models_dir)
        except Exception as e:
            print('Failed to prepare data for model {}: {}'.format(
                model_name, e))
            raise
        finally:
            # Always drop the temp archive, success or failure.
            os.remove(download_file.name)
    return model_dir
class TensorRTOpTest(TestCase):
    """Tests converting whole ONNX models into a single TensorRT caffe2 op and
    comparing its outputs against the pure-caffe2 ONNX backend."""

    def setUp(self):
        self.opset_version = onnx.defs.onnx_opset_version()

    def _test_relu_graph(self, X, batch_size, trt_max_batch_size):
        """Build a one-node Relu graph, run it via c2 and via the TRT op, compare."""
        node_def = make_node("Relu", ["X"], ["Y"])
        Y_c2 = c2.run_node(node_def, {"X": X})
        graph_def = make_graph(
            [node_def],
            name="test",
            inputs=[make_tensor_value_info("X", onnx.TensorProto.FLOAT, [batch_size, 1, 3, 2])],
            outputs=[make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [batch_size, 1, 3, 2])])
        model_def = make_model(graph_def, producer_name='relu-test')
        op_outputs = [x.name for x in model_def.graph.output]
        op = convert_onnx_model_to_trt_op(model_def, max_batch_size=trt_max_batch_size)
        # The TRT op must run on a CUDA device.
        device_option = core.DeviceOption(caffe2_pb2.CUDA, 0)
        op.device_option.CopyFrom(device_option)
        Y_trt = None
        ws = Workspace()
        with core.DeviceScope(device_option):
            ws.FeedBlob("X", X)
            ws.RunOperatorsOnce([op])
            output_values = [ws.FetchBlob(name) for name in op_outputs]
            Y_trt = namedtupledict('Outputs', op_outputs)(*output_values)
        np.testing.assert_almost_equal(Y_c2, Y_trt)

    @unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
    def test_relu_graph_simple(self):
        X = np.random.randn(1, 1, 3, 2).astype(np.float32)
        self._test_relu_graph(X, 1, 50)

    @unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
    def test_relu_graph_big_batch(self):
        # Batch larger than trt_max_batch_size exercises batching behavior.
        X = np.random.randn(52, 1, 3, 2).astype(np.float32)
        self._test_relu_graph(X, 52, 50)

    def _test_onnx_importer(self, model_name, data_input_index, opset_version=onnx.defs.onnx_opset_version()):
        """Download an ONNX zoo model, run it via the c2 backend and via the TRT
        op, and compare outputs within rtol=1e-3.

        :param data_input_index: index of the data input among graph.input
            (negative indices skip trailing/leading weight inputs).
        """
        model_dir = _download_onnx_model(model_name, opset_version)
        model_def = onnx.load(os.path.join(model_dir, 'model.onnx'))
        input_blob_dims = [int(x.dim_value) for x in model_def.graph.input[data_input_index].type.tensor_type.shape.dim]
        op_inputs = [x.name for x in model_def.graph.input]
        op_outputs = [x.name for x in model_def.graph.output]
        print("{}".format(op_inputs))
        data = np.random.randn(*input_blob_dims).astype(np.float32)
        Y_c2 = c2.run_model(model_def, {op_inputs[data_input_index]: data})
        op = convert_onnx_model_to_trt_op(model_def, verbosity=3)
        device_option = core.DeviceOption(caffe2_pb2.CUDA, 0)
        op.device_option.CopyFrom(device_option)
        Y_trt = None
        ws = Workspace()
        with core.DeviceScope(device_option):
            ws.FeedBlob(op_inputs[data_input_index], data)
            if opset_version >= 5:
                # Some newer models from ONNX Zoo come with pre-set "data_0" input
                ws.FeedBlob("data_0", data)
            ws.RunOperatorsOnce([op])
            output_values = [ws.FetchBlob(name) for name in op_outputs]
            Y_trt = namedtupledict('Outputs', op_outputs)(*output_values)
        np.testing.assert_allclose(Y_c2, Y_trt, rtol=1e-3)

    @unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
    def test_resnet50(self):
        self._test_onnx_importer('resnet50', 0, 9)

    @unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
    def test_bvlc_alexnet(self):
        self._test_onnx_importer('bvlc_alexnet', 0, 9)

    @unittest.skip("Until fixing Unsqueeze op")
    def test_densenet121(self):
        self._test_onnx_importer('densenet121', -1, 3)

    @unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
    def test_inception_v1(self):
        self._test_onnx_importer('inception_v1', -3, 9)

    @unittest.skip("Until fixing Unsqueeze op")
    def test_inception_v2(self):
        self._test_onnx_importer('inception_v2', 0, 9)

    @unittest.skip('Need to revisit our ChannelShuffle exporter to avoid generating 5D tensor')
    def test_shufflenet(self):
        self._test_onnx_importer('shufflenet', 0)

    @unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
    def test_squeezenet(self):
        self._test_onnx_importer('squeezenet', -1, 9)

    @unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
    def test_vgg16(self):
        self._test_onnx_importer('vgg16', 0, 9)

    @unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
    def test_vgg19(self):
        self._test_onnx_importer('vgg19', -2, 9)
class TensorRTTransformTest(TestCase):
    """Tests the graph-cutting transform that collapses TRT-runnable subgraphs
    of a caffe2 net into TensorRT ops, including a timing comparison."""

    def setUp(self):
        self.model_downloader = ModelDownloader()

    def _add_head_tail(self, pred_net, new_head, new_tail):
        """Prepend/append Copy ops so the net's external input/output are
        renamed to *new_head*/*new_tail* (mutates *pred_net* in place)."""
        orig_head = pred_net.external_input[0]
        orig_tail = pred_net.external_output[0]

        # Add head
        head = caffe2_pb2.OperatorDef()
        head.type = "Copy"
        head.input.append(new_head)
        head.output.append(orig_head)
        # Protobuf repeated fields can't be prepended directly: stash the ops,
        # clear, re-extend with head first.
        dummy = caffe2_pb2.NetDef()
        dummy.op.extend(pred_net.op)
        del pred_net.op[:]
        pred_net.op.extend([head])
        pred_net.op.extend(dummy.op)
        pred_net.external_input[0] = new_head

        # Add tail
        tail = caffe2_pb2.OperatorDef()
        tail.type = "Copy"
        tail.input.append(orig_tail)
        tail.output.append(new_tail)
        pred_net.op.extend([tail])
        pred_net.external_output[0] = new_tail

    @unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
    def test_resnet50_core(self):
        """Run resnet50 natively and through the TRT transform; compare outputs
        and report the runtime improvement."""
        N = 2
        warmup = 20
        repeat = 100
        print("Batch size: {}, repeat inference {} times, warmup {} times".format(N, repeat, warmup))
        init_net, pred_net, _ = self.model_downloader.get_c2_model('resnet50')
        self._add_head_tail(pred_net, 'real_data', 'real_softmax')
        input_blob_dims = (N, 3, 224, 224)
        input_name = "real_data"

        device_option = core.DeviceOption(caffe2_pb2.CUDA, 0)
        init_net.device_option.CopyFrom(device_option)
        pred_net.device_option.CopyFrom(device_option)
        for op in pred_net.op:
            op.device_option.CopyFrom(device_option)
            op.engine = 'CUDNN'
        net_outputs = pred_net.external_output
        Y_c2 = None
        data = np.random.randn(*input_blob_dims).astype(np.float32)
        c2_time = 1
        # Baseline: native caffe2/CUDNN run, timed after warmup.
        workspace.SwitchWorkspace("gpu_test", True)
        with core.DeviceScope(device_option):
            workspace.FeedBlob(input_name, data)
            workspace.RunNetOnce(init_net)
            workspace.CreateNet(pred_net)
            for _ in range(warmup):
                workspace.RunNet(pred_net.name)
            start = time.time()
            for _ in range(repeat):
                workspace.RunNet(pred_net.name)
            end = time.time()
            c2_time = end - start
            output_values = [workspace.FetchBlob(name) for name in net_outputs]
            Y_c2 = namedtupledict('Outputs', net_outputs)(*output_values)
        workspace.ResetWorkspace()

        # Fill the workspace with the weights
        with core.DeviceScope(device_option):
            workspace.RunNetOnce(init_net)

        # Cut the graph
        start = time.time()
        pred_net_cut = transform_caffe2_net(pred_net,
                                            {input_name: input_blob_dims},
                                            build_serializable_op=False)
        del init_net, pred_net
        pred_net_cut.device_option.CopyFrom(device_option)
        for op in pred_net_cut.op:
            op.device_option.CopyFrom(device_option)
        #_print_net(pred_net_cut)

        Y_trt = None
        # The transform may rename the external input; re-read it.
        input_name = pred_net_cut.external_input[0]
        print("C2 runtime: {}s".format(c2_time))
        with core.DeviceScope(device_option):
            workspace.FeedBlob(input_name, data)
            workspace.CreateNet(pred_net_cut)
            end = time.time()
            print("Conversion time: {:.2f}s".format(end -start))

            for _ in range(warmup):
                workspace.RunNet(pred_net_cut.name)
            start = time.time()
            for _ in range(repeat):
                workspace.RunNet(pred_net_cut.name)
            end = time.time()
            trt_time = end - start
            print("TRT runtime: {}s, improvement: {}%".format(trt_time, (c2_time-trt_time)/c2_time*100))
            output_values = [workspace.FetchBlob(name) for name in net_outputs]
            Y_trt = namedtupledict('Outputs', net_outputs)(*output_values)
        np.testing.assert_allclose(Y_c2, Y_trt, rtol=1e-3)
|
pytorch-master
|
caffe2/python/trt/test_trt.py
|
import functools
from hypothesis import given, settings, HealthCheck
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestStorm(hu.HypothesisTestCase):
    """Reference-checked tests for the STORM optimizer ops: dense, sparse, and
    the empty-indices sparse edge case.

    The fix over the previous revision: ``ref_sparse`` contained a dead store
    (``grad_sq_sum_out = np.copy(grad_sq_sum)`` immediately overwritten on the
    next line); the useless copy is removed.
    """

    @given(inputs=hu.tensors(n=3),
           grad_sq_sum=st.floats(min_value=0.01, max_value=0.99,
                                 allow_nan=False, allow_infinity=False),
           lr=st.floats(min_value=0.01, max_value=1.0,
                        allow_nan=False, allow_infinity=False),
           momentum=st.floats(min_value=0.1, max_value=100.0,
                              allow_nan=False, allow_infinity=False),
           beta=st.floats(min_value=0.1, max_value=10.0,
                          allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    def test_storm_dense(self, inputs, grad_sq_sum, lr, momentum, beta, gc, dc):
        """Dense Storm op must match the numpy reference update."""
        param, moment, grad = inputs
        grad_sq_sum = np.array([grad_sq_sum], dtype=np.float32)
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(
            "Storm",
            ["param", "moment", "grad_sq_sum", "grad", "lr"],
            ["param", "moment", "grad_sq_sum"],
            momentum=momentum,
            beta=beta,
            device_option=gc
        )

        def ref_dense(param, moment, grad_sq_sum, grad, lr, momentum, beta):
            # STORM: adaptive step nlr = lr * (beta + sum g^2)^(-1/3),
            # momentum factor alpha = momentum * nlr^2.
            grad_sq_sum_out = grad_sq_sum + np.sum(grad * grad)
            nlr = lr * np.power(beta + grad_sq_sum_out, -1.0 / 3.0)
            alpha = momentum * np.square(nlr)
            moment_out = grad + (1 - alpha) * (moment - grad)
            param_out = param + nlr * moment_out
            return (param_out.astype(np.float32), moment_out.astype(np.float32),
                    grad_sq_sum_out.astype(np.float32))

        self.assertReferenceChecks(
            gc, op,
            [param, moment, grad_sq_sum, grad, lr],
            functools.partial(ref_dense, momentum=momentum, beta=beta)
        )

    # Suppress filter_too_much health check.
    # Likely caused by `assume` call falling through too often.
    @settings(suppress_health_check=[HealthCheck.filter_too_much])
    @given(inputs=hu.tensors(n=3),
           grad_sq_sum=st.floats(min_value=0.01, max_value=0.99,
                                 allow_nan=False, allow_infinity=False),
           lr=st.floats(min_value=0.01, max_value=1.0,
                        allow_nan=False, allow_infinity=False),
           momentum=st.floats(min_value=0.1, max_value=100.0,
                              allow_nan=False, allow_infinity=False),
           beta=st.floats(min_value=0.1, max_value=10.0,
                          allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    def test_storm_sparse(self, inputs, grad_sq_sum, lr,
                          momentum, beta, gc, dc):
        """SparseStorm must update only the rows selected by `indices`."""
        param, moment, grad = inputs
        grad_sq_sum = np.array([grad_sq_sum], dtype=np.float32)
        lr = np.array([lr], dtype=np.float32)

        # Create an indexing array containing values that are lists of indices,
        # which index into grad
        indices = np.random.choice(np.arange(grad.shape[0]),
                                   size=np.random.randint(grad.shape[0]),
                                   replace=False)

        # Sparsify grad
        grad = grad[indices]

        op = core.CreateOperator(
            "SparseStorm",
            ["param", "moment", "grad_sq_sum", "grad", "indices", "lr"],
            ["param", "moment", "grad_sq_sum"],
            momentum=momentum,
            beta=beta,
            device_option=gc)

        def ref_sparse(param, moment, grad_sq_sum, grad, indices,
                       lr, momentum, beta):
            param_out = np.copy(param)
            moment_out = np.copy(moment)
            # nlr/alpha are computed once from the full (sparse) gradient sum,
            # then applied row-by-row below.
            grad_sq_sum_out = grad_sq_sum + np.sum(grad * grad)
            nlr = lr * np.power(beta + grad_sq_sum_out, -1.0 / 3.0)
            alpha = momentum * np.square(nlr)
            for i, index in enumerate(indices):
                gi = grad[i]
                moment_out[index] = gi + (1 - alpha) * (moment[index] - gi)
                param_out[index] = param[index] + nlr * moment_out[index]
            return (param_out.astype(np.float32), moment_out.astype(np.float32),
                    grad_sq_sum_out.astype(np.float32))

        self.assertReferenceChecks(
            gc, op,
            [param, moment, grad_sq_sum, grad, indices, lr],
            functools.partial(ref_sparse, momentum=momentum, beta=beta)
        )

    @given(inputs=hu.tensors(n=2),
           grad_sq_sum=st.floats(min_value=0.01, max_value=0.99,
                                 allow_nan=False, allow_infinity=False),
           lr=st.floats(min_value=0.01, max_value=1.0,
                        allow_nan=False, allow_infinity=False),
           momentum=st.floats(min_value=0.1, max_value=100.0,
                              allow_nan=False, allow_infinity=False),
           beta=st.floats(min_value=0.1, max_value=10.0,
                          allow_nan=False, allow_infinity=False),
           data_strategy=st.data(),
           **hu.gcs_cpu_only)
    def test_storm_sparse_empty(self, inputs, grad_sq_sum, lr, momentum,
                                beta, data_strategy, gc, dc):
        """With zero indices, SparseStorm must leave all state unchanged."""
        param, moment = inputs
        grad_sq_sum = np.array([grad_sq_sum], dtype=np.float32)
        lr = np.array([lr], dtype=np.float32)

        grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
        indices = np.empty(shape=(0,), dtype=np.int64)

        op = core.CreateOperator(
            "SparseStorm",
            ["param", "moment", "grad_sq_sum", "grad", "indices", "lr"],
            ["param", "moment", "grad_sq_sum"],
            momentum=momentum,
            beta=beta,
            device_option=gc)

        def ref_sparse_empty(param, moment, grad_sq_sum, grad, indices,
                             lr, momentum, beta):
            # No rows selected: outputs are exact copies of the inputs.
            param_out = np.copy(param)
            moment_out = np.copy(moment)
            grad_sq_sum_out = np.copy(grad_sq_sum)
            return (param_out.astype(np.float32), moment_out.astype(np.float32),
                    grad_sq_sum_out.astype(np.float32))

        self.assertReferenceChecks(
            gc, op,
            [param, moment, grad_sq_sum, grad, indices, lr],
            functools.partial(ref_sparse_empty, momentum=momentum, beta=beta)
        )
|
pytorch-master
|
caffe2/python/operator_test/storm_test.py
|
import inspect
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestMatMul(serial.SerializedTestCase):
    """Reference/device/gradient checks for the MatMul operator, with and
    without the axis_a/axis_b reshaping semantics."""

    @serial.given(
        M=st.integers(min_value=1, max_value=10),
        K=st.integers(min_value=1, max_value=10),
        N=st.integers(min_value=1, max_value=10),
        trans_a=st.booleans(),
        trans_b=st.booleans(),
        **hu.gcs
    )
    def test_matmul(self, M, K, N, trans_a, trans_b, gc, dc):
        """MatMul(X, Y) with optional transposes must equal numpy dot."""
        X = np.random.rand(M, K).astype(np.float32) - 0.5
        if trans_a:
            # Store X pre-transposed so the op's trans_a undoes it.
            X = X.transpose()
        Y = np.random.rand(K, N).astype(np.float32) - 0.5
        if trans_b:
            Y = Y.transpose()

        op = core.CreateOperator(
            'MatMul', ['X', 'Y'], 'out', trans_a=trans_a, trans_b=trans_b
        )

        def matmul_ref(X, Y, trans_a, trans_b):
            XX = X.transpose() if trans_a else X
            YY = Y.transpose() if trans_b else Y
            return (XX.dot(YY), )

        # Check against numpy reference
        self.assertReferenceChecks(gc, op, [X, Y, trans_a, trans_b], matmul_ref)
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X, Y], [0])
        # Gradient check wrt X
        self.assertGradientChecks(gc, op, [X, Y], 0, [0])
        # Gradient check wrt Y
        self.assertGradientChecks(gc, op, [X, Y], 1, [0])

    @given(
        M=st.integers(min_value=1, max_value=10),
        K=st.integers(min_value=1, max_value=10),
        N=st.integers(min_value=1, max_value=10),
        axis_a=st.sampled_from([-3, -2, -1, 1, 2, 3]),
        axis_b=st.sampled_from([-3, -2, -1, 1, 2, 3]),
        trans_a=st.booleans(),
        trans_b=st.booleans(),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test_matmul_axis(
        self, M, K, N, axis_a, axis_b, trans_a, trans_b, gc, dc
    ):
        """axis_a/axis_b flatten a 4-D input into 2-D before the multiply;
        verify against a reference that reproduces that flattening."""
        X = np.random.rand(M, K).astype(np.float32) - 0.5
        if trans_a:
            X = X.transpose()
        # Embed the 2-D matrix into 4-D with the second dim at axis_a.
        shape_x = [X.shape[0], 1, 1, 1]
        shape_x[axis_a] = X.shape[1]
        X = X.reshape(*shape_x)

        Y = np.random.rand(K, N).astype(np.float32) - 0.5
        if trans_b:
            Y = Y.transpose()
        shape_y = [Y.shape[0], 1, 1, 1]
        shape_y[axis_b] = Y.shape[1]
        Y = Y.reshape(*shape_y)
        op = core.CreateOperator(
            'MatMul', ['X', 'Y'],
            'out',
            axis_a=axis_a,
            axis_b=axis_b,
            trans_a=trans_a,
            trans_b=trans_b
        )

        def size_to_dim(X, axis):
            # Product of dims strictly before `axis`.
            dim = 1
            for i in range(axis):
                dim *= X.shape[i]
            return dim

        def size_from_dim(X, axis):
            # Product of dims from `axis` to the end.
            dim = 1
            for i in range(axis, X.ndim):
                dim *= X.shape[i]
            return dim

        def reshape(X, axis):
            # Flatten to 2-D split at `axis`, mirroring the op's semantics.
            dim_0, dim_1 = size_to_dim(X, axis), size_from_dim(X, axis)
            return X.reshape(dim_0, dim_1)

        def canonical_axis(axis, ndim):
            return ndim + axis if axis < 0 else axis

        def matmul_ref(X, Y, axis_a, axis_b, trans_a, trans_b):
            can_axis_a = canonical_axis(axis_a, X.ndim)
            can_axis_b = canonical_axis(axis_b, Y.ndim)
            X, Y = reshape(X, can_axis_a), reshape(Y, can_axis_b)
            XX = X.transpose() if trans_a else X
            YY = Y.transpose() if trans_b else Y
            return (XX.dot(YY), )

        # Check against numpy reference
        self.assertReferenceChecks(
            gc, op, [X, Y, axis_a, axis_b, trans_a, trans_b], matmul_ref
        )
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X, Y], [0])
        # Gradient check wrt X
        self.assertGradientChecks(gc, op, [X, Y], 0, [0])
        # Gradient check wrt Y
        self.assertGradientChecks(gc, op, [X, Y], 1, [0])
class TestBatchMatMul(serial.SerializedTestCase):
    """Tests for the BatchMatMul operator: batched products with optional
    transposes, fp16 on GPU, and numpy-style broadcasting."""

    @settings(max_examples=30, deadline=None)
    @given(
        C=st.integers(min_value=0, max_value=3),  # number of batch dims
        M=st.integers(min_value=1, max_value=10),
        K=st.integers(min_value=1, max_value=10),
        N=st.integers(min_value=1, max_value=10),
        trans_a=st.booleans(),
        trans_b=st.booleans(),
        dtype=st.sampled_from([np.float32, np.float16]),
        **hu.gcs
    )
    def test_batch_matmul(self, C, M, K, N, trans_a, trans_b, dtype, gc, dc):
        """BatchMatMul with C leading batch dims and optional transposes."""
        if dtype == np.float16:
            # fp16 is only supported with CUDA/HIP
            assume(core.IsGPUDeviceType(gc.device_type))
            dc = [d for d in dc if core.IsGPUDeviceType(d.device_type)]

        batch_dims = np.random.randint(
            low=1,
            high=3,
            size=C,
            dtype=np.int64).tolist()
        X = np.random.rand(*(batch_dims + [M, K])).astype(dtype) - 0.5
        if trans_a:
            X = X.swapaxes(-1, -2)
        Y = np.random.rand(*(batch_dims + [K, N])).astype(dtype) - 0.5
        if trans_b:
            Y = Y.swapaxes(-1, -2)

        op = core.CreateOperator(
            'BatchMatMul', ['X', 'Y'], 'out', trans_a=trans_a, trans_b=trans_b
        )

        def matmul_ref(X, Y, trans_a, trans_b, dtype):
            # Compute in fp32 for accuracy, then cast back to the test dtype.
            XX = (X.swapaxes(-1, -2) if trans_a else X).astype(np.float32)
            YY = (Y.swapaxes(-1, -2) if trans_b else Y).astype(np.float32)
            return (np.matmul(XX, YY).astype(dtype),)

        # relaxing the "threshold" for fp16 to 150x of the default
        def relax_fp16_check(check_func, *args, **kwargs):
            # inspect the default "threshold" value in check_func
            # NOTE: inspect.getargspec was removed in Python 3.11;
            # getfullargspec provides the same `args`/`defaults` fields.
            argspec = inspect.getfullargspec(check_func)
            threshold = argspec.defaults[
                argspec.args.index('threshold') -
                (len(argspec.args) - len(argspec.defaults))]
            if dtype == np.float16:
                threshold = 150 * threshold
            check_func(*args, threshold=threshold, **kwargs)

        # Check against numpy reference
        relax_fp16_check(self.assertReferenceChecks, gc, op, [X, Y, trans_a, trans_b, dtype], matmul_ref)
        # Check over multiple devices
        relax_fp16_check(self.assertDeviceChecks, dc, op, [X, Y], [0])
        # Gradient check wrt X
        relax_fp16_check(self.assertGradientChecks, gc, op, [X, Y], 0, [0])
        # Gradient check wrt Y
        relax_fp16_check(self.assertGradientChecks, gc, op, [X, Y], 1, [0])

    def _test_batch_matmul_with_broadcast_common(
        self,
        X,
        Y,
        dtype,
        gc,
        dc,
        trans_a=None,
        trans_b=None,
    ):
        """Shared driver: run BatchMatMul with broadcast=1 and compare the
        output to np.matmul (which implements the same broadcasting rules)."""
        if trans_a is not None and trans_b is not None:
            op = core.CreateOperator(
                'BatchMatMul', ['X', 'Y'], 'out', trans_a=trans_a, trans_b=trans_b, broadcast=1
            )
        else:
            op = core.CreateOperator(
                'BatchMatMul', ['X', 'Y'], 'out', broadcast=1
            )

        def matmul_ref(X, Y, trans_a, trans_b, dtype):
            XX = (X.swapaxes(-1, -2) if trans_a else X).astype(np.float32)
            YY = (Y.swapaxes(-1, -2) if trans_b else Y).astype(np.float32)
            return (np.matmul(XX, YY).astype(dtype),)

        # Check against numpy reference
        self.assertReferenceChecks(gc, op, [X, Y, trans_a, trans_b, dtype], matmul_ref)
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X, Y], [0])

    @given(
        C_1=st.integers(min_value=0, max_value=3),  # number of batch dims
        C_2=st.integers(min_value=0, max_value=3),
        M=st.integers(min_value=1, max_value=10),
        K=st.integers(min_value=1, max_value=10),
        N=st.integers(min_value=1, max_value=10),
        trans_a=st.booleans(),
        trans_b=st.booleans(),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test_numpy_batch_matmul(self, C_1, C_2, M, K, N, trans_a, trans_b, gc, dc):
        """Broadcasting with a different number of batch dims per operand."""
        dtype = np.float32
        batch_dims = np.random.randint(
            low=0,
            high=3,
            size=max(C_1, C_2),
            dtype=np.int64).tolist()
        lbd = len(batch_dims)
        # Each operand takes only its trailing C_i batch dims, so the two
        # shapes broadcast against each other.
        X = np.random.rand(*(batch_dims[lbd - C_1:] + [M, K])).astype(dtype) - 0.5
        if trans_a:
            X = X.swapaxes(-1, -2)
        Y = np.random.rand(*(batch_dims[lbd - C_2:] + [K, N])).astype(dtype) - 0.5
        if trans_b:
            Y = Y.swapaxes(-1, -2)
        self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc, trans_a, trans_b)

    @settings(max_examples=30, deadline=None)
    @given(
        K=st.integers(min_value=1, max_value=10),
        **hu.gcs
    )
    def test_numpy_batch_matmul_1d(self, K, gc, dc):
        """1-D x 1-D input (vector dot product semantics)."""
        dtype = np.float32
        X = np.random.rand(K).astype(dtype) - 0.5
        # TODO: test trans_a and trans_b
        Y = np.random.rand(K).astype(dtype) - 0.5
        self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc)

    @settings(max_examples=30, deadline=None)
    @given(
        K=st.integers(min_value=1, max_value=10),
        N=st.integers(min_value=1, max_value=10),
        **hu.gcs
    )
    def test_numpy_batch_matmul_1d_2d(self, K, N, gc, dc):
        """1-D x 2-D input (vector-matrix product semantics)."""
        dtype = np.float32
        X = np.random.rand(K).astype(dtype) - 0.5
        # TODO: test trans_a and trans_b
        Y = np.random.rand(*[K, N]).astype(dtype) - 0.5
        self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc)

    @settings(max_examples=30, deadline=None)
    @given(
        M=st.integers(min_value=1, max_value=10),
        K=st.integers(min_value=1, max_value=10),
        **hu.gcs
    )
    def test_numpy_batch_matmul_2d_1d(self, M, K, gc, dc):
        """2-D x 1-D input (matrix-vector product semantics)."""
        dtype = np.float32
        X = np.random.rand(*[M, K]).astype(dtype) - 0.5
        # TODO: test trans_a and trans_b
        Y = np.random.rand(K).astype(dtype) - 0.5
        self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc)
# Allow running this test module directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/matmul_op_test.py
|
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
import numpy.testing as npt
from hypothesis import given, settings
import hypothesis.strategies as st
import functools
def primefac(n):
    """Return the prime factors of n (with multiplicity), in non-decreasing
    order.  For n <= 1 the result is an empty list."""
    factors = []
    candidate = 2
    # Trial division only needs candidates up to sqrt(n); whatever remains
    # after stripping all of them is itself prime.
    while candidate * candidate <= n:
        while n % candidate == 0:
            factors.append(candidate)
            n //= candidate
        candidate += 1
    if n > 1:
        factors.append(n)
    return factors
class TestReBatchingQueue(TestCase):
    """Tests for the RebatchingQueue operators: create, enqueue (single and
    batch), dequeue, close, and concurrent producer/consumer execution.

    Fix: `assertEquals` is a deprecated unittest alias removed in
    Python 3.12; replaced with `assertEqual`.
    """

    def test_rebatching_queue_single_enqueue_dequeue(self):
        """Three single-element enqueues dequeue back one element at a time."""
        net = core.Net('net')

        tensors = [
            net.ConstantFill([], 1, value=1.0, run_once=False)
            for times in range(3)
        ]

        queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)

        net.EnqueueRebatchingQueue([queue, tensors[0]], [])
        net.EnqueueRebatchingQueue([queue, tensors[1]], [])
        net.EnqueueRebatchingQueue([queue, tensors[2]], [])

        results = [
            net.DequeueRebatchingQueue([queue], 1),
            net.DequeueRebatchingQueue([queue], 1),
            net.DequeueRebatchingQueue([queue], 1),
        ]

        workspace.RunNetOnce(net)

        for idx in range(3):
            self.assertEqual(workspace.FetchBlob(results[idx]), [1.0])

    def test_rebatching_queue_multi_enqueue_dequeue(self):
        """One batch enqueue of 10 elements split across two 5-element dequeues."""
        net = core.Net('net')
        workspace.FeedBlob(
            "tensors", np.array([x for x in range(10)], np.int32)
        )

        queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)

        net.EnqueueRebatchingQueue([queue, "tensors"], [], enqueue_batch=True)

        results = [
            net.DequeueRebatchingQueue([queue], 1, num_elements=5),
            net.DequeueRebatchingQueue([queue], 1, num_elements=5),
        ]

        workspace.RunNetOnce(net)

        npt.assert_array_equal(
            workspace.FetchBlob(results[0]), workspace.FetchBlob("tensors")[:5]
        )
        npt.assert_array_equal(
            workspace.FetchBlob(results[1]), workspace.FetchBlob("tensors")[5:]
        )

    def test_rebatching_queue_closes_properly(self):
        """After CloseRebatchingQueue both enqueue and dequeue must raise."""
        net = core.Net('net')
        workspace.FeedBlob(
            "tensors", np.array([x for x in range(10)], np.int32)
        )

        queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)

        net.EnqueueRebatchingQueue([queue, "tensors"], 0, enqueue_batch=True)

        net.CloseRebatchingQueue([queue], 0)

        results = [
            net.DequeueRebatchingQueue([queue], 1, num_elements=5),
            net.DequeueRebatchingQueue([queue], 1, num_elements=5),
        ]

        workspace.RunNetOnce(net)

        # Elements already in the queue before the close are still delivered.
        npt.assert_array_equal(
            workspace.FetchBlob(results[0]), workspace.FetchBlob("tensors")[:5]
        )
        npt.assert_array_equal(
            workspace.FetchBlob(results[1]), workspace.FetchBlob("tensors")[5:]
        )

        # Enqueuing more should fail now since the queue is closed
        net.EnqueueRebatchingQueue([queue, "tensors"], [], enqueue_batch=True)

        with self.assertRaises(RuntimeError):
            workspace.RunNetOnce(net)

        # Dequeuing more should fail now since the queue is closed
        results = [
            net.DequeueRebatchingQueue([queue], 1, num_elements=5),
        ]

        with self.assertRaises(RuntimeError):
            workspace.RunNetOnce(net)

    def test_rebatching_queue_multiple_components(self):
        """A queue with several blobs per element must keep components aligned."""
        NUM_BLOBS = 4
        NUM_ELEMENTS = 10

        net = core.Net('net')

        workspace.blobs['complex_tensor'] = np.array(
            [[x, x + 1] for x in range(NUM_ELEMENTS)], dtype=np.int32
        )

        tensors = [
            net.GivenTensorIntFill(
                [],
                1,
                shape=[NUM_ELEMENTS],
                values=[x for x in range(NUM_ELEMENTS)]
            ),
            net.GivenTensorFill(
                [],
                1,
                shape=[NUM_ELEMENTS],
                values=[x * 1.0 for x in range(NUM_ELEMENTS)]
            ),
            net.GivenTensorBoolFill(
                [],
                1,
                shape=[NUM_ELEMENTS],
                values=[(x % 2 == 0) for x in range(NUM_ELEMENTS)]
            ),
            'complex_tensor',
        ]

        queue = net.CreateRebatchingQueue(
            [], 1, capacity=10, num_blobs=NUM_BLOBS
        )

        net.EnqueueRebatchingQueue([queue] + tensors, [], enqueue_batch=True)

        results = net.DequeueRebatchingQueue([queue], NUM_BLOBS, num_elements=5)

        workspace.RunNetOnce(net)

        for idx in range(NUM_BLOBS):
            npt.assert_array_equal(
                workspace.FetchBlob(results[idx]),
                workspace.FetchBlob(tensors[idx])[:5]
            )

    @given(
        num_producers=st.integers(1, 5),
        num_consumers=st.integers(1, 5),
        producer_input_size=st.integers(1, 10),
        producer_num_iterations=st.integers(1, 10),
        capacity=st.integers(1, 10)
    )
    @settings(deadline=10000)
    def test_rebatching_parallel_producer_consumer(
        self, num_producers, num_consumers, producer_input_size,
        producer_num_iterations, capacity
    ):
        """Concurrent producers and consumers must together move every element
        through the queue exactly once (outputs are a permutation of inputs)."""
        ### Init ###
        total_inputs = producer_num_iterations * producer_input_size * num_producers
        inputs = []
        init_net = core.Net('init_net')
        queue = init_net.CreateRebatchingQueue(
            [], 1, capacity=capacity, num_blobs=1
        )

        ### Producers ###
        producer_steps = []
        for i in range(num_producers):
            name = 'producer_%d' % i
            net = core.Net(name)
            values = [
                producer_input_size * i + x for x in range(producer_input_size)
            ]
            for _ in range(producer_num_iterations):
                inputs.extend(values)
            tensors = net.GivenTensorIntFill(
                [], 1, shape=[producer_input_size], values=values
            )
            net.EnqueueRebatchingQueue([queue, tensors], [], enqueue_batch=True)

            step = core.execution_step(
                name, net, num_iter=producer_num_iterations
            )
            producer_steps.append(step)

        producer_step = core.execution_step(
            'producer', [
                core.execution_step(
                    'producers', producer_steps, concurrent_substeps=True
                )
            ]
        )

        ### Consumers ###
        outputs = []

        def append(ins, outs):
            # Extend is atomic
            outputs.extend(ins[0].data.tolist())

        consumer_steps = []
        for i in range(num_consumers):
            # This is just a way of deterministally read all the elements.
            # We make `num_consumers` almost equal splits
            # (the reminder goes to the last consumer).
            num_elements_to_read = total_inputs // num_consumers
            if i == num_consumers - 1:
                num_elements_to_read = num_elements_to_read \
                    + total_inputs % num_consumers

            # If we have nothing to read this consumer will be idle
            if (num_elements_to_read == 0):
                continue

            # Now we have to make a split on number of iterations and the read
            # size for each iteration. This is again just one of many
            # deterministic ways of doing it. We factorize the total number of
            # elements we have to read and assign half of the factors to the
            # iterations half to the read size.
            factors = list(primefac(num_elements_to_read))

            num_elements_per_iteration = functools.reduce(
                lambda x, y: x * y, factors[len(factors) // 2:], 1
            )

            num_iterations = functools.reduce(
                lambda x, y: x * y, factors[:len(factors) // 2], 1
            )

            name = 'consumer_%d' % i
            net = core.Net(name)
            blobs = net.DequeueRebatchingQueue(
                [queue], 1, num_elements=num_elements_per_iteration
            )
            net.Python(append)([blobs], 0)
            consumer_steps.append(
                core.execution_step(name, net, num_iter=num_iterations)
            )

        consumer_step = core.execution_step(
            'consumer', consumer_steps, concurrent_substeps=True
        )

        init_step = core.execution_step('init', init_net)
        worker_step = core.execution_step(
            'worker', [consumer_step, producer_step], concurrent_substeps=True
        )

        ### Execute Plan ###
        plan = core.Plan('test')
        plan.AddStep(init_step)
        plan.AddStep(worker_step)

        self.ws.run(plan)

        ### Check Results ###
        # We check that the outputs are a permutation of inputs
        inputs.sort()
        outputs.sort()
        self.assertEqual(inputs, outputs)
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/rebatching_queue_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import itertools as it
import numpy as np
class TestMomentsOp(serial.SerializedTestCase):
    """Tests the Moments operator (mean and variance over selected axes)."""

    def run_moments_test(self, X, axes, keepdims, gc, dc):
        # Shared driver: axes=None means reduce over all dimensions, in which
        # case the `axes` argument is omitted from the operator entirely.
        if axes is None:
            op = core.CreateOperator(
                "Moments",
                ["X"],
                ["mean", "variance"],
                keepdims=keepdims,
            )
        else:
            op = core.CreateOperator(
                "Moments",
                ["X"],
                ["mean", "variance"],
                axes=axes,
                keepdims=keepdims,
            )

        def ref(X):
            # numpy reference: np.mean / np.var over the same axes.
            mean = np.mean(X, axis=None if axes is None else tuple(
                axes), keepdims=keepdims)
            variance = np.var(X, axis=None if axes is None else tuple(
                axes), keepdims=keepdims)
            return [mean, variance]

        # Both outputs (mean, variance) are checked, including gradients.
        self.assertReferenceChecks(gc, op, [X], ref)
        self.assertDeviceChecks(dc, op, [X], [0, 1])
        self.assertGradientChecks(gc, op, [X], 0, [0, 1])

    @serial.given(X=hu.tensor(dtype=np.float32), keepdims=st.booleans(),
                  num_axes=st.integers(1, 4), **hu.gcs)
    def test_moments(self, X, keepdims, num_axes, gc, dc):
        # Full reduction first, then every `num_axes`-sized axis combination
        # (or all axes when the tensor has fewer dims than num_axes).
        self.run_moments_test(X, None, keepdims, gc, dc)
        num_dims = len(X.shape)
        if num_dims < num_axes:
            self.run_moments_test(X, range(num_dims), keepdims, gc, dc)
        else:
            for axes in it.combinations(range(num_dims), num_axes):
                self.run_moments_test(X, axes, keepdims, gc, dc)
|
pytorch-master
|
caffe2/python/operator_test/moments_op_test.py
|
from caffe2.python import workspace, core, rnn_cell
from caffe2.python.model_helper import ModelHelper
from caffe2.python.rnn.rnn_cell_test_util import tanh
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
def basic_rnn_reference(input, hidden_initial,
                        i2h_w, i2h_b,
                        gate_w, gate_b,
                        seq_lengths,
                        drop_states,
                        use_sequence_lengths):
    """Numpy reference for a basic RNN unrolled over time.

    input is (T, N, input_size); hidden_initial is (N, D).  Returns the list
    of per-timestep (N, D) hidden states.  When seq_lengths is given, steps
    past an example's length either carry the previous state forward or,
    with drop_states, zero its contribution.
    """
    hidden_dim = hidden_initial.shape[-1]
    num_steps = input.shape[0]
    batch_size = input.shape[1]

    # Broadcast the per-example lengths to an (N, D) int array so the
    # per-step validity mask can be computed with a single comparison.
    length_grid = None
    if seq_lengths is not None:
        length_grid = (np.ones(shape=(batch_size, hidden_dim)) *
                       seq_lengths.reshape(batch_size, 1)).astype(np.int32)

    states = []
    h_prev = hidden_initial
    for step in range(num_steps):
        fc_in = np.dot(input[step], i2h_w.T) + i2h_b
        fc_rec = np.dot(h_prev, gate_w.T) + gate_b
        h_t = tanh(fc_in + fc_rec)
        if length_grid is not None:
            valid = (step < length_grid).astype(np.int32)
            assert valid.shape == (batch_size, hidden_dim), \
                (valid.shape, (batch_size, hidden_dim))
            h_t = h_t * valid + \
                h_prev * (1 - valid) * (1 - drop_states)
        states.append(h_t)
        h_prev = h_t
    return states
class BasicRNNCellTest(hu.HypothesisTestCase):
    """End-to-end check of rnn_cell.BasicRNN against basic_rnn_reference."""

    @given(
        seed=st.integers(0, 2**32 - 1),
        seq_length=st.integers(min_value=1, max_value=5),
        batch_size=st.integers(min_value=1, max_value=5),
        input_size=st.integers(min_value=1, max_value=5),
        hidden_size=st.integers(min_value=1, max_value=5),
        drop_states=st.booleans(),
        sequence_lengths=st.booleans(),
        **hu.gcs
    )
    @ht_settings(max_examples=15)
    def test_basic_rnn(self, seed, seq_length, batch_size, input_size, hidden_size,
                       drop_states, sequence_lengths, gc, dc):
        """Forward-only BasicRNN output must match the numpy reference."""
        np.random.seed(seed)

        seq_lengths_data = np.random.randint(
            1, seq_length + 1, size=(batch_size,)).astype(np.int32)
        input_blob_data = np.random.randn(
            seq_length, batch_size, input_size).astype(np.float32)
        initial_h_data = np.random.randn(
            batch_size, hidden_size).astype(np.float32)
        gates_t_w_data = np.random.randn(
            hidden_size, hidden_size).astype(np.float32)
        gates_t_b_data = np.random.randn(
            hidden_size).astype(np.float32)
        i2h_w_data = np.random.randn(
            hidden_size, input_size).astype(np.float32)
        i2h_b_data = np.random.randn(
            hidden_size).astype(np.float32)

        with core.DeviceScope(gc):
            with hu.temp_workspace():
                # Blob names must line up with what rnn_cell.BasicRNN expects
                # under the "basic_rnn" scope — presumably these override the
                # randomly initialized parameters; confirm against rnn_cell.
                workspace.FeedBlob(
                    'input_blob', input_blob_data, device_option=gc)
                workspace.FeedBlob(
                    'seq_lengths', seq_lengths_data, device_option=gc)
                workspace.FeedBlob(
                    'initial_h', initial_h_data, device_option=gc)
                workspace.FeedBlob(
                    'basic_rnn/gates_t_w', gates_t_w_data, device_option=gc)
                workspace.FeedBlob(
                    'basic_rnn/gates_t_b', gates_t_b_data, device_option=gc)
                workspace.FeedBlob(
                    'basic_rnn/i2h_w', i2h_w_data, device_option=gc)
                workspace.FeedBlob(
                    'basic_rnn/i2h_b', i2h_b_data, device_option=gc)

                model = ModelHelper(name='model')
                hidden_t_all, _ = rnn_cell.BasicRNN(
                    model,
                    'input_blob',
                    'seq_lengths' if sequence_lengths else None,
                    ['initial_h'],
                    input_size,
                    hidden_size,
                    "basic_rnn",
                    activation='tanh',
                    forward_only=True,
                    drop_states=drop_states)

                workspace.RunNetOnce(model.net)

                result = workspace.FetchBlob(hidden_t_all)

                reference = basic_rnn_reference(
                    input_blob_data,
                    initial_h_data,
                    i2h_w_data,
                    i2h_b_data,
                    gates_t_w_data,
                    gates_t_b_data,
                    seq_lengths_data if sequence_lengths else None,
                    drop_states=drop_states,
                    use_sequence_lengths=sequence_lengths
                )

                np.testing.assert_allclose(result, reference, atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
    # Initialize caffe2 globals (quiet logging) before running unittest.
    workspace.GlobalInit([
        'caffe2',
        '--caffe2_log_level=0',
    ])
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/basic_rnn_test.py
|
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given
class TestListwiseL2rOps(hu.HypothesisTestCase):
    """Tests for the LambdaRankNdcg forward and gradient operators.

    Refactor: the five forward-op configurations and four gradient checks
    were copy-pasted blocks; they are now driven by private helpers.  The
    execution order of the operator runs is preserved exactly (the gradient
    op reads the 'dy' blob produced by the most recent forward run).
    """

    def ref_lambda_rank_loss(
        self, y, r, use_ndcg_as_loss, use_idcg_normalization, use_exp_gain
    ):
        """Numpy reference for one session's LambdaRank loss and d(loss)/dy.

        y: predicted scores; r: relevance labels.  The three flags select
        NDCG-vs-pairwise loss, IDCG normalization, and 2**r vs linear gain.
        """
        n = len(y)

        def get_discounts(v):
            # Position discount 1/log2(rank+1), ranked by descending v.
            x = np.argsort(v)
            d = [0 for _ in range(n)]
            for i in range(n):
                d[x[i]] = 1.0 / np.log2(n - i + 1.0)
            return d

        def sigm(x):
            return 1 / (1 + np.exp(-x))

        def log_sigm(x):
            return -np.log(1 + np.exp(-x))

        dy = np.zeros(n)
        loss = 0
        if np.sum(np.abs(r)) < 1e-6:
            # All-zero relevance: nothing to rank, zero loss and gradient.
            return loss, dy
        if use_ndcg_as_loss and (not use_exp_gain):
            g = [r[i] for i in range(n)]
        else:
            g = [2 ** r[i] for i in range(n)]
        d = get_discounts(r)
        idcg = sum([g[i] * d[i] for i in range(n)])
        if use_idcg_normalization:
            session_weight = max(idcg, 1e-5)
        else:
            session_weight = 1
        d = get_discounts(y)
        if use_ndcg_as_loss:
            dcg = sum(g[i] * d[i] for i in range(n))
            loss = (idcg - dcg) / session_weight
        for i in range(n):
            for j in range(n):
                if i == j:
                    continue
                lambda_weight = np.abs((g[i] - g[j]) * (d[i] - d[j]))
                rank_loss = -log_sigm(y[i] - y[j] if r[i] > r[j] else y[j] - y[i])
                rank_dy = (0.0 if r[i] > r[j] else 1.0) - sigm(-y[i] + y[j])
                if not use_ndcg_as_loss:
                    loss += lambda_weight * rank_loss / session_weight
                dy[i] += lambda_weight * rank_dy / session_weight
        return loss, dy

    def _run_lambda_rank(self, use_ndcg_as_loss, use_idcg_normalization,
                         use_exp_gain):
        """Run the forward LambdaRankNdcg op on the workspace blobs
        'y', 'r', 'session_lengths'; return (loss, dy)."""
        op = core.CreateOperator(
            "LambdaRankNdcg",
            ["y", "r", "session_lengths"],
            ["loss", "dy"],
            use_ndcg_as_loss=use_ndcg_as_loss,
            use_idcg_normalization=use_idcg_normalization,
            use_exp_gain=use_exp_gain,
        )
        workspace.RunOperatorOnce(op)
        return workspace.blobs["loss"], workspace.blobs["dy"]

    def _check_gradient(self, ref_dy, dloss, n, m):
        """Run LambdaRankNdcgGradient (reads the 'dy' blob from the most
        recent forward run) and compare per-session against dloss * ref_dy."""
        op = core.CreateOperator(
            "LambdaRankNdcgGradient",
            ["y", "session_lengths", "dy", "dloss"],
            ["dy_back"],
        )
        workspace.RunOperatorOnce(op)
        dy_back = workspace.blobs["dy_back"]
        for i in range(m):
            np.testing.assert_allclose(
                dy_back[i * n : (i + 1) * n],
                dloss[i] * ref_dy[i * n : (i + 1) * n],
                rtol=1e-5,
                atol=1e-6,
            )

    @given(n=st.integers(1, 20), k=st.integers(2, 5), m=st.integers(3, 5))
    def test_lambda_rank_loss(self, n, k, m):
        """Forward and backward must match the reference for every flag combo."""
        y = np.random.rand(n * m).astype(np.float32)
        r = np.random.randint(k, size=n * m).astype(np.float32)
        # m sessions of length n
        session_lengths = np.repeat(n, m).astype(np.int32)

        # Accumulate per-session reference losses/gradients per flag combo.
        ref_loss = np.empty(0)
        ref_ndcg_loss = np.empty(0)
        ref_ndcg_loss_no_exp = np.empty(0)
        ref_dcg_loss = np.empty(0)
        ref_dcg_loss_no_exp = np.empty(0)
        ref_dy = np.empty(0)
        ref_dy_no_exp = np.empty(0)
        ref_dcg_dy = np.empty(0)
        ref_dcg_dy_no_exp = np.empty(0)
        for i in range(m):
            yi = y[(i) * n : (i + 1) * n]
            ri = r[(i) * n : (i + 1) * n]
            r_loss, r_dy = self.ref_lambda_rank_loss(yi, ri, False, True, False)
            r_ndcg_loss, _ = self.ref_lambda_rank_loss(yi, ri, True, True, True)
            r_ndcg_loss_no_exp, r_dy_no_exp = self.ref_lambda_rank_loss(
                yi, ri, True, True, False
            )
            r_dcg_loss, r_dcg_dy = self.ref_lambda_rank_loss(
                yi, ri, True, False, True
            )
            r_dcg_loss_no_exp, r_dcg_dy_no_exp = self.ref_lambda_rank_loss(
                yi, ri, True, False, False
            )
            ref_loss = np.append(ref_loss, r_loss)
            ref_dy = np.append(ref_dy, r_dy)
            ref_ndcg_loss = np.append(ref_ndcg_loss, r_ndcg_loss)
            ref_ndcg_loss_no_exp = np.append(
                ref_ndcg_loss_no_exp, r_ndcg_loss_no_exp)
            ref_dy_no_exp = np.append(ref_dy_no_exp, r_dy_no_exp)
            ref_dcg_loss = np.append(ref_dcg_loss, r_dcg_loss)
            ref_dcg_dy = np.append(ref_dcg_dy, r_dcg_dy)
            ref_dcg_loss_no_exp = np.append(
                ref_dcg_loss_no_exp, r_dcg_loss_no_exp)
            ref_dcg_dy_no_exp = np.append(ref_dcg_dy_no_exp, r_dcg_dy_no_exp)

        dloss = np.random.random(m).astype(np.float32)
        workspace.blobs["y"] = y
        workspace.blobs["r"] = r
        workspace.blobs["session_lengths"] = session_lengths
        workspace.blobs["dloss"] = dloss

        # Pairwise lambda-rank loss (no NDCG): forward only.
        loss, dy = self._run_lambda_rank(False, True, False)
        np.testing.assert_allclose(loss, ref_loss, rtol=1e-5, atol=1e-6)
        np.testing.assert_allclose(dy, ref_dy, rtol=1e-5, atol=1e-6)

        # NDCG loss, IDCG-normalized, exponential gain (dy matches ref_dy).
        loss, dy = self._run_lambda_rank(True, True, True)
        np.testing.assert_allclose(loss, ref_ndcg_loss, rtol=1e-5, atol=1e-6)
        np.testing.assert_allclose(dy, ref_dy, rtol=1e-5, atol=1e-6)
        self._check_gradient(ref_dy, dloss, n, m)

        # NDCG loss, IDCG-normalized, linear gain.
        loss, dy = self._run_lambda_rank(True, True, False)
        np.testing.assert_allclose(loss, ref_ndcg_loss_no_exp, rtol=1e-5, atol=1e-6)
        np.testing.assert_allclose(dy, ref_dy_no_exp, rtol=1e-5, atol=1e-6)
        self._check_gradient(ref_dy_no_exp, dloss, n, m)

        # DCG loss (no normalization), exponential gain.
        loss, dy = self._run_lambda_rank(True, False, True)
        np.testing.assert_allclose(loss, ref_dcg_loss, rtol=1e-5, atol=1e-6)
        np.testing.assert_allclose(dy, ref_dcg_dy, rtol=1e-5, atol=1e-6)
        self._check_gradient(ref_dcg_dy, dloss, n, m)

        # DCG loss (no normalization), linear gain.
        loss, dy = self._run_lambda_rank(True, False, False)
        np.testing.assert_allclose(loss, ref_dcg_loss_no_exp, rtol=1e-5, atol=1e-6)
        np.testing.assert_allclose(dy, ref_dcg_dy_no_exp, rtol=1e-5, atol=1e-6)
        self._check_gradient(ref_dcg_dy_no_exp, dloss, n, m)
|
pytorch-master
|
caffe2/python/operator_test/listwise_l2r_operator_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
class TestClipTensorByScalingOp(serial.SerializedTestCase):
    """Tests ClipTensorByScaling: rescale the tensor when `val` exceeds
    threshold (optionally threshold * additional_threshold)."""

    @given(n=st.integers(5, 8), d=st.integers(2, 4),
           threshold=st.floats(0.1, 10),
           additional_threshold=st.floats(0.1, 10),
           use_additional_threshold=st.booleans(),
           inplace=st.booleans(),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_clip_tensor_by_scaling(self, n, d, threshold, additional_threshold,
                                    use_additional_threshold, inplace, gc, dc):

        tensor = np.random.rand(n, d).astype(np.float32)
        # val carries the tensor's L2 norm as a 0-d array.
        val = np.array(np.linalg.norm(tensor))
        additional_threshold = np.array([additional_threshold]).astype(np.float32)

        def clip_tensor_by_scaling_ref(tensor_data, val_data,
                                       additional_threshold=None):
            # Reference: scale by final_threshold / val when val exceeds
            # the (possibly combined) threshold; otherwise pass through.
            if additional_threshold is not None:
                final_threshold = threshold * additional_threshold
            else:
                final_threshold = threshold

            if val_data > final_threshold:
                ratio = final_threshold / float(val_data)
                tensor_data = tensor_data * ratio
            return [tensor_data]

        # Input/output lists depend on whether the extra threshold input is
        # used and whether the op writes in place.
        op = core.CreateOperator(
            "ClipTensorByScaling",
            ["tensor", "val"] if not use_additional_threshold else (
                ["tensor", "val", "additional_threshold"]),
            ['Y'] if not inplace else ["tensor"],
            threshold=threshold,
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[tensor, val] if not use_additional_threshold else (
                [tensor, val, additional_threshold]),
            reference=clip_tensor_by_scaling_ref,
        )
# Allow running this test module directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/clip_tensor_op_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
@st.composite
def id_list_batch(draw):
    """Hypothesis strategy producing [lengths_0, values_0, lengths_1, ...]
    for 1-3 id-list inputs that all share one batch size."""
    num_inputs = draw(st.integers(1, 3))
    batch_size = draw(st.integers(5, 10))
    values_dtype = draw(st.sampled_from([np.int32, np.int64]))
    inputs = []
    for _ in range(num_inputs):
        size = draw(st.integers(5, 10))
        values = draw(hnp.arrays(values_dtype, size, st.integers(1, 10)))
        # lengths partitions `values` into exactly `batch_size` segments.
        lengths = draw(hu.lengths(len(values),
                                  min_segments=batch_size,
                                  max_segments=batch_size))
        inputs.append(lengths)
        inputs.append(values)
    return inputs
def merge_id_lists_ref(*args):
    """Reference implementation of MergeIdLists.

    args alternates (lengths_0, values_0, lengths_1, values_1, ...), each
    lengths array partitioning its values array into one segment per batch
    example.  For every example, the id lists from all inputs are unioned,
    deduplicated and sorted.  Returns (merged_lengths, merged_values) in the
    same lengths/values encoding.
    """
    assert len(args) > 0
    assert len(args) % 2 == 0
    all_lengths = args[0::2]
    all_values = args[1::2]
    num_inputs = len(all_lengths)
    batch_size = len(all_lengths[0])
    # Per-input exclusive prefix sums -> slice offsets into each values array.
    offsets = [np.cumsum(np.insert(lens, 0, 0)) for lens in all_lengths]

    def merged_example(j):
        pieces = [all_values[i][offsets[i][j]:offsets[i][j + 1]]
                  for i in range(num_inputs)]
        return np.sort(np.unique(np.concatenate(pieces)))

    merged = [merged_example(j) for j in range(batch_size)]
    merged_lengths = np.array([len(x) for x in merged])
    merged_values = np.concatenate(merged)
    return merged_lengths, merged_values
class TestMergeIdListsOp(serial.SerializedTestCase):
    """Tests the MergeIdLists operator against merge_id_lists_ref."""

    def test_merge_id_lists_ref(self):
        # Verify that the reference implementation is correct!
        lengths_0 = np.array([3, 0, 4], dtype=np.int32)
        values_0 = np.array([1, 5, 6, 2, 4, 5, 6], dtype=np.int64)
        lengths_1 = np.array([3, 2, 1], dtype=np.int32)
        values_1 = np.array([5, 8, 9, 14, 9, 5], dtype=np.int64)

        merged_lengths, merged_values = merge_id_lists_ref(
            lengths_0, values_0, lengths_1, values_1)
        expected_lengths = np.array([5, 2, 4], dtype=np.int32)
        expected_values = np.array([1, 5, 6, 8, 9, 9, 14, 2, 4, 5, 6], dtype=np.int64)
        np.testing.assert_array_equal(merged_lengths, expected_lengths)
        np.testing.assert_array_equal(merged_values, expected_values)

    @serial.given(inputs=id_list_batch(), **hu.gcs_cpu_only)
    def test_merge_id_lists_op(self, inputs, gc, dc):
        # Operator output must match the numpy reference on random batches.
        # Input blob names interleave as lengths_0, values_0, lengths_1, ...
        num_inputs = int(len(inputs) / 2)
        op = core.CreateOperator(
            "MergeIdLists",
            ["{prefix}_{i}".format(prefix=p, i=i)
             for i in range(num_inputs)
             for p in ["lengths", "values"]],
            ["merged_lengths", "merged_values"]
        )
        self.assertDeviceChecks(dc, op, inputs, [0])
        self.assertReferenceChecks(gc, op, inputs, merge_id_lists_ref)
|
pytorch-master
|
caffe2/python/operator_test/merge_id_lists_op_test.py
|
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import unittest
class TestSoftplus(hu.HypothesisTestCase):
    """Device-consistency and gradient checks for the Softplus operator."""

    @given(X=hu.tensor(), **hu.gcs)
    @settings(deadline=10000)
    def test_softplus(self, X, gc, dc):
        """Softplus output must agree across devices and have a valid gradient."""
        softplus_op = core.CreateOperator("Softplus", ["X"], ["Y"])
        # Cross-device agreement on output 0, then gradient wrt input 0.
        self.assertDeviceChecks(dc, softplus_op, [X], [0])
        self.assertGradientChecks(gc, softplus_op, [X], 0, [0])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/softplus_op_test.py
|
from hypothesis import given
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFlatten(hu.HypothesisTestCase):
    """Checks the Flatten operator against a numpy reshape reference."""

    @given(X=hu.tensor(min_dim=2, max_dim=4), **hu.gcs)
    def test_flatten(self, X, gc, dc):
        """Flatten must collapse dims before/after `axis` into a 2-D tensor."""
        for split_axis in range(X.ndim + 1):
            op = core.CreateOperator(
                "Flatten", ["X"], ["Y"], axis=split_axis)

            def flatten_ref(X, axis=split_axis):
                # Dims before `axis` fold into rows, the rest into columns;
                # an empty product is 1, covering axis == 0 and axis == ndim.
                rows = int(np.prod(X.shape[:axis]))
                cols = int(np.prod(X.shape[axis:]))
                return (np.copy(X).reshape(rows, cols), )

            self.assertReferenceChecks(gc, op, [X], flatten_ref)
            # Check over multiple devices
            self.assertDeviceChecks(dc, op, [X], [0])
# Allow running this test module directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/flatten_op_test.py
|
from caffe2.python import core, workspace
from hypothesis import assume, given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestReductionOps(serial.SerializedTestCase):
    """Tests SumElements / SumElementsInt / SumSqrElements / RowwiseMax /
    ColwiseMax against numpy references."""

    @serial.given(n=st.integers(5, 8), **hu.gcs)
    def test_elementwise_sum(self, n, gc, dc):
        # SumElements == np.sum, forward and gradient.
        X = np.random.rand(n).astype(np.float32)

        def sum_op(X):
            return [np.sum(X)]

        op = core.CreateOperator(
            "SumElements",
            ["X"],
            ["y"]
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=sum_op,
        )

        self.assertGradientChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            outputs_to_check=0,
            outputs_with_grads=[0],
        )

    @given(n=st.integers(5, 8), **hu.gcs)
    @settings(deadline=10000)
    def test_elementwise_int_sum(self, n, gc, dc):
        # SumElementsInt == np.sum on int32 input.
        # NOTE(review): rand() yields floats in [0, 1), so astype(np.int32)
        # truncates every entry to 0 — randint would exercise nonzero sums.
        # Confirm intent before changing.
        X = np.random.rand(n).astype(np.int32)

        def sum_op(X):
            return [np.sum(X)]

        op = core.CreateOperator(
            "SumElementsInt",
            ["X"],
            ["y"]
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=sum_op,
        )

    @given(n=st.integers(1, 65536),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_elementwise_sqrsum(self, n, dtype, gc, dc):
        # SumSqrElements == sum(X*X); fp16 runs only on GPU with a looser
        # comparison threshold.
        if dtype == np.float16:
            # fp16 is only supported with CUDA/HIP
            assume(gc.device_type == workspace.GpuDeviceType)
            dc = [d for d in dc if d.device_type == workspace.GpuDeviceType]

        X = np.random.rand(n).astype(dtype)

        def sumsqr_op(X):
            return [np.sum(X * X)]

        op = core.CreateOperator(
            "SumSqrElements",
            ["X"],
            ["y"]
        )

        threshold = 0.01 if dtype == np.float16 else 0.005

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=sumsqr_op,
            threshold=threshold,
        )

    @given(n=st.integers(5, 8), **hu.gcs)
    def test_elementwise_avg(self, n, gc, dc):
        # SumElements with average=1 == np.mean, forward and gradient.
        X = np.random.rand(n).astype(np.float32)

        def avg_op(X):
            return [np.mean(X)]

        op = core.CreateOperator(
            "SumElements",
            ["X"],
            ["y"],
            average=1
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=avg_op,
        )

        self.assertGradientChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            outputs_to_check=0,
            outputs_with_grads=[0],
        )

    @serial.given(batch_size=st.integers(1, 3),
                  m=st.integers(1, 3),
                  n=st.integers(1, 4),
                  **hu.gcs)
    def test_rowwise_max(self, batch_size, m, n, gc, dc):
        # RowwiseMax == np.max over the last axis.
        X = np.random.rand(batch_size, m, n).astype(np.float32)

        def rowwise_max(X):
            return [np.max(X, axis=2)]

        op = core.CreateOperator(
            "RowwiseMax",
            ["x"],
            ["y"]
        )
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=rowwise_max,
        )

    @serial.given(batch_size=st.integers(1, 3),
                  m=st.integers(1, 3),
                  n=st.integers(1, 4),
                  **hu.gcs)
    def test_columnwise_max(self, batch_size, m, n, gc, dc):
        # ColwiseMax == np.max over axis 1; also exercises shape inference.
        X = np.random.rand(batch_size, m, n).astype(np.float32)

        def columnwise_max(X):
            return [np.max(X, axis=1)]

        op = core.CreateOperator(
            "ColwiseMax",
            ["x"],
            ["y"]
        )
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=columnwise_max,
        )
        # Test shape inference logic
        net = core.Net("test_shape_inference")
        workspace.FeedBlob("x", X)
        output = net.ColwiseMax(["x"], ["y"])
        (shapes, types) = workspace.InferShapesAndTypes([net])
        workspace.RunNetOnce(net)
        self.assertEqual(shapes[output], list(workspace.blobs[output].shape))
        self.assertEqual(shapes[output], [X.shape[0]] + [X.shape[2]])
        self.assertEqual(types[output], core.DataType.FLOAT)
|
pytorch-master
|
caffe2/python/operator_test/reduction_ops_test.py
|
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
class TestCTCGreedyDecoderOp(serial.SerializedTestCase):
    """Tests CTCGreedyDecoder against a pure-numpy greedy decoder.

    The numpy reference used to be duplicated verbatim in both test methods;
    it is now shared via :meth:`_greedy_decode`.
    """

    @staticmethod
    def _greedy_decode(inputs, seq_len, batch, max_time, merge_repeated):
        """Reference greedy CTC decode.

        For each batch item, take the argmax class at every time step, drop
        blanks (class 0) and, when ``merge_repeated`` is True, collapse
        consecutive duplicate classes.  ``seq_len=None`` means every
        sequence spans ``max_time`` steps.

        Returns [per-item decoded lengths (int32), concatenated decoded
        values (int32)].
        """
        output_len = np.array([]).astype(np.int32)
        val = np.array([]).astype(np.int32)
        for i in range(batch):
            prev_id = 0
            t_dec = 0
            len_i = seq_len[i] if seq_len is not None else max_time
            for t in range(len_i):
                max_id = np.argmax(inputs[t, i, :])
                if max_id == 0:
                    # Blank label: never emitted, but still resets repeats.
                    prev_id = max_id
                    continue
                if max_id == prev_id and merge_repeated:
                    prev_id = max_id
                    continue
                t_dec += 1
                val = np.append(val, max_id)
                prev_id = max_id
            output_len = np.append(output_len, t_dec)
        return [output_len, val]

    @given(
        batch=st.sampled_from([2, 4, 128, 256]),
        max_time=st.sampled_from([2, 10, 30, 50]),
        num_classes=st.sampled_from([2, 10, 26, 40]),
        merge_repeated=st.sampled_from([True, False]),
        **hu.gcs_cpu_only
    )
    @settings(deadline=10000)
    def test_ctc_greedy_decoder(
        self, batch, max_time,
        num_classes, merge_repeated, gc, dc
    ):
        """Checks the op both with and without an explicit SEQ_LEN input."""
        def input_generater():
            inputs = np.random.rand(max_time, batch, num_classes)\
                .astype(np.float32)
            seq_len = np.random.randint(1, max_time + 1, size=batch)\
                .astype(np.int32)
            return inputs, seq_len

        def ref_ctc_decoder(inputs, seq_len):
            return self._greedy_decode(
                inputs, seq_len, batch, max_time, merge_repeated)

        def ref_ctc_decoder_max_time(inputs):
            return ref_ctc_decoder(inputs, None)

        inputs, seq_len = input_generater()

        op = core.CreateOperator('CTCGreedyDecoder',
                                 ['INPUTS', 'SEQ_LEN'],
                                 ['OUTPUT_LEN', 'VALUES'],
                                 merge_repeated=merge_repeated)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[inputs, seq_len],
            reference=ref_ctc_decoder,
        )

        # Without SEQ_LEN, every sequence is assumed to span max_time.
        op_1 = core.CreateOperator('CTCGreedyDecoder',
                                   ['INPUTS'],
                                   ['OUTPUT_LEN', 'VALUES'],
                                   merge_repeated=merge_repeated)
        self.assertReferenceChecks(
            device_option=gc,
            op=op_1,
            inputs=[inputs],
            reference=ref_ctc_decoder_max_time,
        )

    @given(
        batch=st.sampled_from([2, 4, 128, 256]),
        max_time=st.sampled_from([2, 10, 30, 50]),
        num_classes=st.sampled_from([2, 10, 26, 40]),
        **hu.gcs_cpu_only
    )
    @settings(deadline=10000)
    def test_ctc_greedy_decoder_no_merge_arg(
        self, batch, max_time,
        num_classes, gc, dc
    ):
        """When merge_repeated is omitted, the op defaults to merging."""
        def ref_ctc_decoder_max_time(inputs):
            # The operator's default is merge_repeated=True.
            return self._greedy_decode(inputs, None, batch, max_time, True)

        inputs = np.random.rand(max_time, batch, num_classes)\
            .astype(np.float32)

        op = core.CreateOperator('CTCGreedyDecoder',
                                 ['INPUTS'],
                                 ['OUTPUT_LEN', 'VALUES'])
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[inputs],
            reference=ref_ctc_decoder_max_time,
        )
if __name__ == "__main__":
    import random
    # Fix the seed so data generation is reproducible across runs.
    random.seed(2603)
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/ctc_greedy_decoder_op_test.py
|
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
class TestBucketizeOp(hu.HypothesisTestCase):
    """Checks Bucketize against numpy.digitize with right-closed intervals."""

    @given(
        x=hu.tensor(
            min_dim=1, max_dim=2, dtype=np.float32,
            elements=hu.floats(min_value=-5, max_value=5)),
        **hu.gcs)
    def test_bucketize_op(self, x, gc, dc):
        num_bounds = np.random.randint(low=1, high=5)
        # Bucket boundaries must be sorted ascending.
        boundaries = np.sort(np.random.randn(num_bounds) * 5)

        def ref(x, boundaries):
            return [np.digitize(x, boundaries, right=True)]

        op = core.CreateOperator(
            'Bucketize', ["X"], ["INDICES"], boundaries=boundaries)
        self.assertReferenceChecks(gc, op, [x, boundaries], ref)
if __name__ == "__main__":
    # Allow running this test file directly.
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/bucketize_op_test.py
|
import numpy as np
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestClip(serial.SerializedTestCase):
    """Tests for the Clip operator, with and without explicit min/max."""

    @given(X=hu.tensor(min_dim=0),
           min_=st.floats(min_value=-2, max_value=0),
           max_=st.floats(min_value=0, max_value=2),
           inplace=st.booleans(),
           **hu.gcs)
    @settings(deadline=10000)
    def test_clip(self, X, min_, max_, inplace, gc, dc):
        # Nudge values away from the clip thresholds: the function has kinks
        # there, which break the numeric gradient check.
        if np.isscalar(X):
            X = np.array([], dtype=np.float32)
        else:
            X[np.abs(X - min_) < 0.05] += 0.1
            X[np.abs(X - max_) < 0.05] += 0.1

        def clip_ref(values):
            return (values.clip(min_, max_),)

        op = core.CreateOperator(
            "Clip",
            ["X"], ["X" if inplace else "Y"],
            min=min_,
            max=max_)
        self.assertReferenceChecks(gc, op, [X], clip_ref)
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X], [0])
        # Gradient check wrt X
        self.assertGradientChecks(gc, op, [X], 0, [0])

    @given(X=hu.tensor(min_dim=0),
           inplace=st.booleans(),
           **hu.gcs)
    def test_clip_default(self, X, inplace, gc, dc):
        """Without min/max arguments Clip passes values through unchanged."""
        # Move values away from the origin to avoid the kink at zero.
        if np.isscalar(X):
            X = np.array([], dtype=np.float32)
        else:
            X += 0.04 * np.sign(X)

        def identity_ref(values):
            return (values,)

        op = core.CreateOperator(
            "Clip",
            ["X"], ["X" if inplace else "Y"])
        self.assertReferenceChecks(gc, op, [X], identity_ref)
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
    # Allow running this test file directly.
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/clip_op_test.py
|
from caffe2.python import brew, core, utils, workspace
import caffe2.python.hip_test_util as hiputl
import caffe2.python.hypothesis_test_util as hu
from caffe2.python.model_helper import ModelHelper
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, assume, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestSpatialBN(serial.SerializedTestCase):
    """Tests for the SpatialBN (batch normalization) operator."""

    @serial.given(size=st.integers(7, 10),
                  input_channels=st.integers(1, 10),
                  batch_size=st.integers(0, 3),
                  seed=st.integers(0, 65535),
                  order=st.sampled_from(["NCHW", "NHWC"]),
                  epsilon=st.floats(min_value=1e-5, max_value=1e-2),
                  inplace=st.booleans(),
                  engine=st.sampled_from(["", "CUDNN"]),
                  **hu.gcs)
    def test_spatialbn_test_mode_3d(
            self, size, input_channels, batch_size, seed, order, epsilon,
            inplace, engine, gc, dc):
        """Inference-mode SpatialBN over 5-D (volumetric) inputs."""
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var"],
            ["X" if inplace else "Y"],
            order=order,
            is_test=True,
            epsilon=epsilon,
            engine=engine,
        )

        def reference_spatialbn_test(X, scale, bias, mean, var):
            # In NCHW, the per-channel stats need explicit singleton axes to
            # broadcast over N, D, H, W; in NHWC the channel axis is last and
            # broadcasts naturally.
            if order == "NCHW":
                scale, bias, mean, var = (
                    a[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
                    for a in (scale, bias, mean, var)
                )
            return ((X - mean) / np.sqrt(var + epsilon) * scale + bias,)

        np.random.seed(1701)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(
            batch_size, input_channels, size, size, size
        ).astype(np.float32) - 0.5
        if order == "NHWC":
            X = utils.NCHW2NHWC(X)
        self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
                                   reference_spatialbn_test)
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(0, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
inplace=st.booleans(),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_spatialbn_test_mode_1d(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, engine, gc, dc):
# Currently MIOPEN SpatialBN only supports 2D
if hiputl.run_in_hip(gc, dc):
assume(engine != "CUDNN")
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["X" if inplace else "Y"],
order=order,
is_test=True,
epsilon=epsilon,
engine=engine,
)
def reference_spatialbn_test(X, scale, bias, mean, var):
if order == "NCHW":
scale = scale[np.newaxis, :, np.newaxis]
bias = bias[np.newaxis, :, np.newaxis]
mean = mean[np.newaxis, :, np.newaxis]
var = var[np.newaxis, :, np.newaxis]
return ((X - mean) / np.sqrt(var + epsilon) * scale + bias,)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2)
self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
reference_spatialbn_test)
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(0, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
engine=st.sampled_from(["", "CUDNN"]),
inplace=st.booleans(),
**hu.gcs)
def test_spatialbn_test_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, engine, gc, dc):
# Currently HIP SpatialBN only supports NCHW
if hiputl.run_in_hip(gc, dc):
assume(order == "NCHW")
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["X" if inplace else "Y"],
order=order,
is_test=True,
epsilon=epsilon,
engine=engine
)
def reference_spatialbn_test(X, scale, bias, mean, var):
if order == "NCHW":
scale = scale[np.newaxis, :, np.newaxis, np.newaxis]
bias = bias[np.newaxis, :, np.newaxis, np.newaxis]
mean = mean[np.newaxis, :, np.newaxis, np.newaxis]
var = var[np.newaxis, :, np.newaxis, np.newaxis]
return ((X - mean) / np.sqrt(var + epsilon) * scale + bias,)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
reference_spatialbn_test)
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])
    @given(size=st.integers(1, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(0, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(1e-5, 1e-2),
           momentum=st.floats(0.5, 0.9),
           engine=st.sampled_from(["", "CUDNN"]),
           inplace=st.sampled_from([True, False]),
           **hu.gcs)
    def test_spatialbn_train_mode(
            self, size, input_channels, batch_size, seed, order, epsilon,
            momentum, inplace, engine, gc, dc):
        """Training-mode SpatialBN: checks Y plus the updated running stats
        and the saved (batch) statistics against a numpy reference.
        """
        # Currently HIP SpatialBN only supports NCHW
        if hiputl.run_in_hip(gc, dc):
            assume(order == "NCHW")
        # Variance over a single reduced element is degenerate; require more
        # than one element unless the batch is empty.
        assume(batch_size == 0 or batch_size * size * size > 1)
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "running_mean", "running_var"],
            ["X" if inplace else "Y",
             "running_mean", "running_var", "saved_mean", "saved_var"],
            order=order,
            is_test=False,
            epsilon=epsilon,
            momentum=momentum,
            engine=engine,
        )
        np.random.seed(1701)
        scale = np.random.randn(input_channels).astype(np.float32)
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.randn(
            batch_size, input_channels, size, size).astype(np.float32)
        if order == "NHWC":
            X = np.transpose(X, (0, 2, 3, 1))

        def batch_norm_ref(X, scale, bias, running_mean, running_var):
            """Numpy reference for one training-mode batch-norm step."""
            if batch_size == 0:
                # Empty batch: zero output / saved stats, running stats kept.
                Y = np.zeros(X.shape)
                saved_mean = np.zeros(running_mean.shape)
                saved_var = np.zeros(running_var.shape)
                return (Y, running_mean, running_var, saved_mean, saved_var)
            if order == "NHWC":
                X = np.transpose(X, (0, 3, 1, 2))
            C = X.shape[1]
            reduce_size = batch_size * size * size
            saved_mean = np.mean(X, (0, 2, 3))
            saved_var = np.var(X, (0, 2, 3))
            # The running variance uses the unbiased (Bessel-corrected)
            # estimate of the batch variance.
            if reduce_size == 1:
                unbias_scale = float('inf')
            else:
                unbias_scale = reduce_size / (reduce_size - 1)
            running_mean = momentum * running_mean + (
                1.0 - momentum) * saved_mean
            running_var = momentum * running_var + (
                1.0 - momentum) * unbias_scale * saved_var
            std = np.sqrt(saved_var + epsilon)
            broadcast_shape = (1, C, 1, 1)
            Y = (X - np.reshape(saved_mean, broadcast_shape)) / np.reshape(
                std, broadcast_shape) * np.reshape(
                scale, broadcast_shape) + np.reshape(bias, broadcast_shape)
            if order == "NHWC":
                Y = np.transpose(Y, (0, 2, 3, 1))
            # The op saves the inverse std (1/std), not the variance.
            return (Y, running_mean, running_var, saved_mean, 1.0 / std)

        self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
                                   batch_norm_ref)
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var],
                                [0, 1, 2, 3, 4])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(0, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
momentum=st.floats(0.5, 0.9),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_spatialbn_train_mode_gradient_check(
self, size, input_channels, batch_size, seed, order, epsilon,
momentum, engine, gc, dc):
# Currently HIP SpatialBN only supports NCHW
if hiputl.run_in_hip(gc, dc):
assume(order == "NCHW")
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y", "mean", "var", "saved_mean", "saved_var"],
order=order,
is_test=False,
epsilon=epsilon,
momentum=momentum,
engine=engine
)
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
for input_to_check in [0, 1, 2]: # dX, dScale, dBias
self.assertGradientChecks(gc, op, [X, scale, bias, mean, var],
input_to_check, [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(0, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
momentum=st.floats(min_value=0.5, max_value=0.9),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
@settings(deadline=10000)
def test_spatialbn_train_mode_gradient_check_1d(
self, size, input_channels, batch_size, seed, order, epsilon,
momentum, engine, gc, dc):
# Currently MIOPEN SpatialBN only supports 2D
if hiputl.run_in_hip(gc, dc):
assume(engine != "CUDNN")
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y", "mean", "var", "saved_mean", "saved_var"],
order=order,
is_test=False,
epsilon=epsilon,
momentum=momentum,
engine=engine,
)
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2)
for input_to_check in [0, 1, 2]: # dX, dScale, dBias
self.assertGradientChecks(gc, op, [X, scale, bias, mean, var],
input_to_check, [0], stepsize=0.01)
    @given(N=st.integers(0, 5),
           C=st.integers(1, 10),
           H=st.integers(1, 5),
           W=st.integers(1, 5),
           epsilon=st.floats(1e-5, 1e-2),
           momentum=st.floats(0.5, 0.9),
           order=st.sampled_from(["NCHW", "NHWC"]),
           num_batches=st.integers(2, 5),
           in_place=st.booleans(),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    def test_spatial_bn_multi_batch(
            self, N, C, H, W, epsilon, momentum, order, num_batches, in_place,
            engine, gc, dc):
        """Training-mode SpatialBN with num_batches > 1, where batch_mean and
        batch_var arrive as pre-accumulated sums over all sub-batches.
        """
        if in_place:
            outputs = ["Y", "mean", "var", "batch_mean", "batch_var"]
        else:
            outputs = ["Y", "mean", "var", "saved_mean", "saved_var"]
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var", "batch_mean", "batch_var"],
            outputs,
            order=order,
            is_test=False,
            epsilon=epsilon,
            momentum=momentum,
            num_batches=num_batches,
            engine=engine,
        )
        if order == "NCHW":
            X = np.random.randn(N, C, H, W).astype(np.float32)
        else:
            X = np.random.randn(N, H, W, C).astype(np.float32)
        scale = np.random.randn(C).astype(np.float32)
        bias = np.random.randn(C).astype(np.float32)
        mean = np.random.randn(C).astype(np.float32)
        var = np.random.rand(C).astype(np.float32)
        # Accumulated (not yet normalized) sums over the virtual sub-batches.
        batch_mean = np.random.rand(C).astype(np.float32) - 0.5
        batch_var = np.random.rand(C).astype(np.float32) + 1.0
        inputs = [X, scale, bias, mean, var, batch_mean, batch_var]

        def spatial_bn_multi_batch_ref(
                X, scale, bias, mean, var, batch_mean, batch_var):
            """Numpy reference: normalize the accumulated sums into batch
            statistics, update the running stats, then apply the affine
            normalization.
            """
            if N == 0:
                batch_mean = np.zeros(C).astype(np.float32)
                batch_var = np.zeros(C).astype(np.float32)
            else:
                size = num_batches * N * H * W
                batch_mean /= size
                # Var = E[X^2] - E[X]^2 from the accumulated second moment.
                batch_var = batch_var / size - np.square(batch_mean)
                mean = momentum * mean + (1.0 - momentum) * batch_mean
                # Running variance uses the unbiased estimate.
                var = momentum * var + (1.0 - momentum) * (
                    size / (size - 1)) * batch_var
                # From here on batch_var holds the inverse std.
                batch_var = 1.0 / np.sqrt(batch_var + epsilon)
            if order == "NCHW":
                scale = np.reshape(scale, (C, 1, 1))
                bias = np.reshape(bias, (C, 1, 1))
                batch_mean = np.reshape(batch_mean, (C, 1, 1))
                batch_var = np.reshape(batch_var, (C, 1, 1))
            Y = (X - batch_mean) * batch_var * scale + bias
            if order == "NCHW":
                batch_mean = np.reshape(batch_mean, (C))
                batch_var = np.reshape(batch_var, (C))
            return (Y, mean, var, batch_mean, batch_var)

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=spatial_bn_multi_batch_ref,
        )
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2, 3, 4])
    @given(N=st.integers(0, 5),
           C=st.integers(1, 10),
           H=st.integers(1, 5),
           W=st.integers(1, 5),
           epsilon=st.floats(1e-5, 1e-2),
           order=st.sampled_from(["NCHW", "NHWC"]),
           num_batches=st.integers(2, 5),
           in_place=st.booleans(),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    @settings(deadline=None)
    def test_spatial_bn_multi_batch_grad(
            self, N, C, H, W, epsilon, order, num_batches, in_place, engine,
            gc, dc):
        """SpatialBNGradient with num_batches > 1: dscale_sum / dbias_sum are
        pre-accumulated over the sub-batches and averaged by the op.
        """
        if in_place:
            outputs = ["dX", "dscale_sum", "dbias_sum"]
        else:
            outputs = ["dX", "dscale", "dbias"]
        op = core.CreateOperator(
            "SpatialBNGradient",
            ["X", "scale", "dY", "mean", "rstd", "dscale_sum", "dbias_sum"],
            outputs,
            order=order,
            epsilon=epsilon,
            num_batches=num_batches,
            engine=engine,
        )
        if order == "NCHW":
            dY = np.random.randn(N, C, H, W).astype(np.float32)
            X = np.random.randn(N, C, H, W).astype(np.float32)
        else:
            dY = np.random.randn(N, H, W, C).astype(np.float32)
            X = np.random.randn(N, H, W, C).astype(np.float32)
        scale = np.random.randn(C).astype(np.float32)
        mean = np.random.randn(C).astype(np.float32)
        rstd = np.random.rand(C).astype(np.float32)
        dscale_sum = np.random.randn(C).astype(np.float32)
        dbias_sum = np.random.randn(C).astype(np.float32)
        inputs = [X, scale, dY, mean, rstd, dscale_sum, dbias_sum]

        def spatial_bn_multi_batch_grad_ref(
                X, scale, dY, mean, rstd, dscale_sum, dbias_sum):
            """Numpy reference: dX = alpha * dY + beta * X + gamma."""
            if N == 0:
                # Empty batch: all gradients are zero.
                dscale = np.zeros(C).astype(np.float32)
                dbias = np.zeros(C).astype(np.float32)
                alpha = np.zeros(C).astype(np.float32)
                beta = np.zeros(C).astype(np.float32)
                gamma = np.zeros(C).astype(np.float32)
            else:
                # The op averages the accumulated sums over the sub-batches.
                dscale = dscale_sum / num_batches
                dbias = dbias_sum / num_batches
                alpha = scale * rstd
                beta = -alpha * dscale * rstd / (N * H * W)
                gamma = alpha * (mean * dscale * rstd - dbias) / (N * H * W)
            if order == "NCHW":
                alpha = np.reshape(alpha, (C, 1, 1))
                beta = np.reshape(beta, (C, 1, 1))
                gamma = np.reshape(gamma, (C, 1, 1))
            dX = alpha * dY + beta * X + gamma
            return (dX, dscale, dbias)

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=spatial_bn_multi_batch_grad_ref,
        )
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(0, 3),
seed=st.integers(0, 65535),
epsilon=st.floats(1e-5, 1e-2),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_spatialbn_brew_wrapper(
self, size, input_channels, batch_size, seed, epsilon,
engine, gc, dc):
np.random.seed(seed)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32)
workspace.FeedBlob('X', X)
model = ModelHelper(name='test_spatialbn_brew_wrapper')
brew.spatial_bn(
model,
'X',
'Y',
input_channels,
epsilon=epsilon,
is_test=False,
)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/spatial_bn_op_test.py
|
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, utils, workspace
from hypothesis import assume, given
def _cudnn_supports(dilation=False, nhwc=False):
    """Return True if the installed cuDNN supports this configuration."""
    version = workspace.GetCuDNNVersion()
    # Dilated convolutions require cuDNN v6+, and dilation is never
    # supported together with NHWC layout.
    if dilation:
        return version >= 6000 and not nhwc
    return True
def _conv_1d_output_size(size, kernel, pad, dilation, stride):
return max(1, int((size + pad * 2 - (dilation * (kernel - 1) + 1)) / stride) + 1)
def _conv_2d_output_size(size, kernel, pad_h, pad_w, dilation, stride_h, stride_w):
return [
_conv_1d_output_size(size, kernel, pad_h, dilation, stride_h),
_conv_1d_output_size(size, kernel, pad_w, dilation, stride_w),
]
def _conv_2d_offsets_dims(
batch_size,
size,
kernel,
pad_h,
pad_w,
dilation,
stride_h,
stride_w,
deformable_group,
):
dims = [batch_size, 2 * kernel * kernel * deformable_group]
dims.extend(
_conv_2d_output_size(size, kernel, pad_h, pad_w, dilation, stride_h, stride_w)
)
return dims
def _conv_2d_random_offsets(batch_size, kernel, dims, num_deformable_group):
o = []
for y0 in range(0, kernel):
for x0 in range(0, kernel):
# stay away from integer offsets which correspond to "ridges" on the
# interpolated surface resulting in less precise estimates
x = np.random.randint(0, kernel) + np.random.uniform(0.05, 0.95)
y = np.random.randint(0, kernel) + np.random.uniform(0.05, 0.95)
o.append(y - y0)
o.append(x - x0)
o = o * num_deformable_group
e = []
for v in o:
e.append([[v] * dims[1]] * dims[0])
return np.array([e] * batch_size).astype(np.float32)
def _conv_2d_shuffle_offsets(
    batch_size, kernel, dims, num_deformable_group, input_channels, output_channels
):
    """Integer offsets that permute the kernel taps, together with the weight
    tensor (tap selection counts) a plain Conv would need to reproduce the
    permuted result.  Weights are returned in NHWC layout."""
    offsets = []
    tap_counts = [[0 for _ in range(kernel)] for _ in range(kernel)]
    for y0 in range(0, kernel):
        for x0 in range(0, kernel):
            x = np.random.randint(0, kernel)
            y = np.random.randint(0, kernel)
            offsets.append(y - y0)
            offsets.append(x - x0)
            tap_counts[y][x] += 1
    offsets = offsets * num_deformable_group
    expanded = [[[v] * int(dims[1])] * int(dims[0]) for v in offsets]
    w0 = [[tap_counts] * input_channels] * output_channels
    return (
        np.array([expanded] * batch_size).astype(np.float32),
        utils.NCHW2NHWC(np.array(w0).astype(np.float32)),
    )
class TestConvolution(hu.HypothesisTestCase):
    """Tests for the DeformConv operator against the plain Conv operator."""

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(
        stride=st.integers(1, 3),
        pad=st.integers(0, 3),
        kernel=st.integers(1, 5),
        dilation=st.integers(1, 3),
        size=st.integers(7, 10),
        input_channels=st.integers(1, 8),
        output_channels=st.integers(1, 8),
        batch_size=st.integers(1, 3),
        order=st.sampled_from(["NCHW"]),
        engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
        use_bias=st.booleans(),
        deformable_group=st.integers(1, 3),
        **hu.gcs_gpu_only
    )
    def test_null_offset_convolution(
        self,
        stride,
        pad,
        kernel,
        dilation,
        size,
        input_channels,
        output_channels,
        batch_size,
        order,
        engine,
        use_bias,
        deformable_group,
        gc,
        dc,
    ):
        """With all-zero offsets, DeformConv must equal a regular Conv."""
        dkernel = dilation * (kernel - 1) + 1
        if gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN":
            assume(_cudnn_supports(dilation=(dilation > 1), nhwc=(order == "NHWC")))
        assume(engine != "MKLDNN" or use_bias is True)
        op = core.CreateOperator(
            "DeformConv",
            ["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
            ["Y"],
            stride=stride,
            kernel=kernel,
            dilation=dilation,
            pad=pad,
            order=order,
            engine=engine,
            deformable_group=deformable_group,
        )
        offset_dims = _conv_2d_offsets_dims(
            batch_size,
            size,
            kernel,
            pad,
            pad,
            dilation,
            stride,
            stride,
            deformable_group,
        )
        X = (
            np.random.rand(batch_size, size, size, input_channels).astype(np.float32)
            - 0.5
        )
        # Zero offsets: every sampling point coincides with the regular grid.
        o = np.zeros(tuple(offset_dims), np.float32)
        w = (
            np.random.rand(output_channels, kernel, kernel, input_channels).astype(
                np.float32
            )
            - 0.5
        )
        b = np.random.rand(output_channels).astype(np.float32) - 0.5
        if order == "NCHW":
            X = utils.NHWC2NCHW(X)
            w = utils.NHWC2NCHW(w)
        inputs = [X, o, w, b] if use_bias else [X, o, w]
        # Error handling path.
        # NOTE(review): both sides of this `or` are identical — presumably one
        # clause was meant to test the other spatial dimension; confirm intent.
        if size + pad + pad < dkernel or size + pad + pad < dkernel:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return
        if input_channels % deformable_group != 0:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return
        if output_channels % deformable_group != 0:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return

        def reference_conv_op(*args):
            # Run a plain Conv with the same parameters as ground truth.
            reference_op = core.CreateOperator(
                "Conv",
                ["X", "w", "b"] if use_bias else ["X", "w"],
                ["Y0"],
                stride=stride,
                kernel=kernel,
                dilation=dilation,
                pad=pad,
                order=order,
                engine=engine,
                device_option=gc,
            )
            workspace.RunOperatorOnce(reference_op)
            reference_blob = workspace.FetchBlob("Y0")
            return (reference_blob,)

        self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(
        stride=st.integers(1, 3),
        pad=st.integers(0, 0),
        kernel=st.integers(1, 5),
        dilation=st.integers(1, 3),
        size=st.integers(7, 10),
        input_channels=st.integers(1, 8),
        output_channels=st.integers(1, 8),
        batch_size=st.integers(1, 3),
        order=st.sampled_from(["NCHW"]),
        engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
        use_bias=st.booleans(),
        deformable_group=st.integers(1, 4),
        **hu.gcs_gpu_only
    )
    def test_flat_input_convolution(
        self,
        stride,
        pad,
        kernel,
        dilation,
        size,
        input_channels,
        output_channels,
        batch_size,
        order,
        engine,
        use_bias,
        deformable_group,
        gc,
        dc,
    ):
        """On constant (flat) input, arbitrary offsets cannot change the
        sampled values, so DeformConv must match a plain Conv."""
        dkernel = dilation * (kernel - 1) + 1
        if gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN":
            assume(_cudnn_supports(dilation=(dilation > 1), nhwc=(order == "NHWC")))
        assume(engine != "MKLDNN" or use_bias is True)
        op = core.CreateOperator(
            "DeformConv",
            ["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
            ["Y"],
            stride=stride,
            kernel=kernel,
            dilation=dilation,
            pad=pad,
            order=order,
            engine=engine,
            deformable_group=deformable_group,
        )
        # Constant input: bilinear sampling anywhere returns the same value.
        X = np.ones((batch_size, size, size, input_channels), np.float32) - 0.5
        output_size = _conv_2d_output_size(
            size, kernel, pad, pad, dilation, stride, stride
        )
        o = _conv_2d_random_offsets(batch_size, kernel, output_size, deformable_group)
        w = np.ones((output_channels, kernel, kernel, input_channels), np.float32) - 0.5
        b = np.random.rand(output_channels).astype(np.float32) - 0.5
        if order == "NCHW":
            X = utils.NHWC2NCHW(X)
            w = utils.NHWC2NCHW(w)
        inputs = [X, o, w, b] if use_bias else [X, o, w]
        # Error handling path.
        # NOTE(review): both sides of this `or` are identical — presumably one
        # clause was meant to test the other spatial dimension; confirm intent.
        if size + pad + pad < dkernel or size + pad + pad < dkernel:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return
        if input_channels % deformable_group != 0:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return
        if output_channels % deformable_group != 0:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return

        def reference_conv_op(*args):
            # A plain Conv with the same weights serves as ground truth.
            reference_op = core.CreateOperator(
                "Conv",
                ["X", "w", "b"] if use_bias else ["X", "w"],
                ["Y0"],
                stride=stride,
                kernel=kernel,
                dilation=dilation,
                pad=pad,
                order=order,
                engine=engine,
                device_option=gc,
            )
            workspace.RunOperatorOnce(reference_op)
            reference_blob = workspace.FetchBlob("Y0")
            return (reference_blob,)

        self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(
        stride=st.integers(1, 1),
        pad=st.integers(0, 0),
        kernel=st.integers(1, 5),
        dilation=st.integers(1, 1),
        size=st.integers(7, 10),
        input_channels=st.integers(1, 8),
        output_channels=st.integers(1, 8),
        batch_size=st.integers(1, 3),
        order=st.sampled_from(["NCHW"]),
        engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
        use_bias=st.booleans(),
        deformable_group=st.integers(1, 4),
        **hu.gcs_gpu_only
    )
    def test_shuffle_input_convolution(
        self,
        stride,
        pad,
        kernel,
        dilation,
        size,
        input_channels,
        output_channels,
        batch_size,
        order,
        engine,
        use_bias,
        deformable_group,
        gc,
        dc,
    ):
        """Integer offsets that permute the kernel taps must equal a plain
        Conv whose weights count how often each tap was selected."""
        dkernel = dilation * (kernel - 1) + 1
        if gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN":
            assume(_cudnn_supports(dilation=(dilation > 1), nhwc=(order == "NHWC")))
        assume(engine != "MKLDNN" or use_bias is True)
        op = core.CreateOperator(
            "DeformConv",
            ["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
            ["Y"],
            stride=stride,
            kernel=kernel,
            dilation=dilation,
            pad=pad,
            order=order,
            engine=engine,
            deformable_group=deformable_group,
        )
        X = (
            np.random.rand(batch_size, size, size, input_channels).astype(np.float32)
            - 0.5
        )
        output_size = _conv_2d_output_size(
            size, kernel, pad, pad, dilation, stride, stride
        )
        o, w0 = _conv_2d_shuffle_offsets(
            batch_size,
            kernel,
            output_size,
            deformable_group,
            input_channels,
            output_channels,
        )
        w = np.ones((output_channels, kernel, kernel, input_channels), np.float32)
        b = np.random.rand(output_channels).astype(np.float32) - 0.5
        if order == "NCHW":
            X = utils.NHWC2NCHW(X)
            w = utils.NHWC2NCHW(w)
            w0 = utils.NHWC2NCHW(w0)
        inputs = [X, o, w, b] if use_bias else [X, o, w]
        # Error handling path.
        # NOTE(review): both sides of this `or` are identical — presumably one
        # clause was meant to test the other spatial dimension; confirm intent.
        if size + pad + pad < dkernel or size + pad + pad < dkernel:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return
        if input_channels % deformable_group != 0:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return
        if output_channels % deformable_group != 0:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return

        def reference_conv_op(*args):
            # Feed the tap-count weights and run a plain Conv as ground truth.
            with core.DeviceScope(gc):
                workspace.FeedBlob("w0", w0)
            reference_op = core.CreateOperator(
                "Conv",
                ["X", "w0", "b"] if use_bias else ["X", "w0"],
                ["Y0"],
                stride=stride,
                kernel=kernel,
                dilation=dilation,
                pad=pad,
                order=order,
                engine=engine,
                device_option=gc,
            )
            workspace.RunOperatorOnce(reference_op)
            reference_blob = workspace.FetchBlob("Y0")
            return (reference_blob,)

        self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
    # CUDNN does NOT support different padding values and we skip it
    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(
        stride_h=st.integers(1, 3),
        stride_w=st.integers(1, 3),
        pad_h=st.integers(0, 3),
        pad_w=st.integers(0, 3),
        kernel=st.integers(2, 5),
        size=st.integers(1, 8),
        input_channels=st.integers(1, 3),
        output_channels=st.integers(1, 3),
        batch_size=st.integers(1, 3),
        order=st.sampled_from(["NCHW"]),
        shared_buffer=st.booleans(),
        use_bias=st.booleans(),
        deformable_group=st.integers(1, 3),
        **hu.gcs_gpu_only
    )
    def test_conv_separate_stride_pad_gradients(
        self,
        stride_h,
        stride_w,
        pad_h,
        pad_w,
        kernel,
        size,
        input_channels,
        output_channels,
        batch_size,
        order,
        shared_buffer,
        use_bias,
        deformable_group,
        gc,
        dc,
    ):
        """Numeric gradient check for DeformConv with asymmetric stride/pad."""
        op = core.CreateOperator(
            "DeformConv",
            ["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
            ["Y"],
            stride_h=stride_h,
            stride_w=stride_w,
            pad_t=pad_h,
            pad_l=pad_w,
            pad_b=pad_h,
            pad_r=pad_w,
            kernel=kernel,
            order=order,
            shared_buffer=int(shared_buffer),
            deformable_group=deformable_group,
        )
        X = (
            np.random.rand(batch_size, size, size, input_channels).astype(np.float32)
            - 0.5
        )
        output_size = _conv_2d_output_size(
            size, kernel, pad_h, pad_w, 1, stride_h, stride_w
        )
        o = _conv_2d_random_offsets(batch_size, kernel, output_size, deformable_group)
        w = (
            np.random.rand(output_channels, kernel, kernel, input_channels).astype(
                np.float32
            )
            - 0.5
        )
        b = np.random.rand(output_channels).astype(np.float32) - 0.5
        if order == "NCHW":
            X = utils.NHWC2NCHW(X)
            w = utils.NHWC2NCHW(w)
        inputs = [X, o, w, b] if use_bias else [X, o, w]
        # Error handling path.
        if size + pad_h * 2 < kernel or size + pad_w * 2 < kernel:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return
        if input_channels % deformable_group != 0:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return
        if output_channels % deformable_group != 0:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return
        self.assertDeviceChecks(dc, op, inputs, [0])
        # Check gradients w.r.t. every input (X, offsets, weights[, bias]).
        for i in range(len(inputs)):
            self.assertGradientChecks(gc, op, inputs, i, [0])
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 3),
**hu.gcs_gpu_only
)
def test_conv_gradients(
self,
stride,
pad,
kernel,
dilation,
size,
input_channels,
output_channels,
batch_size,
order,
engine,
use_bias,
deformable_group,
gc,
dc,
):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN":
assume(_cudnn_supports(dilation=(dilation > 1), nhwc=(order == "NHWC")))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
X = (
np.random.rand(batch_size, size, size, input_channels).astype(np.float32)
- 0.5
)
output_size = _conv_2d_output_size(
size, kernel, pad, pad, dilation, stride, stride
)
o = _conv_2d_random_offsets(batch_size, kernel, output_size, deformable_group)
w = (
np.random.rand(output_channels, kernel, kernel, input_channels).astype(
np.float32
)
- 0.5
)
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = utils.NHWC2NCHW(X)
w = utils.NHWC2NCHW(w)
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
if size + pad + pad < dkernel or size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/deform_conv_test.py
|
import numpy as np
from hypothesis import given, settings, assume
import hypothesis.strategies as st
from caffe2.python import core, utils, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestLocallyConnectedOp(serial.SerializedTestCase):
    """Reference/device/gradient tests for locally connected (LC) operators
    in 1-D, 2-D, and 3-D, each checked against a naive NumPy implementation.
    """
    @given(N=st.integers(1, 3),
           C=st.integers(1, 3),
           H=st.integers(1, 5),
           W=st.integers(1, 5),
           M=st.integers(1, 3),
           kernel=st.integers(1, 3),
           op_name=st.sampled_from(["LC", "LC2D"]),
           order=st.sampled_from(["NCHW", "NHWC"]),
           use_bias=st.booleans(),
           **hu.gcs)
    @settings(deadline=10000)
    def test_lc_2d(
            self, N, C, H, W, M, kernel, op_name, order, use_bias, gc, dc):
        """2-D locally connected layer vs. a brute-force NumPy reference."""
        # Clamp the kernel so it never exceeds the spatial input size.
        if H < kernel:
            kernel = H
        if W < kernel:
            kernel = W
        # NOTE(review): this discards examples unless C == kernel * N;
        # presumably it prunes the example space — confirm the intent.
        assume(C == kernel * N)
        op = core.CreateOperator(
            op_name,
            ["X", "W", "b"] if use_bias else ["X", "W"],
            ["Y"],
            kernels=[kernel, kernel],
            order=order,
            engine="",
        )
        # Output spatial extent for a stride-1, unpadded window.
        Y_H = H - kernel + 1
        Y_W = W - kernel + 1
        # NOTE(review): from here on, `W` (the drawn width) is rebound to the
        # weight tensor — confusing shadowing, but intentional in this test.
        if order == "NCHW":
            X = np.random.rand(N, C, H, W).astype(np.float32) - 0.5
            W = np.random.rand(Y_H, Y_W, M, C, kernel,
                               kernel).astype(np.float32) - 0.5
        else:
            X = np.random.rand(N, H, W, C).astype(np.float32) - 0.5
            W = np.random.rand(Y_H, Y_W, M, kernel, kernel,
                               C).astype(np.float32) - 0.5
        b = np.random.rand(Y_H, Y_W, M).astype(np.float32) - 0.5
        inputs = [X, W, b] if use_bias else [X, W]
        def lc_2d_nchw(X, W, b=None):
            # Brute-force 2-D locally connected forward pass (NCHW layout).
            N, C, XH, XW = X.shape
            YH, YW, M, _, KH, KW = W.shape
            def conv(n, m, yh, yw):
                # NOTE(review): `sum` shadows the builtin; local to this
                # helper only.
                sum = b[yh, yw, m] if b is not None else 0
                for c in range(C):
                    for kh in range(KH):
                        for kw in range(KW):
                            hh = yh + kh
                            ww = yw + kw
                            sum += X[n, c, hh, ww] * W[yh, yw, m, c, kh, kw]
                return sum
            output = np.zeros((N, M, YH, YW), dtype=np.float32)
            for n in range(N):
                for m in range(M):
                    for yh in range(YH):
                        for yw in range(YW):
                            output[n, m, yh, yw] = conv(n, m, yh, yw)
            return [output]
        def lc_2d_nhwc(X, W, b=None):
            # NHWC reference: transpose to NCHW, reuse the NCHW reference,
            # then transpose the output back.
            XT = utils.NHWC2NCHW(X)
            WT = np.transpose(W, [0, 1, 2, 5, 3, 4])
            output = lc_2d_nchw(XT, WT, b)
            return [utils.NCHW2NHWC(output[0])]
        ref_op = lc_2d_nchw if order == "NCHW" else lc_2d_nhwc
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=ref_op,
        )
        self.assertDeviceChecks(dc, op, inputs, [0])
        for i in range(len(inputs)):
            self.assertGradientChecks(gc, op, inputs, i, [0])
    @given(N=st.integers(1, 3),
           C=st.integers(1, 3),
           size=st.integers(1, 5),
           M=st.integers(1, 3),
           kernel=st.integers(1, 3),
           op_name=st.sampled_from(["LC", "LC1D"]),
           use_bias=st.booleans(),
           **hu.gcs)
    @settings(deadline=None)
    # Increased timeout from 1 second to 5 for ROCM
    def test_lc_1d(self, N, C, size, M, kernel, op_name, use_bias, gc, dc):
        """1-D locally connected layer vs. a brute-force NumPy reference."""
        # Clamp the kernel so it never exceeds the input length.
        if size < kernel:
            kernel = size
        op = core.CreateOperator(
            op_name,
            ["X", "W", "b"] if use_bias else ["X", "W"],
            ["Y"],
            kernels=[kernel],
            order="NCHW",
            engine="",
        )
        # Output length for a stride-1, unpadded window.
        L = size - kernel + 1
        X = np.random.rand(N, C, size).astype(np.float32) - 0.5
        W = np.random.rand(L, M, C, kernel).astype(np.float32) - 0.5
        b = np.random.rand(L, M).astype(np.float32) - 0.5
        inputs = [X, W, b] if use_bias else [X, W]
        def lc_1d_nchw(X, W, b=None):
            # Brute-force 1-D locally connected forward pass.
            N, C, XL = X.shape
            YL, M, _, KL = W.shape
            def conv(n, m, yl):
                sum = b[yl, m] if b is not None else 0
                for c in range(C):
                    for kl in range(KL):
                        ll = yl + kl
                        sum += X[n, c, ll] * W[yl, m, c, kl]
                return sum
            output = np.zeros((N, M, YL), dtype=np.float32)
            for n in range(N):
                for m in range(M):
                    for yl in range(YL):
                        output[n, m, yl] = conv(n, m, yl)
            return [output]
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=lc_1d_nchw,
        )
        self.assertDeviceChecks(dc, op, inputs, [0])
        for i in range(len(inputs)):
            self.assertGradientChecks(gc, op, inputs, i, [0])
    @given(N=st.integers(1, 1),
           C=st.integers(1, 1),
           T=st.integers(2, 2),
           H=st.integers(2, 2),
           W=st.integers(2, 2),
           M=st.integers(1, 1),
           kernel=st.integers(2, 2),
           op_name=st.sampled_from(["LC", "LC3D"]),
           use_bias=st.booleans(),
           **hu.gcs)
    @settings(deadline=None)
    def test_lc_3d(self, N, C, T, H, W, M, kernel, op_name, use_bias, gc, dc):
        """3-D locally connected layer vs. a brute-force NumPy reference.

        The strategies above pin each dimension to a single value, so this
        effectively runs one tiny fixed-size configuration per bias setting.
        """
        # Clamp the kernel so it never exceeds any spatial input size.
        if T < kernel:
            kernel = T
        if H < kernel:
            kernel = H
        if W < kernel:
            kernel = W
        op = core.CreateOperator(
            op_name,
            ["X", "W", "b"] if use_bias else ["X", "W"],
            ["Y"],
            kernels=[kernel, kernel, kernel],
            order="NCHW",
            engine="",
        )
        # Output extents for a stride-1, unpadded window.
        Y_T = T - kernel + 1
        Y_H = H - kernel + 1
        Y_W = W - kernel + 1
        X = np.random.rand(N, C, T, H, W).astype(np.float32) - 0.5
        # NOTE(review): `W` (the width) is rebound to the weight tensor here,
        # matching the 2-D test above.
        W = np.random.rand(Y_T, Y_H, Y_W, M, C, kernel,
                           kernel, kernel).astype(np.float32) - 0.5
        b = np.random.rand(Y_T, Y_H, Y_W, M).astype(np.float32) - 0.5
        inputs = [X, W, b] if use_bias else [X, W]
        def lc_3d_nchw(X, W, b=None):
            # Brute-force 3-D locally connected forward pass.
            N, C, XT, XH, XW = X.shape
            YT, YH, YW, M, _, KT, KH, KW = W.shape
            def conv(n, m, yt, yh, yw):
                sum = b[yt, yh, yw, m] if b is not None else 0
                for c in range(C):
                    for kt in range(KT):
                        for kh in range(KH):
                            for kw in range(KW):
                                tt = yt + kt
                                hh = yh + kh
                                ww = yw + kw
                                sum += X[n, c, tt, hh, ww] * \
                                    W[yt, yh, yw, m, c, kt, kh, kw]
                return sum
            output = np.zeros((N, M, YT, YH, YW), dtype=np.float32)
            for n in range(N):
                for m in range(M):
                    for yt in range(YT):
                        for yh in range(YH):
                            for yw in range(YW):
                                output[n, m, yt, yh, yw] = conv(
                                    n, m, yt, yh, yw)
            return [output]
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=lc_3d_nchw,
        )
        self.assertDeviceChecks(dc, op, inputs, [0])
        for i in range(len(inputs)):
            self.assertGradientChecks(gc, op, inputs, i, [0])
|
pytorch-master
|
caffe2/python/operator_test/locally_connected_op_test.py
|
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestSparseGradient(hu.HypothesisTestCase):
    """Gradient check for the sparse unsorted-segment weighted-sum op."""

    @given(M=st.integers(min_value=5, max_value=20),
           N=st.integers(min_value=5, max_value=20),
           K=st.integers(min_value=5, max_value=15),
           sparsity=st.floats(min_value=0.1, max_value=1.0),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_sparse_gradient(self, M, N, K, sparsity, gc, dc):
        """Build a random sparse (val, key, seg) triple from a dense matrix
        and verify the gradient with respect to the dense input Y.
        """
        # Zero out entries above the sparsity threshold, then convert the
        # dense matrix into COO components with the dtypes the op expects.
        dense = np.random.randn(M, K).astype(np.float32)
        dense[dense > sparsity] = 0
        coo = coo_matrix(dense)
        val = coo.data.astype(np.float32)
        key = coo.col.astype(np.int64)
        seg = coo.row.astype(np.int32)
        Y = np.random.randn(K, N).astype(np.float32)
        op = core.CreateOperator(
            'SparseUnsortedSegmentWeightedSum',
            ['Y', 'val', 'key', 'seg'],
            ['out'],
            num_segments=M)
        # Gradient check wrt Y (input index 0) only.
        self.assertGradientChecks(gc, op, [Y, val, key, seg], 0, [0])
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/sparse_gradient_checker_test.py
|
from caffe2.python import core
from hypothesis import given, settings
from hypothesis import strategies as st
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import numpy as np
import unittest
class TestMathOps(serial.SerializedTestCase):
    """Tests for elementwise math operators (Pow, Sign)."""

    @given(X=hu.tensor(),
           exponent=st.floats(min_value=2.0, max_value=3.0),
           **hu.gcs)
    def test_elementwise_power(self, X, exponent, gc, dc):
        """Check Pow forward and gradient against a NumPy reference."""
        # negative integer raised with non-integer exponent is domain error
        X = np.abs(X)

        def powf(X):
            return (X ** exponent,)

        def powf_grad(g_out, outputs, fwd_inputs):
            # d/dx x**p = p * x**(p - 1)
            return (exponent * (fwd_inputs[0] ** (exponent - 1)) * g_out,)

        op = core.CreateOperator(
            "Pow", ["X"], ["Y"], exponent=exponent)
        self.assertReferenceChecks(gc, op, [X], powf,
                                   output_to_grad="Y",
                                   grad_reference=powf_grad,
                                   ensure_outputs_are_inferred=True)

    # Fixed: this test previously drew an `exponent` strategy it never
    # used, wasting hypothesis examples; the unused draw was removed.
    @given(X=hu.tensor(), **hu.gcs)
    @settings(deadline=10000)
    def test_sign(self, X, gc, dc):
        """Check Sign against np.sign on the reference and across devices."""
        def signf(X):
            return [np.sign(X)]

        op = core.CreateOperator(
            "Sign", ["X"], ["Y"])
        self.assertReferenceChecks(
            gc, op, [X], signf, ensure_outputs_are_inferred=True)
        self.assertDeviceChecks(dc, op, [X], [0])
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/math_ops_test.py
|
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import numpy as np
import unittest
class TestTrigonometricOp(serial.SerializedTestCase):
    """Reference/device/gradient checks for Acos, Asin, Atan, and Tan.

    Fixed: the reference lambdas previously ignored their argument and
    closed over the enclosing ``X`` (e.g. ``lambda x: (np.arccos(X),)``).
    That happened to work because the reference is called with the same
    tensor, but it was fragile; they now use the parameter they receive.
    """

    @given(
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_acos(self, X, gc, dc):
        self.assertTrigonometricChecks("Acos", X, lambda x: (np.arccos(x),), gc, dc)

    @given(
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_asin(self, X, gc, dc):
        self.assertTrigonometricChecks("Asin", X, lambda x: (np.arcsin(x),), gc, dc)

    @given(
        X=hu.tensor(elements=hu.floats(min_value=-100, max_value=100)),
        **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_atan(self, X, gc, dc):
        self.assertTrigonometricChecks("Atan", X, lambda x: (np.arctan(x),), gc, dc)

    @given(
        X=hu.tensor(elements=hu.floats(min_value=-0.5, max_value=0.5)),
        **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_tan(self, X, gc, dc):
        self.assertTrigonometricChecks("Tan", X, lambda x: (np.tan(x),), gc, dc)

    def assertTrigonometricChecks(self, op_name, input, reference, gc, dc):
        """Run reference, device, and gradient checks for one unary op."""
        op = core.CreateOperator(op_name, ["X"], ["Y"])
        self.assertReferenceChecks(gc, op, [input], reference)
        self.assertDeviceChecks(dc, op, [input], [0])
        self.assertGradientChecks(gc, op, [input], 0, [0])
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/trigonometric_op_test.py
|
import numpy as np
from hypothesis import given, assume, settings
import hypothesis.strategies as st
from caffe2.python import core, model_helper, brew, utils
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import unittest
class TestInstanceNorm(serial.SerializedTestCase):
    """Reference, layout, device, gradient, and model-helper tests for the
    InstanceNorm operator in both NCHW and NHWC layouts.
    """
    def _get_inputs(self, N, C, H, W, order):
        # Returns a random (input, scale, bias) triple in the given layout.
        input_data = np.random.rand(N, C, H, W).astype(np.float32)
        if order == 'NHWC':
            # Allocate in the same order as NCHW and transpose to make sure
            # the inputs are identical on freshly-seeded calls.
            input_data = utils.NCHW2NHWC(input_data)
        elif order != "NCHW":
            # NOTE(review): a ValueError would be more idiomatic here.
            raise Exception('unknown order type ({})'.format(order))
        scale_data = np.random.rand(C).astype(np.float32)
        bias_data = np.random.rand(C).astype(np.float32)
        return input_data, scale_data, bias_data
    def _get_op(self, device_option, store_mean, store_inv_stdev, epsilon,
                order, inplace=False):
        # Builds an InstanceNorm op; optional outputs for the mean and
        # inverse-stdev buffers are appended when requested.
        outputs = ['output' if not inplace else "input"]
        if store_mean or store_inv_stdev:
            outputs += ['mean']
        if store_inv_stdev:
            outputs += ['inv_stdev']
        op = core.CreateOperator(
            'InstanceNorm',
            ['input', 'scale', 'bias'],
            outputs,
            order=order,
            epsilon=epsilon,
            device_option=device_option)
        return op
    def _feed_inputs(self, input_blobs, device_option):
        # Feeds (input, scale, bias) into this test's workspace.
        names = ['input', 'scale', 'bias']
        for name, blob in zip(names, input_blobs):
            self.ws.create_blob(name).feed(blob, device_option=device_option)
    @given(gc=hu.gcs['gc'],
           dc=hu.gcs['dc'],
           N=st.integers(1, 4),
           C=st.integers(1, 4),
           H=st.integers(2, 4),
           W=st.integers(2, 4),
           order=st.sampled_from(['NCHW', 'NHWC']),
           epsilon=st.floats(1e-6, 1e-4),
           store_mean=st.booleans(),
           seed=st.integers(0, 1000),
           store_inv_stdev=st.booleans())
    @settings(deadline=10000)
    def test_instance_norm_gradients(
            self, gc, dc, N, C, H, W, order, store_mean, store_inv_stdev,
            epsilon, seed):
        """Gradient checks w.r.t. input, scale, and bias."""
        np.random.seed(seed)
        # force store_inv_stdev if store_mean to match existing forward pass
        # implementation
        store_inv_stdev |= store_mean
        op = self._get_op(
            device_option=gc,
            store_mean=store_mean,
            store_inv_stdev=store_inv_stdev,
            epsilon=epsilon,
            order=order)
        # Distinct shuffled values keep the per-instance statistics stable
        # under the checker's finite-difference perturbations.
        input_data = np.arange(N * C * H * W).astype(np.float32)
        np.random.shuffle(input_data)
        if order == "NCHW":
            input_data = input_data.reshape(N, C, H, W)
        else:
            input_data = input_data.reshape(N, H, W, C)
        scale_data = np.random.randn(C).astype(np.float32)
        bias_data = np.random.randn(C).astype(np.float32)
        input_blobs = (input_data, scale_data, bias_data)
        output_indices = [0]
        # if store_inv_stdev is turned on, store_mean must also be forced on
        if store_mean or store_inv_stdev:
            output_indices += [1]
        if store_inv_stdev:
            output_indices += [2]
        self.assertDeviceChecks(dc, op, input_blobs, output_indices)
        # The gradient only flows from output #0 since the other two only
        # store the temporary mean and inv_stdev buffers.
        # Check dl/dinput
        self.assertGradientChecks(gc, op, input_blobs, 0, [0])
        # Check dl/dscale
        self.assertGradientChecks(gc, op, input_blobs, 1, [0])
        # Check dl/dbias
        self.assertGradientChecks(gc, op, input_blobs, 2, [0])
    @given(gc=hu.gcs['gc'],
           dc=hu.gcs['dc'],
           N=st.integers(2, 10),
           C=st.integers(3, 10),
           H=st.integers(5, 10),
           W=st.integers(7, 10),
           seed=st.integers(0, 1000),
           epsilon=st.floats(1e-6, 1e-4),
           store_mean=st.booleans(),
           store_inv_stdev=st.booleans())
    def test_instance_norm_layout(self, gc, dc, N, C, H, W, store_mean,
                                  store_inv_stdev, epsilon, seed):
        """NCHW and NHWC runs on identical data must agree."""
        # force store_inv_stdev if store_mean to match existing forward pass
        # implementation
        store_inv_stdev |= store_mean
        outputs = {}
        for order in ('NCHW', 'NHWC'):
            # Re-seed so both layouts see the same underlying data.
            np.random.seed(seed)
            input_blobs = self._get_inputs(N, C, H, W, order)
            self._feed_inputs(input_blobs, device_option=gc)
            op = self._get_op(
                device_option=gc,
                store_mean=store_mean,
                store_inv_stdev=store_inv_stdev,
                epsilon=epsilon,
                order=order)
            self.ws.run(op)
            outputs[order] = self.ws.blobs['output'].fetch()
        np.testing.assert_allclose(
            outputs['NCHW'],
            utils.NHWC2NCHW(outputs["NHWC"]),
            atol=1e-4,
            rtol=1e-4)
    @serial.given(gc=hu.gcs['gc'],
                  dc=hu.gcs['dc'],
                  N=st.integers(2, 10),
                  C=st.integers(3, 10),
                  H=st.integers(5, 10),
                  W=st.integers(7, 10),
                  order=st.sampled_from(['NCHW', 'NHWC']),
                  epsilon=st.floats(1e-6, 1e-4),
                  store_mean=st.booleans(),
                  seed=st.integers(0, 1000),
                  store_inv_stdev=st.booleans(),
                  inplace=st.booleans())
    def test_instance_norm_reference_check(
            self, gc, dc, N, C, H, W, order, store_mean, store_inv_stdev,
            epsilon, seed, inplace):
        """Compare the operator against a pure NumPy implementation."""
        np.random.seed(seed)
        # force store_inv_stdev if store_mean to match existing forward pass
        # implementation
        store_inv_stdev |= store_mean
        # In-place operation is only exercised for the NCHW layout.
        if order != "NCHW":
            assume(not inplace)
        inputs = self._get_inputs(N, C, H, W, order)
        op = self._get_op(
            device_option=gc,
            store_mean=store_mean,
            store_inv_stdev=store_inv_stdev,
            epsilon=epsilon,
            order=order,
            inplace=inplace)
        def ref(input_blob, scale_blob, bias_blob):
            # NumPy reference: normalize per (N, C) slice, then scale/shift.
            if order == 'NHWC':
                input_blob = utils.NHWC2NCHW(input_blob)
            mean_blob = input_blob.reshape((N, C, -1)).mean(axis=2)
            inv_stdev_blob = 1.0 / \
                np.sqrt(input_blob.reshape((N, C, -1)).var(axis=2) + epsilon)
            # _bc indicates blobs that are reshaped for broadcast
            scale_bc = scale_blob[np.newaxis, :, np.newaxis, np.newaxis]
            mean_bc = mean_blob[:, :, np.newaxis, np.newaxis]
            inv_stdev_bc = inv_stdev_blob[:, :, np.newaxis, np.newaxis]
            bias_bc = bias_blob[np.newaxis, :, np.newaxis, np.newaxis]
            normalized_blob = scale_bc * (input_blob - mean_bc) * inv_stdev_bc \
                + bias_bc
            if order == 'NHWC':
                normalized_blob = utils.NCHW2NHWC(normalized_blob)
            # Return as many outputs as the operator was configured with.
            if not store_mean and not store_inv_stdev:
                return normalized_blob,
            elif not store_inv_stdev:
                return normalized_blob, mean_blob
            else:
                return normalized_blob, mean_blob, inv_stdev_blob
        self.assertReferenceChecks(gc, op, inputs, ref)
    @given(gc=hu.gcs['gc'],
           dc=hu.gcs['dc'],
           N=st.integers(2, 10),
           C=st.integers(3, 10),
           H=st.integers(5, 10),
           W=st.integers(7, 10),
           order=st.sampled_from(['NCHW', 'NHWC']),
           epsilon=st.floats(1e-6, 1e-4),
           store_mean=st.booleans(),
           seed=st.integers(0, 1000),
           store_inv_stdev=st.booleans())
    def test_instance_norm_device_check(
            self, gc, dc, N, C, H, W, order, store_mean, store_inv_stdev,
            epsilon, seed):
        """Output #0 must agree across all available devices."""
        np.random.seed(seed)
        # force store_inv_stdev if store_mean to match existing forward pass
        # implementation
        store_inv_stdev |= store_mean
        inputs = self._get_inputs(N, C, H, W, order)
        op = self._get_op(
            device_option=gc,
            store_mean=store_mean,
            store_inv_stdev=store_inv_stdev,
            epsilon=epsilon,
            order=order)
        self.assertDeviceChecks(dc, op, inputs, [0])
    @given(is_test=st.booleans(),
           N=st.integers(2, 10),
           C=st.integers(3, 10),
           H=st.integers(5, 10),
           W=st.integers(7, 10),
           order=st.sampled_from(['NCHW', 'NHWC']),
           epsilon=st.floats(1e-6, 1e-4),
           seed=st.integers(0, 1000))
    def test_instance_norm_model_helper(
            self, N, C, H, W, order, epsilon, seed, is_test):
        """End-to-end run through brew.instance_norm in a ModelHelper."""
        np.random.seed(seed)
        model = model_helper.ModelHelper(name="test_model")
        brew.instance_norm(
            model,
            'input',
            'output',
            C,
            epsilon=epsilon,
            order=order,
            is_test=is_test)
        input_blob = np.random.rand(N, C, H, W).astype(np.float32)
        if order == 'NHWC':
            input_blob = utils.NCHW2NHWC(input_blob)
        self.ws.create_blob('input').feed(input_blob)
        self.ws.create_net(model.param_init_net).run()
        self.ws.create_net(model.net).run()
        if is_test:
            # In test mode the helper creates per-channel scale/bias params.
            scale = self.ws.blobs['output_s'].fetch()
            assert scale is not None
            assert scale.shape == (C, )
            bias = self.ws.blobs['output_b'].fetch()
            assert bias is not None
            assert bias.shape == (C, )
        output_blob = self.ws.blobs['output'].fetch()
        if order == 'NHWC':
            output_blob = utils.NHWC2NCHW(output_blob)
        assert output_blob.shape == (N, C, H, W)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/instance_norm_test.py
|
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
# Three parallel input cases: per-sample lengths plus two feature blobs
# (one flat, one with 2-element rows).
lengths = [[0], [1, 2], [1, 0, 2, 0]]
features1 = [
    [],
    [1, 2, 2],
    [[1, 1], [2, 2], [2, 2]],
]
features2 = [
    [],
    [2, 4, 4],
    [[2, 2], [4, 4], [4, 4]],
]
# Expected results after PadEmptySamples: every empty sample gets one
# zero-padded entry, so its length becomes 1.
lengths_exp = [[1], [1, 2], [1, 1, 2, 1]]
features1_exp = [
    [0],
    [1, 2, 2],
    [[1, 1], [0, 0], [2, 2], [2, 2], [0, 0]],
]
features2_exp = [
    [0],
    [2, 4, 4],
    [[2, 2], [0, 0], [4, 4], [4, 4], [0, 0]],
]
class TestEmptySampleOps(TestCase):
    """End-to-end checks for the PadEmptySamples operator."""

    def test_emptysample(self):
        """Feed each fixture case and compare all three outputs."""
        # The operator definition does not depend on the case index, so it
        # is built once up front.
        pad_op = core.CreateOperator(
            'PadEmptySamples',
            ['lengths', 'features1', 'features2'],
            ['out_lengths', 'out_features1', 'out_features2'],
        )
        for i in range(3):
            workspace.FeedBlob(
                'lengths', np.array(lengths[i], dtype=np.int32))
            workspace.FeedBlob(
                'features1', np.array(features1[i], dtype=np.int64))
            workspace.FeedBlob(
                'features2', np.array(features2[i], dtype=np.int64))
            workspace.RunOperatorOnce(pad_op)
            expectations = [
                ('out_lengths', lengths_exp[i], 'Mismatch in lengths'),
                ('out_features1', features1_exp[i], 'Mismatch in features1'),
                ('out_features2', features2_exp[i], 'Mismatch in features2'),
            ]
            for blob_name, expected, message in expectations:
                np.testing.assert_allclose(
                    expected,
                    workspace.FetchBlob(blob_name),
                    atol=1e-4, rtol=1e-4, err_msg=message)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/emptysample_ops_test.py
|
import numpy as np
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestAssert(hu.HypothesisTestCase):
    """Tests the Assert operator, which fails when any element is falsy."""

    @given(
        dtype=st.sampled_from(['bool_', 'int32', 'int64']),
        shape=st.lists(elements=st.integers(1, 10), min_size=1, max_size=4),
        **hu.gcs)
    @settings(deadline=10000)
    def test_assert(self, dtype, shape, gc, dc):
        """Assert must pass on all-truthy tensors; a failure is only
        acceptable when the tensor actually contains a falsy element.
        """
        data = np.random.rand(*shape).astype(np.dtype(dtype))
        op = core.CreateOperator('Assert', ['X'], [])

        def assert_ref(X):
            # Assert produces no outputs, so the reference returns none.
            return []

        try:
            self.assertReferenceChecks(gc, op, [data], assert_ref)
        except Exception:
            # Only tolerate the failure when some element was falsy.
            assert not np.all(data)
|
pytorch-master
|
caffe2/python/operator_test/assert_test.py
|
from hypothesis import given, settings
import numpy as np
import unittest
from caffe2.proto import caffe2_pb2, hsm_pb2
from caffe2.python import workspace, core, gradient_checker
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.hsm_util as hsmu
# User inputs tree using protobuf file or, in this case, python utils
# The hierarchy in this test looks as shown below. Note that the final
# subtrees (with word_ids as leaves) have been collapsed for visualization.
#
#              *                (node5)
#            /   \
#   (node4) *     5,6,7,8       (node3)
#          / \
#     0,1,2   3,4
#   (node1)   (node2)
tree = hsm_pb2.TreeProto()
words = [[0, 1, 2], [3, 4], [5, 6, 7, 8]]
node1 = hsmu.create_node_with_words(words[0], "node1")
node2 = hsmu.create_node_with_words(words[1], "node2")
node3 = hsmu.create_node_with_words(words[2], "node3")
node4 = hsmu.create_node_with_nodes([node1, node2], "node4")
node = hsmu.create_node_with_nodes([node4, node3], "node5")
tree.root_node.MergeFrom(node)
# Flattened layout of the tree used by the NumPy beam-search simulation:
# each row is [offset, length, children, node_name].
# node5: [0, 2, ["node4", "node3"]]
# node4: [2, 2, ["node1", "node2"]]
# node1: [4, 3, [0, 1, 2]]
# node2: [7, 2, [3, 4]]
# node3: [9, 4, [5, 6, 7, 8]]
struct = [[0, 2, ["node4", "node3"], "node5"],
          [2, 2, ["node1", "node2"], "node4"],
          [4, 3, [0, 1, 2], "node1"],
          [7, 2, [3, 4], "node2"],
          [9, 4, [5, 6, 7, 8], "node3"]]
# Internal util to translate input tree to list of (word_id,path). serialized
# hierarchy is passed into the operator_def as a string argument,
hierarchy_proto = hsmu.create_hierarchy(tree)
arg = caffe2_pb2.Argument()
arg.name = "hierarchy"
arg.s = hierarchy_proto.SerializeToString()
# Arguments for the beam-search variant (HSoftmaxSearch).
beam = 5
args_search = []
arg_search = caffe2_pb2.Argument()
arg_search.name = "tree"
arg_search.s = tree.SerializeToString()
args_search.append(arg_search)
arg_search = caffe2_pb2.Argument()
arg_search.name = "beam"
arg_search.f = beam
args_search.append(arg_search)
class TestHsm(hu.HypothesisTestCase):
    """Tests for hierarchical softmax (HSoftmax) and its search/tree ops.

    Fixes applied:
    - ``assertEquals`` (deprecated alias, removed in Python 3.12) replaced
      with ``assertEqual``.
    - ``test_hsm_run_once`` previously built labels as
      ``np.random.rand(1000).astype(np.int32) * 9`` which truncates every
      value to zero before scaling; it now scales first, then casts.
    - ``checkPath`` contained a copy-pasted duplicate length assertion; the
      second one now checks the ``indices`` length as evidently intended.
    """

    def test_hsm_search(self):
        """HSoftmaxSearch beam search must match a NumPy simulation."""
        samples = 10
        dim_in = 5
        X = np.random.rand(samples, dim_in).astype(np.float32) - 0.5
        w = np.random.rand(hierarchy_proto.size, dim_in) \
            .astype(np.float32) - 0.5
        b = np.random.rand(hierarchy_proto.size).astype(np.float32) - 0.5
        labels = np.array([np.random.randint(0, 8) for i in range(samples)]) \
            .astype(np.int32)
        workspace.GlobalInit(['caffe2'])
        workspace.FeedBlob("data", X)
        workspace.FeedBlob("weights", w)
        workspace.FeedBlob("bias", b)
        workspace.FeedBlob("labels", labels)
        op = core.CreateOperator(
            'HSoftmaxSearch',
            ['data', 'weights', 'bias'],
            ['names', 'scores'],
            'HSoftmaxSearch',
            arg=args_search)
        workspace.RunOperatorOnce(op)
        names = workspace.FetchBlob('names')
        scores = workspace.FetchBlob('scores')

        def simulation_hsm_search():
            # Replays the beam search in NumPy over the flattened tree
            # (`struct`), accumulating negative log-probabilities per path
            # and pruning anything more than `beam` worse than its parent.
            names = []
            scores = []
            for line in struct:
                s, e = line[0], line[0] + line[1]
                score = np.dot(X, w[s:e].transpose()) + b[s:e]
                score = np.exp(score - np.max(score, axis=1, keepdims=True))
                score /= score.sum(axis=1, keepdims=True)
                score = -np.log(score)
                score = score.transpose()
                idx = -1
                for j, n in enumerate(names):
                    if n == line[3]:
                        idx = j
                        score += scores[j]
                if idx == -1:
                    score[score > beam] = np.inf
                else:
                    score[score - scores[idx] > beam] = np.inf
                for i, name in enumerate(line[2]):
                    scores.append(score[i])
                    names.append(name)
            scores = np.vstack(scores)
            return names, scores.transpose()

        p_names, p_scores = simulation_hsm_search()
        idx = np.argsort(p_scores, axis=1)
        p_scores = np.sort(p_scores, axis=1)
        p_names = np.array(p_names)[idx]
        for i in range(names.shape[0]):
            for j in range(names.shape[1]):
                # Empty name strings mark pruned entries; skip them.
                if names[i][j]:
                    self.assertEqual(
                        names[i][j], p_names[i][j].item().encode('utf-8'))
                    self.assertAlmostEqual(
                        scores[i][j], p_scores[i][j], delta=0.001)

    def test_hsm_run_once(self):
        """Smoke test: HSoftmax runs once on random inputs."""
        workspace.GlobalInit(['caffe2'])
        workspace.FeedBlob("data",
                           np.random.randn(1000, 100).astype(np.float32))
        workspace.FeedBlob("weights",
                           np.random.randn(1000, 100).astype(np.float32))
        workspace.FeedBlob("bias", np.random.randn(1000).astype(np.float32))
        # Fixed: scale *before* casting so labels span 0..8 instead of
        # collapsing to all zeros.
        workspace.FeedBlob("labels",
                           (np.random.rand(1000) * 9).astype(np.int32))
        op = core.CreateOperator(
            'HSoftmax',
            ['data', 'weights', 'bias', 'labels'],
            ['output', 'intermediate_output'],
            'HSoftmax',
            arg=[arg])
        self.assertTrue(workspace.RunOperatorOnce(op))

    # Test to check value of sum of squared losses in forward pass for given
    # input
    def test_hsm_forward(self):
        """Forward loss on constant inputs must match the known value."""
        cpu_device_option = caffe2_pb2.DeviceOption()
        grad_checker = gradient_checker.GradientChecker(
            0.01, 0.05, cpu_device_option, "default")
        samples = 9
        dim_in = 5
        X = np.zeros((samples, dim_in)).astype(np.float32) + 1
        w = np.zeros((hierarchy_proto.size, dim_in)).astype(np.float32) + 1
        b = np.array([i for i in range(hierarchy_proto.size)])\
            .astype(np.float32)
        labels = np.array([i for i in range(samples)]).astype(np.int32)
        workspace.GlobalInit(['caffe2'])
        workspace.FeedBlob("data", X)
        workspace.FeedBlob("weights", w)
        workspace.FeedBlob("bias", b)
        workspace.FeedBlob("labels", labels)
        op = core.CreateOperator(
            'HSoftmax',
            ['data', 'weights', 'bias', 'labels'],
            ['output', 'intermediate_output'],
            'HSoftmax',
            arg=[arg])
        grad_ops, g_input = core.GradientRegistry.GetGradientForOp(
            op, [s + '_grad' for s in op.output])
        loss, _ = grad_checker.GetLossAndGrad(
            op, grad_ops, [X, w, b, labels], op.input, 0, g_input[0], [0]
        )
        self.assertAlmostEqual(loss, 44.269, delta=0.001)

    # Test to compare gradient calculated using the gradient operator and the
    # symmetric derivative calculated using Euler Method
    # TODO : convert to both cpu and gpu test when ready.
    @given(**hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_hsm_gradient(self, gc, dc):
        """Numerical gradient check for HSoftmax on CPU."""
        samples = 10
        dim_in = 5
        X = np.random.rand(samples, dim_in).astype(np.float32) - 0.5
        w = np.random.rand(hierarchy_proto.size, dim_in) \
            .astype(np.float32) - 0.5
        b = np.random.rand(hierarchy_proto.size).astype(np.float32) - 0.5
        labels = np.array([np.random.randint(0, 8) for i in range(samples)]) \
            .astype(np.int32)
        workspace.GlobalInit(['caffe2'])
        workspace.FeedBlob("data", X)
        workspace.FeedBlob("weights", w)
        workspace.FeedBlob("bias", b)
        workspace.FeedBlob("labels", labels)
        op = core.CreateOperator(
            'HSoftmax',
            ['data', 'weights', 'bias', 'labels'],
            ['output', 'intermediate_output'],
            'HSoftmax',
            arg=[arg])
        self.assertDeviceChecks(dc, op, [X, w, b, labels], [0])
        # Gradients flow to data, weights, and bias (inputs 0..2).
        for i in range(3):
            self.assertGradientChecks(gc, op, [X, w, b, labels], i, [0])

    def test_huffman_tree_hierarchy(self):
        """HuffmanTreeHierarchy must produce the expected Huffman codes."""
        workspace.GlobalInit(['caffe2'])
        labelSet = list(range(0, 6))
        counts = [1, 2, 3, 4, 5, 6]
        labels = sum([[l] * c for (l, c) in zip(labelSet, counts)], [])
        Y = np.array(labels).astype(np.int64)
        workspace.FeedBlob("labels", Y)
        arg = caffe2_pb2.Argument()
        arg.name = 'num_classes'
        arg.i = 6
        op = core.CreateOperator(
            'HuffmanTreeHierarchy',
            ['labels'],
            ['huffman_tree'],
            'HuffmanTreeHierarchy',
            arg=[arg])
        workspace.RunOperatorOnce(op)
        huffmanTreeOutput = workspace.FetchBlob('huffman_tree')
        treeOutput = hsm_pb2.TreeProto()
        treeOutput.ParseFromString(huffmanTreeOutput[0])
        treePathOutput = hsmu.create_hierarchy(treeOutput)
        label_to_path = {}
        for path in treePathOutput.paths:
            label_to_path[path.word_id] = path

        def checkPath(label, indices, code):
            # Verify the (index, target) pairs along this label's path.
            path = label_to_path[label]
            self.assertEqual(len(path.path_nodes), len(code))
            # Fixed copy-paste duplication: this used to repeat the
            # code-length check; it now validates the indices length.
            self.assertEqual(len(path.path_nodes), len(indices))
            for path_node, index, target in \
                    zip(path.path_nodes, indices, code):
                self.assertEqual(path_node.index, index)
                self.assertEqual(path_node.target, target)

        checkPath(0, [0, 4, 6, 8], [1, 0, 0, 0])
        checkPath(1, [0, 4, 6, 8], [1, 0, 0, 1])
        checkPath(2, [0, 4, 6], [1, 0, 1])
        checkPath(3, [0, 2], [0, 0])
        checkPath(4, [0, 2], [0, 1])
        checkPath(5, [0, 4], [1, 1])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/hsm_test.py
|
from caffe2.python import core, workspace
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import itertools as it
class TestReduceOps(serial.SerializedTestCase):
    def run_reduce_op_test_impl(
            self, op_name, X, axes, keepdims, ref_func, gc, dc, allow_broadcast_fastpath):
        """Check one reduce operator against a NumPy reference for a fixed
        choice of axes (``axes=None`` means reduce over all dimensions).
        """
        extra_args = dict(allow_broadcast_fastpath=True) if allow_broadcast_fastpath else {}
        if axes is None:
            # Omitting the `axes` argument makes the operator reduce over
            # every dimension.
            op = core.CreateOperator(
                op_name,
                ["X"],
                ["Y"],
                keepdims=keepdims,
                **extra_args,
            )
        else:
            op = core.CreateOperator(
                op_name,
                ["X"],
                ["Y"],
                axes=axes,
                keepdims=keepdims,
                **extra_args,
            )
        def ref(X):
            # NumPy reductions want a tuple (or None), not a list/range.
            return [ref_func(
                X, axis=None if axes is None else tuple(axes),
                keepdims=keepdims)]
        # Serialized-output checking is disabled on the fastpath variant.
        with self.set_disable_serialized_check(allow_broadcast_fastpath):
            self.assertReferenceChecks(gc, op, [X], ref)
            self.assertDeviceChecks(dc, op, [X], [0])
            self.assertGradientChecks(gc, op, [X], 0, [0])
def run_reduce_op_test(
self, op_name, X, keepdims, num_axes, ref_func, gc, dc, allow_broadcast_fastpath=False):
self.run_reduce_op_test_impl(
op_name, X, None, keepdims, ref_func, gc, dc, allow_broadcast_fastpath)
num_dims = len(X.shape)
if num_dims < num_axes:
self.run_reduce_op_test_impl(
op_name, X, range(num_dims), keepdims, ref_func, gc, dc, allow_broadcast_fastpath)
else:
for axes in it.combinations(range(num_dims), num_axes):
self.run_reduce_op_test_impl(
op_name, X, axes, keepdims, ref_func, gc, dc, allow_broadcast_fastpath)
@serial.given(
X=hu.tensor(max_dim=3, dtype=np.float32),
keepdims=st.booleans(),
allow_broadcast_fastpath=st.booleans(),
num_axes=st.integers(1, 3), **hu.gcs)
def test_reduce_min(self, X, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
X_dims = X.shape
X_size = X.size
X = np.arange(X_size, dtype=np.float32)
np.random.shuffle(X)
X = X.reshape(X_dims)
self.run_reduce_op_test(
"ReduceMin", X, keepdims, num_axes, np.min, gc, dc,
allow_broadcast_fastpath=allow_broadcast_fastpath)
@serial.given(
X=hu.tensor(max_dim=3, dtype=np.float32),
keepdims=st.booleans(),
allow_broadcast_fastpath=st.booleans(),
num_axes=st.integers(1, 3), **hu.gcs)
def test_reduce_max(self, X, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
X_dims = X.shape
X_size = X.size
X = np.arange(X_size, dtype=np.float32)
np.random.shuffle(X)
X = X.reshape(X_dims)
self.run_reduce_op_test(
"ReduceMax", X, keepdims, num_axes, np.max, gc, dc,
allow_broadcast_fastpath=allow_broadcast_fastpath)
@given(n=st.integers(0, 5), m=st.integers(0, 5), k=st.integers(0, 5),
t=st.integers(0, 5), keepdims=st.booleans(),
allow_broadcast_fastpath=st.booleans(),
num_axes=st.integers(1, 3), **hu.gcs)
@settings(deadline=10000)
def test_reduce_sum(self, n, m, k, t, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
X = np.random.randn(n, m, k, t).astype(np.float32)
self.run_reduce_op_test(
"ReduceSum", X, keepdims, num_axes, np.sum, gc, dc,
allow_broadcast_fastpath=allow_broadcast_fastpath)
@serial.given(X=hu.tensor(dtype=np.float32), keepdims=st.booleans(),
allow_broadcast_fastpath=st.booleans(),
num_axes=st.integers(1, 4), **hu.gcs)
def test_reduce_mean(self, X, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
self.run_reduce_op_test(
"ReduceMean", X, keepdims, num_axes, np.mean, gc, dc,
allow_broadcast_fastpath=allow_broadcast_fastpath)
@given(n=st.integers(1, 3), m=st.integers(1, 3), k=st.integers(1, 3),
keepdims=st.booleans(), allow_broadcast_fastpath=st.booleans(),
num_axes=st.integers(1, 3), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_reduce_l1(self, n, m, k, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
X = np.arange(n * m * k, dtype=np.float32) - 0.5
np.random.shuffle(X)
X = X.reshape((m, n, k))
self.run_reduce_op_test(
"ReduceL1", X, keepdims, num_axes, getNorm(1), gc, dc,
allow_broadcast_fastpath=allow_broadcast_fastpath)
@serial.given(n=st.integers(1, 5), m=st.integers(1, 5), k=st.integers(1, 5),
keepdims=st.booleans(), allow_broadcast_fastpath=st.booleans(),
num_axes=st.integers(1, 3), **hu.gcs_cpu_only)
def test_reduce_l2(self, n, m, k, keepdims, allow_broadcast_fastpath, num_axes, gc, dc):
X = np.random.randn(n, m, k).astype(np.float32)
self.run_reduce_op_test(
"ReduceL2", X, keepdims, num_axes, getNorm(2), gc, dc,
allow_broadcast_fastpath=allow_broadcast_fastpath)
def getNorm(p):
    """Return a reference Lp-norm reducer for ``p`` in {1, 2}.

    The returned callable has the signature ``norm(X, axis, keepdims)`` so
    it can stand in for numpy reducers like ``np.sum`` in the tests above.
    Raises RuntimeError for any other ``p``.
    """
    if p == 1:
        def norm(X, axis, keepdims):
            # L1: sum of absolute values.
            return np.sum(np.abs(X), axis=axis, keepdims=keepdims)
        return norm
    if p == 2:
        def norm(X, axis, keepdims):
            # L2: square root of the sum of squares.
            return np.sqrt(np.sum(np.square(X), axis=axis, keepdims=keepdims))
        return norm
    raise RuntimeError("Only L1 and L2 norms supported")
class TestReduceFrontReductions(serial.SerializedTestCase):
    """Tests for ReduceFront*/ReduceBack* ops, with and without lengths."""
    def grad_variant_input_test(self, grad_op_name, X, ref, num_reduce_dim):
        """Check the gradient op accepts either X or X's shape as 2nd input.

        Both input variants must produce bit-identical gradients.
        """
        workspace.ResetWorkspace()
        Y = np.array(ref(X)[0]).astype(np.float32)
        dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)
        shape = np.array(X.shape).astype(np.int64)
        workspace.FeedBlob("X", X)
        workspace.FeedBlob("dY", dY)
        workspace.FeedBlob("shape", shape)
        grad_op = core.CreateOperator(
            grad_op_name, ["dY", "X"], ["dX"], num_reduce_dim=num_reduce_dim)
        grad_op1 = core.CreateOperator(
            grad_op_name, ["dY", "shape"], ["dX1"],
            num_reduce_dim=num_reduce_dim)
        workspace.RunOperatorOnce(grad_op)
        workspace.RunOperatorOnce(grad_op1)
        dX = workspace.FetchBlob("dX")
        dX1 = workspace.FetchBlob("dX1")
        np.testing.assert_array_equal(dX, dX1)
    def max_op_test(
            self, op_name, num_reduce_dim, gc, dc, in_data, in_names, ref_max):
        """Reference-check a max reduction and device-check its gradient."""
        op = core.CreateOperator(
            op_name,
            in_names,
            ["outputs"],
            num_reduce_dim=num_reduce_dim
        )
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=in_data,
            reference=ref_max,
        )
        # Skip gradient check because it is too unreliable with max.
        # Just check CPU and CUDA have same results
        Y = np.array(ref_max(*in_data)[0]).astype(np.float32)
        dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)
        # The gradient op's input list differs depending on whether a
        # lengths blob is part of the forward inputs.
        if len(in_data) == 2:
            grad_in_names = ["dY", in_names[0], "Y", in_names[1]]
            grad_in_data = [dY, in_data[0], Y, in_data[1]]
        else:
            grad_in_names = ["dY", in_names[0], "Y"]
            grad_in_data = [dY, in_data[0], Y]
        grad_op = core.CreateOperator(
            op_name + "Gradient",
            grad_in_names,
            ["dX"],
            num_reduce_dim=num_reduce_dim
        )
        self.assertDeviceChecks(dc, grad_op, grad_in_data, [0])
    def reduce_op_test(self, op_name, op_ref, in_data, in_names,
                       num_reduce_dims, device):
        """Reference- and gradient-check a sum/mean style reduction."""
        op = core.CreateOperator(
            op_name,
            in_names,
            ["outputs"],
            num_reduce_dim=num_reduce_dims
        )
        self.assertReferenceChecks(
            device_option=device,
            op=op,
            inputs=in_data,
            reference=op_ref
        )
        self.assertGradientChecks(
            device, op, in_data, 0, [0], stepsize=1e-2, threshold=1e-2)
    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    @settings(deadline=10000)
    def test_reduce_front_sum(self, num_reduce_dim, gc, dc):
        """ReduceFrontSum over the first num_reduce_dim dimensions."""
        X = np.random.rand(7, 4, 3, 5).astype(np.float32)
        def ref_sum(X):
            return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]
        self.reduce_op_test(
            "ReduceFrontSum", ref_sum, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceFrontSumGradient", X, ref_sum, num_reduce_dim)
    @given(num_reduce_dim=st.integers(0, 4), seed=st.integers(0, 4), **hu.gcs)
    def test_reduce_front_sum_empty_batch(self, num_reduce_dim, seed, gc, dc):
        """ReduceFrontSum on a zero-sized batch, then re-run on real data."""
        np.random.seed(seed)
        X = np.random.rand(0, 4, 3, 5).astype(np.float32)
        def ref_sum(X):
            return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]
        self.reduce_op_test(
            "ReduceFrontSum", ref_sum, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceFrontSumGradient", X, ref_sum, num_reduce_dim)
        # test the second iteration
        not_empty_X = np.random.rand(2, 4, 3, 5).astype(np.float32)
        net = core.Net('test')
        with core.DeviceScope(gc):
            net.ReduceFrontSum(
                ['X'], ['output'],
                num_reduce_dim=num_reduce_dim
            )
        workspace.CreateNet(net)
        workspace.FeedBlob('X', not_empty_X)
        workspace.RunNet(workspace.GetNetName(net))
        output = workspace.FetchBlob('output')
        np.testing.assert_allclose(
            output, ref_sum(not_empty_X)[0], atol=1e-3)
        # Re-feed the empty batch through the already-created net.
        workspace.FeedBlob('X', X)
        workspace.RunNet(workspace.GetNetName(net))
        output = workspace.FetchBlob('output')
        np.testing.assert_allclose(output, ref_sum(X)[0], atol=1e-3)
    @given(**hu.gcs)
    @settings(deadline=None)
    def test_reduce_front_sum_with_length(self, dc, gc):
        """ReduceFrontSum with a per-column lengths input."""
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)
        def ref_sum(X, lengths):
            # Sum only the first lengths[i] entries of each column.
            Y = X.reshape(d, lengths.size)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.sum(Y[:lengths[ii], ii])
            return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]
        self.reduce_op_test(
            "ReduceFrontSum", ref_sum, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)
    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    @settings(deadline=10000)
    def test_reduce_front_mean(self, num_reduce_dim, gc, dc):
        """ReduceFrontMean over the first num_reduce_dim dimensions."""
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)
        def ref_mean(X):
            return [np.mean(X, axis=(tuple(range(num_reduce_dim))))]
        self.reduce_op_test(
            "ReduceFrontMean", ref_mean, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceFrontMeanGradient", X, ref_mean, num_reduce_dim)
    @given(**hu.gcs)
    @settings(deadline=10000)
    def test_reduce_front_mean_with_length(self, dc, gc):
        """ReduceFrontMean with a per-column lengths input."""
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)
        def ref_mean(X, lengths):
            Y = X.reshape(d, lengths.size)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.mean(Y[:lengths[ii], ii])
            return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]
        self.reduce_op_test(
            "ReduceFrontMean", ref_mean, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)
    @serial.given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    def test_reduce_front_max(self, num_reduce_dim, gc, dc):
        """ReduceFrontMax over the first num_reduce_dim dimensions."""
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)
        def ref_frontmax(X):
            return [np.max(X, axis=(tuple(range(num_reduce_dim))))]
        self.max_op_test(
            "ReduceFrontMax", num_reduce_dim, gc, dc, [X], ["X"], ref_frontmax)
    @given(**hu.gcs)
    def test_reduce_front_max_with_length(self, dc, gc):
        """ReduceFrontMax with a per-column lengths input."""
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)
        def ref_max(X, lengths):
            Y = X.reshape(d, lengths.size)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.max(Y[:lengths[ii], ii])
            return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]
        self.max_op_test(
            "ReduceFrontMax", num_reduce_dim, gc, dc, [X, lengths],
            ["X", "lengths"], ref_max)
    @serial.given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    def test_reduce_back_max(self, num_reduce_dim, gc, dc):
        """ReduceBackMax over the last num_reduce_dim dimensions."""
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)
        def ref_backmax(X):
            return [np.max(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]
        self.max_op_test(
            "ReduceBackMax", num_reduce_dim, gc, dc, [X], ["X"], ref_backmax)
    @given(**hu.gcs)
    def test_reduce_back_max_with_length(self, gc, dc):
        """ReduceBackMax with a per-row lengths input."""
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)
        def ref_max(X, lengths):
            Y = X.reshape(lengths.size, d)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.max(Y[ii, :lengths[ii]])
            return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]
        self.max_op_test(
            "ReduceBackMax", num_reduce_dim, gc, dc, [X, lengths],
            ["X", "lengths"], ref_max)
    @given(**hu.gcs)
    @settings(deadline=10000)
    def test_reduce_back_sum(self, dc, gc):
        """ReduceBackSum over the last dimension."""
        num_reduce_dim = 1
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)
        def ref_sum(X):
            return [np.sum(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]
        self.reduce_op_test(
            "ReduceBackSum", ref_sum, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceBackSumGradient", X, ref_sum, num_reduce_dim)
    @given(**hu.gcs)
    @settings(deadline=10000)
    def test_reduce_back_sum_with_length(self, dc, gc):
        """ReduceBackSum with a per-row lengths input."""
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)
        def ref_sum(X, lengths):
            Y = X.reshape(lengths.size, d)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.sum(Y[ii, :lengths[ii]])
            return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]
        self.reduce_op_test(
            "ReduceBackSum", ref_sum, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)
    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    @settings(deadline=10000)
    def test_reduce_back_mean(self, num_reduce_dim, dc, gc):
        """ReduceBackMean over the last num_reduce_dim dimensions."""
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)
        def ref_mean(X):
            return [np.mean(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]
        self.reduce_op_test(
            "ReduceBackMean", ref_mean, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceBackMeanGradient", X, ref_mean, num_reduce_dim)
    @given(**hu.gcs)
    @settings(deadline=None)
    def test_reduce_back_mean_with_length(self, dc, gc):
        """ReduceBackMean with a per-row lengths input."""
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)
        def ref_mean(X, lengths):
            Y = X.reshape(lengths.size, d)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.mean(Y[ii, :lengths[ii]])
            return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]
        self.reduce_op_test(
            "ReduceBackMean", ref_mean, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)
|
pytorch-master
|
caffe2/python/operator_test/reduce_ops_test.py
|
import caffe2.python.serialized_test.serialized_test_util as serial
def pytest_addoption(parser):
    """Register the serialized-operator-test command-line flags with pytest."""
    flags = [
        (('-G', '--generate-serialized'),
         dict(action='store_true', dest='generate',
              help='generate output files (default=false, compares to current files)')),
        (('-O', '--output'),
         dict(default=serial.DATA_DIR, dest='output',
              help='output directory (default: %(default)s)')),
        (('-D', '--disable-serialized-check'),
         dict(action='store_true', dest='disable',
              help='disable checking serialized tests')),
        (('-C', '--disable-gen-coverage'),
         dict(action='store_true', dest='disable_coverage',
              help='disable generating coverage markdown file')),
    ]
    for names, options in flags:
        parser.addoption(*names, **options)
def pytest_configure(config):
    """Copy the parsed flag values into the serialized-test module context."""
    values = {
        'should_generate_output': config.getoption('generate', default=False),
        'output_dir': config.getoption('output', default=serial.DATA_DIR),
        'disable_serialized_check': config.getoption('disable', default=False),
        'disable_gen_coverage': config.getoption('disable_coverage', default=False),
    }
    for name, value in values.items():
        setattr(serial._output_context, name, value)
|
pytorch-master
|
caffe2/python/operator_test/conftest.py
|
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, assume, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestMomentumSGD(serial.SerializedTestCase):
    """Tests MomentumSGD/MomentumSGDUpdate variants against numpy references."""
    @given(n=st.integers(4, 8), nesterov=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_momentum_sgd(self, n, nesterov, gc, dc):
        """Dense momentum SGD, with and without an in-place param update."""
        param = np.random.rand(n).astype(np.float32)
        grad = np.random.rand(n).astype(np.float32)
        lr = np.random.rand(1).astype(np.float32)
        param_momentum = np.random.rand(n).astype(np.float32)
        momentum = 0.9
        def momentum_sgd(grad, param_momentum, lr, param=None):
            # Reference: returns (adjusted grad, new momentum[, new param]).
            if not nesterov:
                adjusted_gradient = lr * grad + momentum * param_momentum
                if param is None:
                    return [adjusted_gradient, adjusted_gradient]
                else:
                    paramup = param - adjusted_gradient
                    return [adjusted_gradient, adjusted_gradient, paramup]
            else:
                m_new = momentum * param_momentum + lr * grad
                grad_new = (1 + momentum) * m_new - momentum * param_momentum
                if param is None:
                    return [grad_new, m_new]
                else:
                    paramup = param - grad_new
                    return [grad_new, m_new, paramup]
        op = core.CreateOperator(
            "MomentumSGDUpdate",
            ["grad", "param_momentum", "lr", "param"],
            ["grad", "param_momentum", "param"],
            momentum=momentum,
            nesterov=int(nesterov),
        )
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[grad, param_momentum, lr, param],
            reference=momentum_sgd
        )
        # Same reference covers the no-param variant via its default arg.
        op_noparam = core.CreateOperator(
            "MomentumSGD",
            ["grad", "param_momentum", "lr"],
            ["grad", "param_momentum"],
            momentum=momentum,
            nesterov=int(nesterov),
        )
        self.assertReferenceChecks(
            device_option=gc,
            op=op_noparam,
            inputs=[grad, param_momentum, lr],
            reference=momentum_sgd
        )
    @given(
        inputs=hu.tensors(n=3),
        momentum=st.floats(min_value=0.1, max_value=0.9),
        nesterov=st.booleans(),
        lr=st.floats(min_value=0.1, max_value=0.9),
        data_strategy=st.data(),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test_sparse_momentum_sgd(
        self, inputs, momentum, nesterov, lr, data_strategy, gc, dc
    ):
        """SparseMomentumSGDUpdate over a unique set of row indices."""
        w, grad, m = inputs
        # Create an indexing array containing values which index into grad
        indices = data_strategy.draw(
            hu.tensor(
                max_dim=1,
                min_value=1,
                max_value=grad.shape[0],
                dtype=np.int64,
                elements=st.sampled_from(np.arange(grad.shape[0])),
            ),
        )
        # Verify that the generated indices are unique
        assume(
            np.array_equal(
                np.unique(indices.flatten()),
                np.sort(indices.flatten())))
        # Sparsify grad
        grad = grad[indices]
        # Make momentum >= 0
        m = np.abs(m)
        # Convert lr to a numpy array
        lr = np.asarray([lr], dtype=np.float32)
        op = core.CreateOperator(
            "SparseMomentumSGDUpdate", ["grad", "m", "lr", "param", "indices"],
            ["adjusted_grad", "m", "param"],
            momentum=momentum,
            nesterov=int(nesterov),
            device_option=gc
        )
        # Reference
        def momentum_sgd(grad, m, lr):
            lr = lr[0]
            if not nesterov:
                adjusted_gradient = lr * grad + momentum * m
                return (adjusted_gradient, adjusted_gradient)
            else:
                m_new = momentum * m + lr * grad
                return ((1 + momentum) * m_new - momentum * m, m_new)
        def sparse(grad, m, lr, param, i):
            # Apply the dense update only at the selected rows.
            grad_new, m_new = momentum_sgd(grad, m[i], lr)
            m[i] = m_new
            param[i] -= grad_new
            return (grad_new, m, param)
        self.assertReferenceChecks(
            gc,
            op,
            [grad, m, lr, w, indices],
            sparse)
    @unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/31368")
    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
    @given(n=st.integers(4, 8), nesterov=st.booleans(), **hu.gcs)
    def test_fp16momentum_sgd(self, n, nesterov, gc, dc):
        """FP16 dense momentum SGD (GPU only; currently skipped as flaky)."""
        assume(core.IsGPUDeviceType(gc.device_type))
        gpuvers = workspace.GetDeviceProperties(0)["major"]
        if gc.device_type == caffe2_pb2.CUDA and gpuvers < 6:
            print("No FP16 support because major version {} < 6".format(gpuvers))
            return
        param = np.random.rand(n).astype(np.float16)
        grad = np.random.rand(n).astype(np.float16)
        lr = np.random.rand(1).astype(np.float32)
        param_momentum = np.random.rand(n).astype(np.float16)
        momentum = 0.9
        def momentum_sgd(grad, param_momentum, lr, param=None):
            if not nesterov:
                adjusted_gradient = lr * grad + momentum * param_momentum
                paramup = param - adjusted_gradient
                return [adjusted_gradient, adjusted_gradient, paramup]
            else:
                m_new = momentum * param_momentum + lr * grad
                grad_new = (1 + momentum) * m_new - momentum * param_momentum
                paramup = param - grad_new
                return [grad_new, m_new, paramup]
        op = core.CreateOperator(
            "FP16MomentumSGDUpdate",
            ["grad", "param_momentum", "lr", "param"],
            ["grad", "param_momentum", "param"],
            momentum=momentum,
            nesterov=int(nesterov),
            weight_decay=0.0,
        )
        # NOTE(review): the HIP path uses a looser tolerance here.
        threshold = 1e-3 if (gc.device_type == caffe2_pb2.HIP) else 1e-4
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[grad, param_momentum, lr, param],
            reference=momentum_sgd,
            threshold=threshold
        )
# Standard entry point: run this test module's cases directly.
if __name__ == "__main__":
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/momentum_sgd_test.py
|
from hypothesis import given
import numpy as np
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
@st.composite
def _dev_options(draw):
    """Draw an ``(operator_device, input_blob_device)`` pair.

    The CPU operator can only consume CPU tensors, so when the operator
    device is CPU the input blob device is forced to CPU as well.
    """
    op_dev = draw(st.sampled_from(hu.device_options))
    if op_dev == hu.cpu_do:
        # the CPU op can only handle CPU tensor
        return op_dev, hu.cpu_do
    return op_dev, draw(st.sampled_from(hu.device_options))
class TestEnsureCPUOutputOp(hu.HypothesisTestCase):
    """Checks that EnsureCPUOutput hands back the input tensor on the CPU."""
    @given(
        input=hu.tensor(dtype=np.float32),
        dev_options=_dev_options()
    )
    def test_ensure_cpu_output(self, input, dev_options):
        """Feed a tensor on an arbitrary device; the CPU copy must match."""
        op_dev, input_blob_dev = dev_options
        net = core.Net('test_net')
        # Materialize the input tensor on the (possibly non-CPU) blob device.
        source = net.GivenTensorFill(
            [],
            ["data"],
            values=input,
            shape=input.shape,
            device_option=input_blob_dev
        )
        # The op itself may run on a different device than the blob lives on.
        cpu_copy = net.EnsureCPUOutput(
            [source],
            ["data_cpu"],
            device_option=op_dev
        )
        workspace.RunNetOnce(net)
        fetched = workspace.FetchBlob(cpu_copy)
        np.testing.assert_allclose(input, fetched)
|
pytorch-master
|
caffe2/python/operator_test/ensure_cpu_output_op_test.py
|
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import numpy as np
class TestPercentileOp(hu.HypothesisTestCase):
    """Tests the Percentile op, which maps raw values to per-feature percentiles."""
    def _test_percentile_op(
        self,
        original_inp,
        value_to_pct_map,
        dist_lengths,
        expected_values
    ):
        """Run Percentile on the given inputs and compare to expected_values.

        ``value_to_pct_map`` is a flat list of [value, percentile] pairs,
        partitioned per feature by ``dist_lengths``.
        """
        op = core.CreateOperator(
            'Percentile',
            ['original_values', 'value_to_pct_map', 'dist_lengths'],
            ['percentile_values']
        )
        workspace.FeedBlob('original_values', np.array(
            original_inp, dtype=np.float32))
        workspace.FeedBlob(
            'value_to_pct_map', np.array(value_to_pct_map, dtype=np.float32))
        workspace.FeedBlob('dist_lengths', np.array(
            dist_lengths, dtype=np.int32))
        workspace.RunOperatorOnce(op)
        np.testing.assert_array_almost_equal(
            workspace.FetchBlob('percentile_values'),
            np.array(expected_values),
            decimal=5
        )
        # Also verify shape/type inference agrees with the actual output.
        self._test_shape_inference(
            original_inp,
            value_to_pct_map,
            dist_lengths,
            expected_values
        )
    def _test_shape_inference(
        self,
        original_inp,
        value_to_pct_map,
        dist_lengths,
        expected_values
    ):
        """Check InferShapesAndTypes output for the Percentile op."""
        net = core.Net('test_shape_inference')
        result = net.Percentile(
            ['original_values', 'value_to_pct_map', 'dist_lengths'],
            ['percentile_values']
        )
        workspace.FeedBlob('original_values', np.array(
            original_inp, dtype=np.float32))
        workspace.FeedBlob(
            'value_to_pct_map', np.array(value_to_pct_map, dtype=np.float32))
        workspace.FeedBlob('dist_lengths', np.array(
            dist_lengths, dtype=np.int32))
        (shapes, types) = workspace.InferShapesAndTypes([net])
        workspace.RunNetOnce(net)
        # Output must have the same shape as the input values, float dtype.
        self.assertEqual(shapes[result], list(workspace.blobs[result].shape))
        self.assertEqual(shapes[result], list(workspace.blobs['original_values'].shape))
        self.assertEqual(types[result], core.DataType.FLOAT)
    def test_percentile_op_with_only_one_dist(self):
        """Single feature, single map entry."""
        self._test_percentile_op(
            original_inp=[[5]],
            value_to_pct_map=[[5, 0.4]],
            dist_lengths=[1],
            expected_values=[[0.4]]
        )
    def test_percentile_op_with_all_elements_in_map(self):
        """Every input value appears exactly in its feature's map."""
        self._test_percentile_op(
            original_inp=[[3, 4], [10, 4]],
            value_to_pct_map=[[3, 0.3], [4, 0.6],
                              [10, 0.8], [4, 0.5], [5, 0.6]],
            dist_lengths=[3, 2],
            expected_values=[[0.3, 0.5], [0.8, 0.5]],
        )
    def test_percentile_op_with_same_value(self):
        """Repeated input values; values below a feature's map range give 0."""
        self._test_percentile_op(
            original_inp=[[1, 1], [1, 2]],
            value_to_pct_map=[[1, 0.1], [4, 0.4], [2, 0.5]],
            dist_lengths=[2, 1],
            expected_values=[[0.1, 0.0], [0.1, 0.5]]
        )
    def test_percentile_op_with_elements_bigger_than_map_range(self):
        """Values above a feature's map range clamp to percentile 1."""
        self._test_percentile_op(
            original_inp=[[1, 5], [3, 4]],
            value_to_pct_map=[[1, 0.1], [4, 0.4], [2, 0.1], [3, 0.3]],
            dist_lengths=[2, 2],
            expected_values=[[0.1, 1.], [0.3, 1.0]]
        )
    def test_percentile_op_with_elements_smaller_than_map_range(self):
        """Values below a feature's map range clamp to percentile 0."""
        self._test_percentile_op(
            original_inp=[[1], [5], [6]],
            value_to_pct_map=[[2, 0.2], [5, 0.5], [7, 0.5]],
            dist_lengths=[3],
            expected_values=[[0.0], [0.5], [0.5]]
        )
    def test_percentile_op_with_interpolation(self):
        """Values that fall between map keys are interpolated."""
        self._test_percentile_op(
            original_inp=[[3, 2, 5], [6, 7, 8]],
            value_to_pct_map=[[1, 0.1], [4, 0.7], [4.5, 0.8],
                              [6, 0.5], [8, 0.9],
                              [8, 0.6]],
            dist_lengths=[3, 2, 1],
            expected_values=[[0.5, 0.0, 0.0], [1.0, 0.7, 0.6]]
        )
    def test_percentile_op_with_large_sample_size_per_dist(self):
        """Several map entries per feature."""
        self._test_percentile_op(
            original_inp=[[3, 1], [5, 7]],
            value_to_pct_map=[[3, 0.5], [4, 0.6], [5, 0.7],
                              [1, 0.2], [2, 0.3], [5, 0.8]],
            dist_lengths=[3, 3],
            expected_values=[[0.5, 0.2], [0.7, 1.0]]
        )
# Standard entry point: run this test module's cases directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/percentile_op_test.py
|
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import unittest
class TestRMSNormOp(hu.HypothesisTestCase):
    """Tests RMSNorm forward output and gradients against a numpy reference."""
    @given(
        M=st.integers(0, 8),
        N=st.integers(1, 16),
        eps=st.floats(0, 1e-3),
        dtype=st.sampled_from([np.float32, np.float64]),
        **hu.gcs,
    )
    @settings(deadline=None)
    def test_rms_norm(self, M, N, eps, dtype, gc, dc):
        """Compare Y and rrms outputs to the reference, then check grads."""
        X = (2.0 * np.random.randn(M, N) + 1.0).astype(dtype)
        gamma, beta = (np.random.randn(N).astype(dtype) for _ in range(2))
        op = core.CreateOperator(
            "RMSNorm",
            ["X", "gamma", "beta"],
            ["Y", "rrms"],
            eps=eps,
        )
        def ref(X, gamma, beta):
            # rrms = 1 / sqrt(mean(x^2) + eps), broadcast over columns.
            inv_rms = 1.0 / np.sqrt(np.mean(np.square(X), axis=1) + eps)
            return X * inv_rms[:, None] * gamma + beta, inv_rms
        inputs = [X, gamma, beta]
        self.assertReferenceChecks(gc, op, inputs, ref)
        self.assertDeviceChecks(dc, op, inputs, [0, 1])
        for input_idx in range(3):
            self.assertGradientChecks(gc, op, inputs, input_idx, [0])
# Standard entry point: run this test module's cases directly.
if __name__ == "__main__":
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/rms_norm_op_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestConditionalOp(serial.SerializedTestCase):
    """Tests the Conditional op: a per-row select between two tensors."""
    @serial.given(rows_num=st.integers(1, 10000), **hu.gcs_cpu_only)
    def test_conditional(self, rows_num, gc, dc):
        """Rows with a true condition come from data_t, others from data_f."""
        op = core.CreateOperator(
            "Conditional", ["condition", "data_t", "data_f"], "output"
        )
        shape = (rows_num, 10, 20)
        data_t = np.random.random(shape).astype(np.float32)
        data_f = np.random.random(shape).astype(np.float32)
        condition = np.random.choice(a=[True, False], size=rows_num)
        def ref(condition, data_t, data_f):
            # Row-wise select along axis 0.
            selected = [
                row_t if flag else row_f
                for flag, row_t, row_f in zip(condition, data_t, data_f)
            ]
            return (selected,)
        self.assertReferenceChecks(gc, op, [condition, data_t, data_f], ref)
|
pytorch-master
|
caffe2/python/operator_test/conditional_test.py
|
from caffe2.python import core, workspace
from caffe2.python.core import CreatePythonOperator
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class PythonOpTest(hu.HypothesisTestCase):
    """Tests PythonOp execution inside a parallel ("dag") net plan."""
    @given(x=hu.tensor(),
           n=st.integers(min_value=1, max_value=20),
           w=st.integers(min_value=1, max_value=20))
    @settings(deadline=10000)
    def test_simple_python_op(self, x, n, w):
        """Run n identity PythonOps under w workers; every output equals x."""
        def copy_op(inputs, outputs):
            # Identity op: shape the output like the input, then copy data.
            outputs[0].reshape(inputs[0].shape)
            outputs[0].data[...] = inputs[0].data
        identity_ops = [
            CreatePythonOperator(copy_op, ["x"], [str(idx)]) for idx in range(n)
        ]
        net = core.Net("net")
        net.Proto().op.extend(identity_ops)
        # Use the parallel executor so the ops may run concurrently.
        net.Proto().type = "dag"
        net.Proto().num_workers = w
        plan = core.Plan("plan")
        plan.AddStep(core.ExecutionStep("test-step", net, 100))
        workspace.FeedBlob("x", x)
        workspace.RunPlan(plan.Proto().SerializeToString())
        for idx in range(n):
            np.testing.assert_almost_equal(x, workspace.FetchBlob(str(idx)))
# Standard entry point: run this test module's cases directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/python_op_test.py
|
import itertools
import numpy as np
import tempfile
import unittest
import os
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
class TestMap(hu.HypothesisTestCase):
    """Tests CreateMap / KeyValueToMap / MapToKeyValue and map serialization."""
    def test_create_map(self):
        """CreateMap should produce a map blob for every int key/value dtype pair."""
        dtypes = [core.DataType.INT32, core.DataType.INT64]
        for key_dtype, value_dtype in itertools.product(dtypes, dtypes):
            op = core.CreateOperator(
                'CreateMap',
                [],
                ['map'],
                key_dtype=key_dtype,
                value_dtype=value_dtype,
            )
            workspace.RunOperatorOnce(op)
            self.assertTrue(workspace.HasBlob('map'))
    def test_map(self):
        """Round-trip key/value data through a map blob and a minidb file."""
        def test_map_func(KEY_T, VALUE_T):
            # Build a map from (key_data, value_data) and save it to a temp db.
            model_file = os.path.join(tempfile.mkdtemp(), 'db')
            key_data = np.asarray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=KEY_T)
            value_data = np.asarray([2, 3, 3, 3, 3, 2, 3, 3, 3, 3], dtype=VALUE_T)
            workspace.FeedBlob("key_data", key_data)
            workspace.FeedBlob("value_data", value_data)
            save_net = core.Net("save_net")
            save_net.KeyValueToMap(["key_data", "value_data"], "map_data")
            save_net.Save(
                ["map_data"], [],
                db=model_file,
                db_type="minidb",
                absolute_path=True
            )
            workspace.RunNetOnce(save_net)
            workspace.ResetWorkspace()
            # Reload the map from disk and convert back to key/value blobs.
            load_net = core.Net("load_net")
            load_net.Load(
                [], ["map_data"],
                db=model_file,
                db_type="minidb",
                load_all=True,
                absolute_path=True
            )
            load_net.MapToKeyValue("map_data", ["key_data", "value_data"])
            workspace.RunNetOnce(load_net)
            key_data2 = workspace.FetchBlob("key_data")
            value_data2 = workspace.FetchBlob("value_data")
            # Compare as (key, value) sets, ignoring element order.
            assert(set(zip(key_data, value_data)) == set(zip(key_data2, value_data2)))
        test_map_func(np.int64, np.int64)
        test_map_func(np.int64, np.int32)
        test_map_func(np.int32, np.int32)
        test_map_func(np.int32, np.int64)
# Standard entry point: run this test module's cases directly.
if __name__ == "__main__":
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/map_ops_test.py
|
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
def _one_hots():
    """Strategy producing ``(index_size, lengths, flat_indices)`` triples.

    ``flat_indices`` holds ``sum(lengths)`` values, each in
    ``[0, index_size - 1]``, so the triple describes a batch of variable
    length segments of hot indices.
    """
    index_size = st.integers(min_value=1, max_value=5)
    lengths = st.lists(
        elements=st.integers(min_value=0, max_value=5))

    def expand(sized):
        size, lens = sized
        total = sum(lens)
        return st.tuples(
            st.just(size),
            st.just(lens),
            st.lists(
                elements=st.integers(min_value=0, max_value=size - 1),
                min_size=total,
                max_size=total))

    return st.tuples(index_size, lengths).flatmap(expand)
class TestOneHotOps(serial.SerializedTestCase):
@serial.given(
x=hu.tensor(
min_dim=2, max_dim=2, dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
**hu.gcs_cpu_only)
def test_batch_one_hot(self, x, gc, dc):
d = x.shape[1]
lens = []
vals = []
for i in range(0, d):
val = np.unique(x[:, i])
vals.extend(val)
lens.append(len(val))
lens = np.array(lens, dtype=np.int32)
vals = np.array(vals, dtype=np.int32)
def ref(x, lens, vals):
output_dim = vals.size
ret = np.zeros((x.shape[0], output_dim)).astype(x.dtype)
p = 0
for i, l in enumerate(lens):
for j in range(0, l):
v = vals[p + j]
ret[x[:, i] == v, p + j] = 1
p += lens[i]
return (ret, )
op = core.CreateOperator('BatchOneHot', ["X", "LENS", "VALS"], ["Y"])
self.assertReferenceChecks(gc, op, [x, lens, vals], ref)
@given(
x=hu.tensor(
min_dim=2, max_dim=2, dtype=np.float32,
elements=st.integers(min_value=-5, max_value=5)),
seed=st.integers(min_value=0, max_value=1000),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_batch_bucketized_one_hot(self, x, seed, gc, dc):
np.random.seed(seed)
d = x.shape[1]
lens = np.random.randint(low=1, high=5, size=d)
boundaries = []
for i in range(d):
# add [0, 0] as duplicated boundary for duplicated bucketization
if lens[i] > 2:
cur_boundary = np.append(
np.random.randn(lens[i] - 2) * 5, [0, 0])
else:
cur_boundary = np.random.randn(lens[i]) * 5
cur_boundary.sort()
boundaries += cur_boundary.tolist()
lens = np.array(lens, dtype=np.int32)
boundaries = np.array(boundaries, dtype=np.float32)
def ref(x, lens, boundaries):
output_dim = lens.size + boundaries.size
ret = np.zeros((x.shape[0], output_dim)).astype(x.dtype)
boundary_offset = 0
output_offset = 0
for i, l in enumerate(lens):
bucket_idx_right = np.digitize(
x[:, i],
boundaries[boundary_offset:boundary_offset + l],
right=True
)
bucket_idx_left = np.digitize(
x[:, i],
boundaries[boundary_offset:boundary_offset + l],
right=False
)
bucket_idx = np.floor_divide(
np.add(bucket_idx_right, bucket_idx_left), 2)
for j in range(x.shape[0]):
ret[j, output_offset + bucket_idx[j]] = 1.0
boundary_offset += lens[i]
output_offset += (lens[i] + 1)
return (ret, )
op = core.CreateOperator('BatchBucketOneHot',
["X", "LENS", "BOUNDARIES"], ["Y"])
self.assertReferenceChecks(gc, op, [x, lens, boundaries], ref)
@serial.given(
hot_indices=hu.tensor(
min_dim=1, max_dim=1, dtype=np.int64,
elements=st.integers(min_value=0, max_value=42)),
end_padding=st.integers(min_value=0, max_value=2),
**hu.gcs)
def test_one_hot(self, hot_indices, end_padding, gc, dc):
def one_hot_ref(hot_indices, size):
out = np.zeros([len(hot_indices), size], dtype=float)
x = enumerate(hot_indices)
for i, x in enumerate(hot_indices):
out[i, x] = 1.
return (out, )
size = np.array(max(hot_indices) + end_padding + 1, dtype=np.int64)
if size == 0:
size = 1
op = core.CreateOperator('OneHot', ['hot_indices', 'size'], ['output'])
self.assertReferenceChecks(
gc,
op,
[hot_indices, size],
one_hot_ref,
input_device_options={'size': core.DeviceOption(caffe2_pb2.CPU)})
@serial.given(hot_indices=_one_hots())
def test_segment_one_hot(self, hot_indices):
index_size, lengths, indices = hot_indices
index_size = np.array(index_size, dtype=np.int64)
lengths = np.array(lengths, dtype=np.int32)
indices = np.array(indices, dtype=np.int64)
def segment_one_hot_ref(lengths, hot_indices, size):
offset = 0
out = np.zeros([len(lengths), size], dtype=float)
for i, length in enumerate(lengths):
for idx in hot_indices[offset:offset + length]:
out[i, idx] = 1.
offset += length
return (out, )
op = core.CreateOperator(
'SegmentOneHot',
['lengths', 'hot_indices', 'size'],
['output'])
self.assertReferenceChecks(
hu.cpu_do,
op,
[lengths, indices, index_size],
segment_one_hot_ref)
    @given(
        x=hu.tensor(
            min_dim=2, max_dim=2, dtype=np.float32,
            elements=st.integers(min_value=-5, max_value=5)),
        seed=st.integers(min_value=0, max_value=1000),
        **hu.gcs_cpu_only)
    def test_batch_bucket_one_hot_shape_inference(self, x, seed, gc, dc):
        """Verify BatchBucketOneHot shape/type inference matches the actual
        output: (batch, num_columns + num_boundaries) of FLOAT.
        """
        np.random.seed(seed)
        d = x.shape[1]
        lens = np.random.randint(low=1, high=5, size=d)
        boundaries = []
        for i in range(d):
            # add [0, 0] as duplicated boundary for duplicated bucketization
            if lens[i] > 2:
                cur_boundary = np.append(
                    np.random.randn(lens[i] - 2) * 5, [0, 0])
            else:
                cur_boundary = np.random.randn(lens[i]) * 5
            cur_boundary.sort()
            boundaries += cur_boundary.tolist()
        lens = np.array(lens, dtype=np.int32)
        boundaries = np.array(boundaries, dtype=np.float32)
        workspace.FeedBlob('lens', lens)
        workspace.FeedBlob('boundaries', boundaries)
        workspace.FeedBlob('x', x)
        net = core.Net("batch_bucket_one_hot_test")
        result = net.BatchBucketOneHot(["x", "lens", "boundaries"], 1)
        # Inferred shapes/types must agree with the shapes observed after
        # actually running the net.
        (shapes, types) = workspace.InferShapesAndTypes([net])
        workspace.RunNetOnce(net)
        self.assertEqual(shapes[result], list(workspace.blobs[result].shape))
        self.assertEqual(
            shapes[result], [x.shape[0], lens.shape[0] + boundaries.shape[0]])
        self.assertEqual(types[result], core.DataType.FLOAT)
# Allow running this test file directly (outside the test runner).
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/one_hot_ops_test.py
|
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import math
# Hypothesis bounds for the SinusoidPositionEncoding tests below.
MAX_TEST_EMBEDDING_SIZE = 20   # upper bound on the embedding dimension
MAX_TEST_SEQUENCE_LENGTH = 10  # upper bound on sequence positions
MAX_TEST_BATCH_SIZE = 5        # upper bound on batch size
MIN_TEST_ALPHA = 5000.0        # range of the frequency-scale (alpha) argument
MAX_TEST_ALPHA = 20000.0
MIN_TEST_AMPLITUDE = 0.1       # range of the output amplitude argument
MAX_TEST_AMPLITUDE = 10.0
class TestSinusoidPositionEncodingOp(serial.SerializedTestCase):
    """Tests SinusoidPositionEncoding against a scalar sin/cos reference."""
    @given(
        positions_vec=hu.arrays(
            dims=[MAX_TEST_SEQUENCE_LENGTH],
            dtype=np.int32,
            elements=st.integers(1, MAX_TEST_SEQUENCE_LENGTH)
        ),
        embedding_size=st.integers(1, MAX_TEST_EMBEDDING_SIZE),
        batch_size=st.integers(1, MAX_TEST_BATCH_SIZE),
        alpha=st.floats(MIN_TEST_ALPHA, MAX_TEST_ALPHA),
        amplitude=st.floats(MIN_TEST_AMPLITUDE, MAX_TEST_AMPLITUDE),
        **hu.gcs_cpu_only
    )
    @settings(deadline=10000)
    def test_sinusoid_embedding(
        self, positions_vec, embedding_size, batch_size, alpha, amplitude, gc, dc
    ):
        """Compare the op against an elementwise numpy reference."""
        # Shape positions as (seq_len, batch): every batch column shares
        # the same position vector.
        positions = np.tile(positions_vec, [batch_size, 1]).transpose()
        op = core.CreateOperator(
            "SinusoidPositionEncoding",
            ["positions"],
            ["output"],
            embedding_size=embedding_size,
            alpha=alpha,
            amplitude=amplitude,
        )
        def sinusoid_encoding(dim, position):
            # Even embedding dims use sin, odd dims use cos, with the
            # frequency decaying as alpha^(dim / embedding_size).
            x = 1. * position / math.pow(alpha, 1. * dim / embedding_size)
            if dim % 2 == 0:
                return amplitude * math.sin(x)
            else:
                return amplitude * math.cos(x)
        def sinusoid_embedding_op(positions):
            # Reference output: (seq_len, batch, embedding_size).
            output_shape = (len(positions), len(positions[0]), embedding_size)
            ar = np.zeros(output_shape)
            for i, position_vector in enumerate(positions):
                for j, position in enumerate(position_vector):
                    for k in range(embedding_size):
                        ar[i, j, k] = sinusoid_encoding(k, position)
            return [ar]
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[positions],
            reference=sinusoid_embedding_op,
        )
|
pytorch-master
|
caffe2/python/operator_test/sinusoid_position_encoding_op_test.py
|
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given, settings
class TestHistogram(hu.HypothesisTestCase):
    """Tests for the Histogram operator (counting values into bin edges)."""
    @given(rows=st.integers(1, 1000), cols=st.integers(1, 1000), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_histogram__device_consistency(self, rows, cols, gc, dc):
        """Output must agree across all available devices."""
        X = np.random.rand(rows, cols)
        bin_edges = list(np.linspace(-2, 10, num=10000))
        op = core.CreateOperator("Histogram", ["X"], ["histogram"], bin_edges=bin_edges)
        self.assertDeviceChecks(dc, op, [X], [0])
    def test_histogram__valid_inputs_0(self):
        """Hand-checked counts for a small fixed input."""
        workspace.FeedBlob(
            "X", np.array([-2.0, -2.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 9.0])
        )
        bin_edges = [-2.0, -1.0, 0.0, 2.0, 5.0, 9.0]
        net = core.Net("test_net")
        net.Histogram(["X"], ["histogram"], bin_edges=bin_edges)
        workspace.RunNetOnce(net)
        histogram_blob = workspace.FetchBlob("histogram")
        # NOTE: 9.0 falls outside the half-open last bin [5.0, 9.0), so only
        # 11 - 1 = 10 values are counted here; see the generic invariants in
        # _test_histogram for the usual total-count check.
        assert list(histogram_blob) == [2, 0, 4, 3, 1]
    @given(num_tensors=st.integers(1, 5), num_bin_edges=st.integers(2, 10000))
    @settings(deadline=10000)
    def test_histogram__valid_inputs_1(self, num_tensors, num_bin_edges):
        """Random multi-tensor inputs with log-spaced bin edges."""
        self._test_histogram(
            [
                np.random.rand(np.random.randint(1, 1000), np.random.randint(1, 1000))
                for __ in range(num_tensors)
            ],
            list(np.logspace(-12, 5, num=num_bin_edges)),
        )
    def test_histogram__empty_input_tensor(self):
        self._test_histogram([np.array([])], list(np.linspace(-2, 2, num=10)))
    def test_histogram__non_increasing_bin_edges(self):
        """Non-monotonic bin edges must be rejected."""
        with self.assertRaisesRegex(
            RuntimeError, "bin_edges must be a strictly increasing sequence of values"
        ):
            self._test_histogram(
                [np.random.rand(100), np.random.rand(98)], [0.0, 0.2, 0.1, 0.1]
            )
    def test_histogram__insufficient_bin_edges(self):
        """Fewer than two bin edges must be rejected."""
        with self.assertRaisesRegex(
            RuntimeError, "Number of bin edges must be greater than or equal to 2"
        ):
            self._test_histogram([np.random.rand(111)], [1.0])
    def _test_histogram(self, tensors, bin_edges):
        # Helper: feed all tensors, run Histogram, and check the two generic
        # invariants (one count per bin; counts sum to the total element count).
        total_size = 0
        input_blob_names = []
        for idx, tensor in enumerate(tensors):
            total_size += np.size(tensor)
            tensor_blob_name = f"X{idx}"
            workspace.FeedBlob(tensor_blob_name, tensor)
            input_blob_names.append(tensor_blob_name)
        output_name = "histogram"
        net = core.Net("test_net")
        net.Histogram(input_blob_names, [output_name], bin_edges=bin_edges)
        workspace.RunNetOnce(net)
        histogram_blob = workspace.FetchBlob(output_name)
        assert np.size(histogram_blob) == len(bin_edges) - 1
        assert np.sum(histogram_blob) == total_size
# Allow running this test file directly; caffe2 must be initialized first.
if __name__ == "__main__":
    global_options = ["caffe2"]
    core.GlobalInit(global_options)
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/histogram_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestGroupNormOp(serial.SerializedTestCase):
    """Tests for the GroupNorm operator against numpy references."""
    def group_norm_nchw_ref(self, X, gamma, beta, group, epsilon):
        """Numpy GroupNorm reference for NCHW-ordered input.

        Returns [Y (same shape as X), mean (N, G), inverse std (N, G)].
        """
        dims = X.shape
        N = dims[0]
        C = dims[1]
        G = group
        D = int(C / G)
        # Fold channels into (N, G, D, spatial); normalize over (D, spatial).
        X = X.reshape(N, G, D, -1)
        mu = np.mean(X, axis=(2, 3), keepdims=True)
        std = np.sqrt((np.var(X, axis=(2, 3), keepdims=True) + epsilon))
        gamma = gamma.reshape(G, D, 1)
        beta = beta.reshape(G, D, 1)
        Y = gamma * (X - mu) / std + beta
        return [Y.reshape(dims), mu.reshape(N, G), (1.0 / std).reshape(N, G)]
    def group_norm_nhwc_ref(self, X, gamma, beta, group, epsilon):
        """Numpy GroupNorm reference for NHWC-ordered input.

        Returns [Y (same shape as X), mean (N, G), inverse std (N, G)].
        """
        dims = X.shape
        N = dims[0]
        C = dims[-1]
        G = group
        D = int(C / G)
        # Fold channels into (N, spatial, G, D); normalize over (spatial, D).
        X = X.reshape(N, -1, G, D)
        mu = np.mean(X, axis=(1, 3), keepdims=True)
        std = np.sqrt((np.var(X, axis=(1, 3), keepdims=True) + epsilon))
        gamma = gamma.reshape(G, D)
        beta = beta.reshape(G, D)
        Y = gamma * (X - mu) / std + beta
        return [Y.reshape(dims), mu.reshape(N, G), (1.0 / std).reshape(N, G)]
    @serial.given(
        N=st.integers(1, 5), G=st.integers(1, 5), D=st.integers(1, 5),
        H=st.integers(2, 5), W=st.integers(2, 5),
        epsilon=st.floats(min_value=1e-5, max_value=1e-4),
        order=st.sampled_from(["NCHW", "NHWC"]), **hu.gcs)
    def test_group_norm_2d(
            self, N, G, D, H, W, epsilon, order, gc, dc):
        """Forward check on 4-D input for both memory orders."""
        op = core.CreateOperator(
            "GroupNorm",
            ["X", "gamma", "beta"],
            ["Y", "mean", "inv_std"],
            group=G,
            epsilon=epsilon,
            order=order,
        )
        C = G * D
        if order == "NCHW":
            X = np.random.randn(N, C, H, W).astype(np.float32) + 1.0
        else:
            X = np.random.randn(N, H, W, C).astype(np.float32) + 1.0
        gamma = np.random.randn(C).astype(np.float32)
        beta = np.random.randn(C).astype(np.float32)
        inputs = [X, gamma, beta]
        def ref_op(X, gamma, beta):
            if order == "NCHW":
                return self.group_norm_nchw_ref(X, gamma, beta, G, epsilon)
            else:
                return self.group_norm_nhwc_ref(X, gamma, beta, G, epsilon)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=ref_op,
            threshold=5e-3,
        )
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
    @given(N=st.integers(1, 5), G=st.integers(1, 3), D=st.integers(2, 3),
           T=st.integers(2, 4), H=st.integers(2, 4), W=st.integers(2, 4),
           epsilon=st.floats(min_value=1e-5, max_value=1e-4),
           order=st.sampled_from(["NCHW", "NHWC"]), **hu.gcs)
    def test_group_norm_3d(
            self, N, G, D, T, H, W, epsilon, order, gc, dc):
        """Forward check on 5-D (temporal) input for both memory orders."""
        op = core.CreateOperator(
            "GroupNorm",
            ["X", "gamma", "beta"],
            ["Y", "mean", "inv_std"],
            group=G,
            epsilon=epsilon,
            order=order,
        )
        C = G * D
        if order == "NCHW":
            X = np.random.randn(N, C, T, H, W).astype(np.float32) + 1.0
        else:
            X = np.random.randn(N, T, H, W, C).astype(np.float32) + 1.0
        gamma = np.random.randn(C).astype(np.float32)
        beta = np.random.randn(C).astype(np.float32)
        inputs = [X, gamma, beta]
        def ref_op(X, gamma, beta):
            if order == "NCHW":
                return self.group_norm_nchw_ref(X, gamma, beta, G, epsilon)
            else:
                return self.group_norm_nhwc_ref(X, gamma, beta, G, epsilon)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=ref_op,
            threshold=5e-3,
        )
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
    @given(N=st.integers(1, 5), G=st.integers(1, 5), D=st.integers(2, 2),
           H=st.integers(2, 5), W=st.integers(2, 5),
           epsilon=st.floats(min_value=1e-5, max_value=1e-4),
           order=st.sampled_from(["NCHW", "NHWC"]), **hu.gcs)
    @settings(deadline=10000)
    def test_group_norm_grad(
            self, N, G, D, H, W, epsilon, order, gc, dc):
        """Numeric gradient check w.r.t. X, gamma and beta."""
        op = core.CreateOperator(
            "GroupNorm",
            ["X", "gamma", "beta"],
            ["Y", "mean", "inv_std"],
            group=G,
            epsilon=epsilon,
            order=order,
        )
        C = G * D
        # Use a shuffled arange so every element is distinct, which keeps the
        # finite-difference gradient well-behaved.
        X = np.arange(N * C * H * W).astype(np.float32)
        np.random.shuffle(X)
        if order == "NCHW":
            X = X.reshape((N, C, H, W))
        else:
            X = X.reshape((N, H, W, C))
        gamma = np.random.randn(C).astype(np.float32)
        beta = np.random.randn(C).astype(np.float32)
        inputs = [X, gamma, beta]
        for i in range(len(inputs)):
            self.assertGradientChecks(gc, op, inputs, i, [0])
# Allow running this test file directly (outside the test runner).
if __name__ == "__main__":
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/group_norm_op_test.py
|
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
from hypothesis import strategies as st
import numpy as np
import time
class TestTensorPackOps(serial.SerializedTestCase):
    """Tests for the PackSegments / UnpackSegments operators."""

    def pack_segments_ref(self, return_presence_mask=False, max_length=None):
        """Build a numpy reference implementation of PackSegments.

        Returns a function (lengths, data) -> [packed] or
        [packed, presence_mask], where each segment is truncated to
        max_length (default: the longest segment) and padded with zeros
        (empty strings for byte-string data).
        """
        def pack_segments_ref(lengths, data, max_length=max_length):
            arr = []
            constant_values = 0
            if data.dtype.char == 'S':
                constant_values = ''
            if max_length is None:
                max_length = np.max(lengths)
            start = 0
            for idx in range(np.size(lengths)):
                # Truncate the segment to max_length, then pad rows up to it.
                # (Renamed from `len` to avoid shadowing the builtin.)
                seg_len = lengths[idx] if max_length >= lengths[idx] else max_length
                chunk = data[start : start + seg_len]
                pad_length = max_length - seg_len
                # ((0, pad_length), (0, 0)) says add pad_length rows of padding
                # below chunk and 0 rows of padding elsewhere
                arr.append(
                    np.pad(
                        chunk, ((0, pad_length), (0, 0)),
                        mode="constant",
                        constant_values=constant_values
                    )
                )
                start += lengths[idx]
            result = [arr]
            if return_presence_mask:
                presence_arr = []
                for length in lengths:
                    length = length if max_length >= length else max_length
                    pad_length = max_length - length
                    # np.bool was removed from NumPy; the builtin bool dtype
                    # is the equivalent.
                    presence_arr.append(
                        np.pad(
                            np.ones(length, dtype=bool), ((0, pad_length)),
                            mode="constant"
                        )
                    )
                result.append(presence_arr)
            return result
        return pack_segments_ref

    @given(
        num_seq=st.integers(10, 100),
        cell_size=st.integers(1, 10),
        max_length_buffer=st.integers(-5, 5),
        **hu.gcs
    )
    @settings(deadline=None, max_examples=50)
    def test_pack_with_max_length_ops(
        self, num_seq, cell_size, max_length_buffer, gc, dc
    ):
        """Pack with an explicit max_length, unpack, and compare to a
        truncating reference."""
        # create data
        lengths = np.arange(num_seq, dtype=np.int32) + 1
        num_cell = np.sum(lengths)
        data = np.zeros(num_cell * cell_size, dtype=np.float32)
        left = np.cumsum(np.arange(num_seq) * cell_size)
        right = np.cumsum(lengths * cell_size)
        for i in range(num_seq):
            data[left[i]:right[i]] = i + 1.0
        data.resize(num_cell, cell_size)
        print("\nnum seq:{}, num cell: {}, cell size:{}\n".format(
            num_seq, num_cell, cell_size)
            + "=" * 60
        )
        # run test
        max_length = num_seq + max_length_buffer
        op = core.CreateOperator(
            'PackSegments', ['l', 'd'], ['t'], max_length=max_length)
        workspace.FeedBlob('l', lengths)
        workspace.FeedBlob('d', data)
        start = time.time()
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[lengths, data, max_length],
            reference=self.pack_segments_ref(max_length=max_length),
        )
        end = time.time()
        print("{} used time: {}".format(gc, end - start).replace('\n', ' '))
        with core.DeviceScope(gc):
            workspace.FeedBlob('l', lengths)
            workspace.FeedBlob('d', data)
            workspace.RunOperatorOnce(core.CreateOperator(
                'PackSegments',
                ['l', 'd'],
                ['t'],
                max_length=max_length,
                device_option=gc))
            workspace.RunOperatorOnce(core.CreateOperator(
                'UnpackSegments',
                ['l', 't'],
                ['newd'],
                max_length=max_length,
                device_option=gc))
            assert(workspace.FetchBlob('t').shape[1] == max_length)

        def _cal_unpacked_data(data):
            # Reference unpack: each segment is truncated to max_length.
            if max_length >= num_seq:
                return data
            output = None
            start = 0
            for i, length in enumerate(lengths):
                new_len = max_length if length > max_length else length
                chunk = data[start: start + new_len]
                if output is None:
                    output = chunk
                else:
                    output = np.concatenate((output, chunk), axis=0)
                start += length
            return output
        true_newd = _cal_unpacked_data(workspace.FetchBlob('d'))
        assert((workspace.FetchBlob('newd') == true_newd).all())

    @given(
        num_seq=st.integers(10, 500),
        cell_size=st.integers(1, 10),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test_pack_ops(self, num_seq, cell_size, gc, dc):
        """Pack without max_length, then check unpack is a round-trip."""
        # create data
        lengths = np.arange(num_seq, dtype=np.int32) + 1
        num_cell = np.sum(lengths)
        data = np.zeros(num_cell * cell_size, dtype=np.float32)
        left = np.cumsum(np.arange(num_seq) * cell_size)
        right = np.cumsum(lengths * cell_size)
        for i in range(num_seq):
            data[left[i]:right[i]] = i + 1.0
        data.resize(num_cell, cell_size)
        print("\nnum seq:{}, num cell: {}, cell size:{}\n".format(
            num_seq, num_cell, cell_size)
            + "=" * 60
        )
        # run test
        op = core.CreateOperator(
            'PackSegments', ['l', 'd'], ['t'])
        workspace.FeedBlob('l', lengths)
        workspace.FeedBlob('d', data)
        start = time.time()
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[lengths, data],
            reference=self.pack_segments_ref(),
        )
        end = time.time()
        print("{} used time: {}".format(gc, end - start).replace('\n', ' '))
        with core.DeviceScope(gc):
            workspace.FeedBlob('l', lengths)
            workspace.FeedBlob('d', data)
            workspace.RunOperatorOnce(core.CreateOperator(
                'PackSegments',
                ['l', 'd'],
                ['t'],
                device_option=gc))
            workspace.RunOperatorOnce(core.CreateOperator(
                'UnpackSegments',
                ['l', 't'],
                ['newd'],
                device_option=gc))
            assert((workspace.FetchBlob('newd') == workspace.FetchBlob('d')).all())

    @given(
        **hu.gcs_cpu_only
    )
    def test_pack_ops_str(self, gc, dc):
        """Round-trip byte-string data through pack/unpack."""
        # GPU does not support string. Test CPU implementation only.
        workspace.FeedBlob('l', np.array([1, 2, 3], dtype=np.int64))
        strs = np.array([
            ["a", "a"],
            ["b", "b"],
            ["bb", "bb"],
            ["c", "c"],
            ["cc", "cc"],
            ["ccc", "ccc"]],
            dtype='|S')
        workspace.FeedBlob('d', strs)
        workspace.RunOperatorOnce(core.CreateOperator(
            'PackSegments',
            ['l', 'd'],
            ['t'],
            device_option=gc))
        workspace.RunOperatorOnce(core.CreateOperator(
            'UnpackSegments',
            ['l', 't'],
            ['newd'],
            device_option=gc))
        assert((workspace.FetchBlob('newd') == workspace.FetchBlob('d')).all())

    def test_pad_minf(self):
        """pad_minf=True pads with a very negative value so exp(pad) == 0."""
        workspace.FeedBlob('l', np.array([1, 2, 3], dtype=np.int32))
        workspace.FeedBlob(
            'd',
            np.array([
                [1.0, 1.1],
                [2.0, 2.1],
                [2.2, 2.2],
                [3.0, 3.1],
                [3.2, 3.3],
                [3.4, 3.5]],
                dtype=np.float32))
        workspace.RunOperatorOnce(core.CreateOperator(
            'PackSegments', ['l', 'd'], ['t'], pad_minf=True))
        workspace.RunOperatorOnce(core.CreateOperator(
            'Exp', ['t'], ['r']
        ))
        result = workspace.FetchBlob('t')
        assert(result[0, -1, 0] < -1000.0)

        # The whole point of padding with -inf is that when we exponentiate it
        # then it should be zero.
        exponentiated = workspace.FetchBlob('r')
        assert(exponentiated[0, -1, 0] == 0.0)

    def test_pad_no_minf(self):
        """Default padding is zero for both float and int data."""
        workspace.FeedBlob('l', np.array([1, 2, 3], dtype=np.int32))
        workspace.FeedBlob(
            'd',
            np.array([
                [1.0, 1.1],
                [2.0, 2.1],
                [2.2, 2.2],
                [3.0, 3.1],
                [3.2, 3.3],
                [3.4, 3.5]],
                dtype=np.float32))
        workspace.RunOperatorOnce(
            core.CreateOperator(
                'PackSegments', ['l', 'd'], ['t'], pad_minf=False),
        )
        result = workspace.FetchBlob('t')
        assert(result[0, -1, 0] == 0.0)

        workspace.FeedBlob(
            'i',
            np.array([
                [1, 1],
                [2, 2],
                [2, 2],
                [3, 3],
                [3, 3],
                [3, 3]],
                dtype=np.int32))
        workspace.RunOperatorOnce(
            core.CreateOperator(
                'PackSegments', ['l', 'i'], ['t2'], pad_minf=False),
        )
        result = workspace.FetchBlob('t2')
        assert(result[0, -1, 0] == 0)

    @given(**hu.gcs)
    def test_presence_mask(self, gc, dc):
        """return_presence_mask=True yields a (num_seq, max_len) bool mask."""
        lengths = np.array([1, 2, 3], dtype=np.int32)
        data = np.array(
            [
                [1.0, 1.0], [2.0, 2.0], [2.0, 2.0], [3.0, 3.0], [3.0, 3.0],
                [3.0, 3.0]
            ],
            dtype=np.float32
        )
        op = core.CreateOperator(
            'PackSegments', ['l', 'd'], ['t', 'p'], return_presence_mask=True
        )
        workspace.FeedBlob('l', lengths)
        workspace.FeedBlob('d', data)
        inputs = [lengths, data]
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=self.pack_segments_ref(return_presence_mask=True),
        )
        op = core.CreateOperator(
            'PackSegments', ['l', 'd'], ['t', 'p'], return_presence_mask=True
        )
        workspace.RunOperatorOnce(op)
        output = workspace.FetchBlob('t')
        expected_output_shape = (3, 3, 2)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(output.shape, expected_output_shape)
        presence_mask = workspace.FetchBlob('p')
        expected_presence_mask = np.array(
            [[True, False, False], [True, True, False], [True, True, True]],
            dtype=bool
        )
        self.assertEqual(presence_mask.shape, expected_presence_mask.shape)
        np.testing.assert_array_equal(presence_mask, expected_presence_mask)

    def test_presence_mask_empty(self):
        """Empty lengths/data produce an empty (0, 0) presence mask."""
        lengths = np.array([], dtype=np.int32)
        data = np.array([], dtype=np.float32)
        op = core.CreateOperator(
            'PackSegments', ['l', 'd'], ['t', 'p'], return_presence_mask=True
        )
        workspace.FeedBlob('l', lengths)
        workspace.FeedBlob('d', data)
        workspace.RunOperatorOnce(op)
        output = workspace.FetchBlob('p')
        expected_output_shape = (0, 0)
        self.assertEqual(output.shape, expected_output_shape)

    @given(**hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_out_of_bounds(self, gc, dc):
        """Lengths summing past the data size must raise."""
        # Copy pasted from test_pack_ops but with 3 changed to 4
        lengths = np.array([1, 2, 4], dtype=np.int32)
        data = np.array([
            [1.0, 1.0],
            [2.0, 2.0],
            [2.0, 2.0],
            [3.0, 3.0],
            [3.0, 3.0],
            [3.0, 3.0]], dtype=np.float32)
        op = core.CreateOperator(
            'PackSegments', ['l', 'd'], ['t'])
        inputs = [lengths, data]
        self.assertRunOpRaises(
            device_option=gc,
            op=op,
            inputs=inputs,
            exception=RuntimeError
        )

    @given(**hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_under_bounds(self, gc, dc):
        """Lengths summing short of the data size must raise."""
        # Copy pasted from test_pack_ops but with 3 changed to 2
        lengths = np.array([1, 2, 2], dtype=np.int32)
        data = np.array([
            [1.0, 1.0],
            [2.0, 2.0],
            [2.0, 2.0],
            [3.0, 3.0],
            [3.0, 3.0],
            [3.0, 3.0]], dtype=np.float32)
        op = core.CreateOperator(
            'PackSegments', ['l', 'd'], ['t'])
        inputs = [lengths, data]
        self.assertRunOpRaises(
            device_option=gc,
            op=op,
            inputs=inputs,
            exception=RuntimeError
        )
# Allow running this test file directly (outside the test runner).
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/pack_ops_test.py
|
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from hypothesis import assume, given, settings, HealthCheck
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
class TestFcOperator(serial.SerializedTestCase):
    """Tests for the FC / FCTransposed operators."""
    def _run_test(self, n, m, k, transposed, multi_dim, dtype, engine, gc, dc):
        """Shared driver: reference check, device check and gradient check.

        n/m/k are output/batch/input sizes; multi_dim appends trailing 1x1
        dims to the weight; fp16 and TENSORCORE paths only run on GPU.
        """
        if dtype == np.float16:
            # fp16 only supported with CUDA/HIP
            assume(core.IsGPUDeviceType(gc.device_type))
            dc = [d for d in dc if core.IsGPUDeviceType(d.device_type)]

        if engine == 'TENSORCORE':
            # TensorCore only makes sense with CUDA
            assume(gc.device_type == caffe2_pb2.CUDA)
            # ensures TensorCore kernels can be called
            m *= 8
            k *= 8
            n *= 8
        X = np.random.rand(m, k).astype(dtype) - 0.5
        if multi_dim:
            if transposed:
                W = np.random.rand(k, n, 1, 1).astype(dtype) - 0.5
            else:
                W = np.random.rand(n, k, 1, 1).astype(dtype) - 0.5
        else:
            if transposed:
                W = np.random.rand(k, n).astype(dtype) - 0.5
            else:
                W = np.random.rand(n, k).astype(dtype) - 0.5
        b = np.random.rand(n).astype(dtype) - 0.5
        def fc_op(X, W, b):
            # FC stores W as (n, k): Y = X . W^T + b
            return [np.dot(X, W.reshape(n, k).transpose()) + b.reshape(n)]
        def fc_transposed_op(X, W, b):
            # FCTransposed stores W as (k, n): Y = X . W + b
            return [np.dot(X, W.reshape(k, n)) + b.reshape(n)]
        op = core.CreateOperator(
            'FCTransposed' if transposed else 'FC',
            ['X', 'W', 'b'],
            'out',
            engine=engine,
        )
        if dtype == np.float16 and core.IsGPUDeviceType(gc.device_type):
            a = caffe2_pb2.Argument()
            a.i = 1
            a.name = "float16_compute"
            op.arg.extend([a])
        # Check against numpy reference
        # ReferenceChecks is flaky, Relaxing to 1e-3.
        threshold = 1e-3
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X, W, b],
            reference=fc_transposed_op if transposed else fc_op,
            threshold=threshold
        )
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X, W, b], [0])

        # Gradient checks
        # fp16 needs much looser tolerances and a larger step size.
        threshold = 0.5 if dtype == np.float16 else 0.005
        stepsize = 0.5 if dtype == np.float16 else 0.05
        for i in range(3):
            self.assertGradientChecks(gc, op, [X, W, b], i, [0],
                                      threshold=threshold, stepsize=stepsize)

    @settings(max_examples=50, suppress_health_check=[HealthCheck.filter_too_much])
    @serial.given(n=st.integers(1, 5),
                  m=st.integers(0, 5),
                  k=st.integers(1, 5),
                  multi_dim=st.sampled_from([True, False]),
                  dtype=st.sampled_from([np.float32, np.float16]),
                  engine=st.sampled_from(['', 'TENSORCORE']),
                  **hu.gcs)
    def test_fc(self, **kwargs):
        """FC with (n, k)-shaped weights."""
        self._run_test(transposed=False, **kwargs)

    @settings(max_examples=50, suppress_health_check=[HealthCheck.filter_too_much])
    @given(n=st.integers(1, 5),
           m=st.integers(0, 5),
           k=st.integers(1, 5),
           multi_dim=st.sampled_from([True, False]),
           dtype=st.sampled_from([np.float32, np.float16]),
           engine=st.sampled_from(['', 'TENSORCORE']),
           **hu.gcs)
    def test_fc_transposed(self, **kwargs):
        """FCTransposed with (k, n)-shaped weights."""
        self._run_test(transposed=True, **kwargs)
# Allow running this test file directly (outside the test runner).
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/fc_operator_test.py
|
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestDropout(serial.SerializedTestCase):
    """Tests for the Dropout operator in its deterministic configurations
    (is_test=True, ratio=0, ratio=1)."""

    @serial.given(X=hu.tensor(),
                  in_place=st.booleans(),
                  ratio=st.floats(0, 0.999),
                  engine=st.sampled_from(["", "CUDNN"]),
                  **hu.gcs_cpu_only)
    def test_dropout_is_test(self, X, in_place, ratio, engine, gc, dc):
        """Test with is_test=True for a deterministic reference impl."""
        # TODO(lukeyeager): enable this path when the GPU path is fixed
        if in_place:
            # Skip if trying in-place on GPU
            assume(not (gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP} and engine == ''))
            # If in-place on CPU, don't compare with GPU
            dc = dc[:1]
        op = core.CreateOperator("Dropout", ["X"],
                                 ["X" if in_place else "Y"],
                                 ratio=ratio, engine=engine, is_test=True)

        self.assertDeviceChecks(dc, op, [X], [0])
        # No sense in checking gradients for test phase

        def reference_dropout_test(x):
            # In test mode dropout is the identity; mask is all ones.
            # np.bool was removed from NumPy; builtin bool is equivalent.
            return x, np.ones(x.shape, dtype=bool)
        self.assertReferenceChecks(
            gc, op, [X], reference_dropout_test,
            # The 'mask' output may be uninitialized
            outputs_to_check=[0])

    @given(X=hu.tensor(),
           in_place=st.booleans(),
           output_mask=st.booleans(),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_dropout_ratio0(self, X, in_place, output_mask, engine, gc, dc):
        """Test with ratio=0 for a deterministic reference impl."""
        # TODO(lukeyeager): enable this path when the op is fixed
        if in_place:
            # Skip if trying in-place on GPU
            assume(gc.device_type not in {caffe2_pb2.CUDA, caffe2_pb2.HIP})
            # If in-place on CPU, don't compare with GPU
            dc = dc[:1]

        is_test = not output_mask
        op = core.CreateOperator("Dropout", ["X"],
                                 ["X" if in_place else "Y"] +
                                 (["mask"] if output_mask else []),
                                 ratio=0.0, engine=engine,
                                 is_test=is_test)

        self.assertDeviceChecks(dc, op, [X], [0])
        if not is_test:
            self.assertGradientChecks(gc, op, [X], 0, [0])

        def reference_dropout_ratio0(x):
            # Nothing is dropped: identity output, all-true mask.
            return (x,) if is_test else (x, np.ones(x.shape, dtype=bool))
        self.assertReferenceChecks(
            gc, op, [X], reference_dropout_ratio0,
            # Don't check the mask with cuDNN because it's packed data
            outputs_to_check=None if engine != 'CUDNN' else [0])

    @given(X=hu.tensor(),
           in_place=st.booleans(),
           output_mask=st.booleans(),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_dropout_ratio1(self, X, in_place, output_mask, engine, gc, dc):
        """Test with ratio=1 for a deterministic reference impl."""
        if in_place:
            # Skip if trying in-place on GPU
            assume(gc.device_type not in {caffe2_pb2.CUDA, caffe2_pb2.HIP})
            # If in-place on CPU, don't compare with GPU
            dc = dc[:1]

        is_test = not output_mask
        op = core.CreateOperator("Dropout", ["X"],
                                 ["X" if in_place else "Y"] +
                                 (["mask"] if output_mask else []),
                                 ratio=1.0, engine=engine,
                                 is_test=is_test)

        self.assertDeviceChecks(dc, op, [X], [0])
        if not is_test:
            self.assertGradientChecks(gc, op, [X], 0, [0])

        def reference_dropout_ratio1(x):
            # Everything is dropped: zero output, all-false mask.
            # np.float was removed from NumPy; np.float64 is the same dtype.
            return (x,) if is_test else (np.zeros(x.shape, dtype=np.float64),
                                         np.zeros(x.shape, dtype=bool))
        self.assertReferenceChecks(
            gc, op, [X], reference_dropout_ratio1,
            # Don't check the mask with cuDNN because it's packed data
            outputs_to_check=None if engine != 'CUDNN' else [0])
|
pytorch-master
|
caffe2/python/operator_test/dropout_op_test.py
|
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
class TestDataCoupleOp(TestCase):
    """DataCouple should pass its named outputs through unchanged."""

    def test_data_couple_op(self):
        # Feed three random blobs; only the first two are named as outputs.
        blobs = {
            "param": np.random.rand(10, 10),
            "gradient": np.random.rand(10, 10),
            "extraBlob": np.random.rand(10, 10),
        }
        for name, value in blobs.items():
            workspace.FeedBlob(name, value)

        op = core.CreateOperator(
            "DataCouple",
            ["param", "gradient", "extraBlob"],
            ["param", "gradient"])
        workspace.RunOperatorOnce(op)

        # The op must leave both output blobs numerically untouched.
        self.assertFalse(
            (workspace.FetchBlob('param') - blobs["param"]).any())
        self.assertFalse(
            (workspace.FetchBlob('gradient') - blobs["gradient"]).any())
|
pytorch-master
|
caffe2/python/operator_test/data_couple_op_test.py
|
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestTopK(serial.SerializedTestCase):
    def top_k_ref(self, X, k, flatten_indices, axis=-1):
        """Numpy reference for TopK along `axis`.

        Returns (values, indices) shaped like X with `axis` replaced by k,
        plus a flat index array when flatten_indices is set. Ties are broken
        by the smaller position along the axis (the -x[1] sort key).
        Missing entries (when k > axis size) stay at 0 / -1.
        """
        in_dims = X.shape
        out_dims = list(in_dims)
        out_dims[axis] = k
        out_dims = tuple(out_dims)
        if axis == -1:
            axis = len(in_dims) - 1
        # Collapse X to (prev_dims, n, next_dims) around the reduced axis.
        prev_dims = 1
        next_dims = 1
        for i in range(axis):
            prev_dims *= in_dims[i]
        for i in range(axis + 1, len(in_dims)):
            next_dims *= in_dims[i]
        n = in_dims[axis]
        X_flat = X.reshape((prev_dims, n, next_dims))
        values_ref = np.ndarray(
            shape=(prev_dims, k, next_dims), dtype=np.float32)
        values_ref.fill(0)
        indices_ref = np.ndarray(
            shape=(prev_dims, k, next_dims), dtype=np.int64)
        indices_ref.fill(-1)
        flatten_indices_ref = np.ndarray(
            shape=(prev_dims, k, next_dims), dtype=np.int64)
        flatten_indices_ref.fill(-1)
        for i in range(prev_dims):
            for j in range(next_dims):
                # Collect (value, axis index, flat index) for every slot.
                kv = []
                for x in range(n):
                    val = X_flat[i, x, j]
                    y = x * next_dims + i * in_dims[axis] * next_dims + j
                    kv.append((val, x, y))
                cnt = 0
                # Sort by value descending, preferring smaller axis index
                # on ties; keep the first min(k, n) entries.
                for val, x, y in sorted(
                        kv, key=lambda x: (x[0], -x[1]), reverse=True):
                    values_ref[i, cnt, j] = val
                    indices_ref[i, cnt, j] = x
                    flatten_indices_ref[i, cnt, j] = y
                    cnt += 1
                    if cnt >= k or cnt >= n:
                        break
        values_ref = values_ref.reshape(out_dims)
        indices_ref = indices_ref.reshape(out_dims)
        flatten_indices_ref = flatten_indices_ref.flatten()
        if flatten_indices:
            return (values_ref, indices_ref, flatten_indices_ref)
        else:
            return (values_ref, indices_ref)
    @serial.given(
        X=hu.tensor(),
        flatten_indices=st.booleans(),
        seed=st.integers(0, 10),
        **hu.gcs
    )
    def test_top_k(self, X, flatten_indices, seed, gc, dc):
        """TopK on a random tensor; k may exceed the last-axis size."""
        X = X.astype(dtype=np.float32)
        np.random.seed(seed)
        # `k` can be larger than the total size
        k = np.random.randint(1, X.shape[-1] + 4)
        output_list = ["Values", "Indices"]
        if flatten_indices:
            output_list.append("FlattenIndices")
        op = core.CreateOperator("TopK", ["X"], output_list,
                                 k=k, device_option=gc)
        def bind_ref(X_loc):
            return self.top_k_ref(X_loc, k, flatten_indices)
        self.assertReferenceChecks(gc, op, [X], bind_ref)
        self.assertDeviceChecks(dc, op, [X], [0])
    @given(bs=st.integers(1, 3), n=st.integers(1, 1), k=st.integers(1, 1),
           flatten_indices=st.booleans(), **hu.gcs)
    def test_top_k_1(self, bs, n, k, flatten_indices, gc, dc):
        """Degenerate case: n == 1 and k == 1."""
        X = np.random.rand(bs, n).astype(dtype=np.float32)
        output_list = ["Values", "Indices"]
        if flatten_indices:
            output_list.append("FlattenIndices")
        op = core.CreateOperator("TopK", ["X"], output_list,
                                 k=k, device_option=gc)
        def bind_ref(X_loc):
            return self.top_k_ref(X_loc, k, flatten_indices)
        self.assertReferenceChecks(gc, op, [X], bind_ref)
        self.assertDeviceChecks(dc, op, [X], [0])
    @given(bs=st.integers(1, 3), n=st.integers(1, 10000), k=st.integers(1, 1),
           flatten_indices=st.booleans(), **hu.gcs)
    def test_top_k_2(self, bs, n, k, flatten_indices, gc, dc):
        """k == 1 over a potentially large axis (argmax-like path)."""
        X = np.random.rand(bs, n).astype(dtype=np.float32)
        output_list = ["Values", "Indices"]
        if flatten_indices:
            output_list.append("FlattenIndices")
        op = core.CreateOperator("TopK", ["X"], output_list,
                                 k=k, device_option=gc)
        def bind_ref(X_loc):
            return self.top_k_ref(X_loc, k, flatten_indices)
        self.assertReferenceChecks(gc, op, [X], bind_ref)
        self.assertDeviceChecks(dc, op, [X], [0])
    @given(bs=st.integers(1, 3), n=st.integers(1, 10000),
           k=st.integers(1, 1024), flatten_indices=st.booleans(), **hu.gcs)
    def test_top_k_3(self, bs, n, k, flatten_indices, gc, dc):
        """General k (up to 1024) over a potentially large axis."""
        X = np.random.rand(bs, n).astype(dtype=np.float32)
        output_list = ["Values", "Indices"]
        if flatten_indices:
            output_list.append("FlattenIndices")
        op = core.CreateOperator("TopK", ["X"], output_list,
                                 k=k, device_option=gc)

        def bind_ref(X_loc):
            return self.top_k_ref(X_loc, k, flatten_indices)
        self.assertReferenceChecks(gc, op, [X], bind_ref)
        self.assertDeviceChecks(dc, op, [X], [0])
    @given(bs=st.integers(1, 3), n=st.integers(100, 10000),
           flatten_indices=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_top_k_4(self, bs, n, flatten_indices, gc, dc):
        """k chosen as a sizable fraction of n (between n/3 and 3n/4)."""
        k = np.random.randint(n // 3, 3 * n // 4)
        X = np.random.rand(bs, n).astype(dtype=np.float32)
        output_list = ["Values", "Indices"]
        if flatten_indices:
            output_list.append("FlattenIndices")
        op = core.CreateOperator("TopK", ["X"], output_list,
                                 k=k, device_option=gc)
        def bind_ref(X_loc):
            return self.top_k_ref(X_loc, k, flatten_indices)
        self.assertReferenceChecks(gc, op, [X], bind_ref)
        self.assertDeviceChecks(dc, op, [X], [0])
    @given(bs=st.integers(1, 3), n=st.integers(1, 1024),
           flatten_indices=st.booleans(), **hu.gcs)
    def test_top_k_5(self, bs, n, flatten_indices, gc, dc):
        """k == n (full sort) for n up to 1024."""
        k = n
        X = np.random.rand(bs, n).astype(dtype=np.float32)
        output_list = ["Values", "Indices"]
        if flatten_indices:
            output_list.append("FlattenIndices")
        op = core.CreateOperator("TopK", ["X"], output_list,
                                 k=k, device_option=gc)
        def bind_ref(X_loc):
            return self.top_k_ref(X_loc, k, flatten_indices)
        self.assertReferenceChecks(gc, op, [X], bind_ref)
        self.assertDeviceChecks(dc, op, [X], [0])
@given(bs=st.integers(1, 3), n=st.integers(1, 5000),
       flatten_indices=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_top_k_6(self, bs, n, flatten_indices, gc, dc):
    """Full-sort case (k == n) on larger rows, up to n = 5000."""
    k = n
    X = np.random.rand(bs, n).astype(dtype=np.float32)

    outputs = ["Values", "Indices"]
    if flatten_indices:
        outputs += ["FlattenIndices"]
    op = core.CreateOperator("TopK", ["X"], outputs,
                             k=k, device_option=gc)

    self.assertReferenceChecks(
        gc, op, [X],
        lambda X_loc: self.top_k_ref(X_loc, k, flatten_indices))
    self.assertDeviceChecks(dc, op, [X], [0])
@given(X=hu.tensor(dtype=np.float32), k=st.integers(1, 5),
       axis=st.integers(-1, 5), flatten_indices=st.booleans(),
       **hu.gcs)
def test_top_k_axis(self, X, k, axis, flatten_indices, gc, dc):
    """TopK along an arbitrary (possibly negative) axis."""
    ndim = len(X.shape)
    # Fold an out-of-range axis back into [0, ndim); -1 passes through
    # and is interpreted by the operator as the last axis.
    if axis >= ndim:
        axis %= ndim

    outputs = ["Values", "Indices"]
    if flatten_indices:
        outputs += ["FlattenIndices"]
    op = core.CreateOperator(
        "TopK", ["X"], outputs, k=k, axis=axis, device_option=gc)

    self.assertReferenceChecks(
        gc, op, [X],
        lambda X_loc: self.top_k_ref(X_loc, k, flatten_indices, axis))
    self.assertDeviceChecks(dc, op, [X], [0])
@given(X=hu.tensor(dtype=np.float32), k=st.integers(1, 5),
       axis=st.integers(-1, 5), **hu.gcs)
@settings(deadline=10000)
def test_top_k_grad(self, X, k, axis, gc, dc):
    """Gradient check for TopK along an arbitrary axis.

    The input is rewritten so that every slice along the TopK axis is a
    shuffled arithmetic progression with gaps of 0.2, guaranteeing the
    finite-difference step (0.05) cannot change which elements are
    selected as the top k.
    """
    dims = X.shape
    # Fold an out-of-range axis back into [0, ndim); axis == -1 is left
    # as-is and mapped to the last axis via input_axis below.
    if axis >= len(dims):
        axis %= len(dims)
    input_axis = len(dims) - 1 if axis == -1 else axis
    # Flatten X to (prev_dims, dims[input_axis], next_dims) so each
    # TopK slice is contiguous along the middle dimension.
    prev_dims = 1
    next_dims = 1
    for i in range(input_axis):
        prev_dims *= dims[i]
    for i in range(input_axis + 1, len(dims)):
        next_dims *= dims[i]
    X_flat = X.reshape((prev_dims, dims[input_axis], next_dims))
    for i in range(prev_dims):
        for j in range(next_dims):
            # this try to make sure adding stepsize (0.05)
            # will not change TopK selections at all
            # (dims[axis] == dims[input_axis] here: axis is either
            # non-negative after normalization, or -1)
            X_flat[i, :, j] = np.arange(dims[axis], dtype=np.float32) / 5
            np.random.shuffle(X_flat[i, :, j])
    X = X_flat.reshape(dims)
    op = core.CreateOperator(
        "TopK", ["X"], ["Values", "Indices"], k=k, axis=axis,
        device_option=gc)
    self.assertGradientChecks(gc, op, [X], 0, [0], stepsize=0.05)
|
pytorch-master
|
caffe2/python/operator_test/top_k_test.py
|
import functools
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestDecayAdagrad(hu.HypothesisTestCase):
    """Checks the DecayAdagrad operator against a numpy reference step."""

    @staticmethod
    def ref_decay_adagrad(param, mom1, mom2, grad, LR, ITER,
                          beta1, beta2, epsilon, weight_decay, bias_correction_first, output_grad=False):
        # Numpy reference for one DecayAdagrad update.
        # NOTE(review): beta2 is accepted but unused here -- mom2 is a plain
        # running sum of squared gradients (Adagrad-style), not an EMA.
        # Presumably this mirrors the C++ operator; confirm if they diverge.
        t = ITER + 1
        mom1_out = (beta1 * mom1) + (1 - beta1) * grad
        mom2_out = mom2 + np.square(grad)
        if bias_correction_first:
            # Adam-style bias correction for the first moment.
            c = 1 - np.power(beta1, t)
        else:
            c = 1.0
        grad_out = mom1_out / c / (np.sqrt(mom2_out) + epsilon) + weight_decay * param
        param_out = param + LR * grad_out
        return param_out, mom1_out, mom2_out

    @given(inputs=hu.tensors(n=4),
           ITER=st.integers(min_value=0, max_value=10000),
           LR=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           beta1=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           beta2=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           weight_decay=st.floats(min_value=0.01, max_value=0.99,
                                  allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    def test_decay_adagrad(self, inputs, ITER, LR, beta1, beta2, epsilon, weight_decay, gc, dc):
        # Compare the operator output against ref_decay_adagrad on random
        # (param, mom1, mom2, grad) tensors of a common shape.
        bias_correction_first = True
        param, mom1, mom2, grad = inputs
        # mom2 accumulates squared gradients, so force it non-negative.
        mom2 = np.abs(mom2)
        ITER = np.array([ITER], dtype=np.int64)
        LR = np.array([LR], dtype=np.float32)
        op = core.CreateOperator(
            "DecayAdagrad",
            ["param", "mom1", "mom2", "grad", "lr", "iter"],
            ["output_param", "output_mom1", "output_mom2"],
            beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay, bias_correction_first=bias_correction_first)
        # Iter lives on the CPU
        input_device_options = {'iter': hu.cpu_do}
        self.assertReferenceChecks(
            gc, op,
            [param, mom1, mom2, grad, LR, ITER],
            functools.partial(
                self.ref_decay_adagrad,
                beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay, bias_correction_first=bias_correction_first),
            input_device_options=input_device_options)
# Allow running this test module directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/decay_adagrad_test.py
|
import functools
import logging
import hypothesis
from hypothesis import given, settings, HealthCheck
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
logger = logging.getLogger(__name__)
def ref_wngrad(param_in, seq_b_in, grad, lr, epsilon,
               output_effective_lr=False,
               output_effective_lr_and_update=False):
    """Numpy reference for one dense Wngrad step.

    Returns ``(param_out, seq_b_out)`` as float32 arrays, optionally
    extended with the effective learning rate and the applied update
    when the corresponding flag is set.
    """
    denom = seq_b_in + epsilon
    seq_b_next = seq_b_in + 1.0 / denom * np.sum(grad * grad)
    eff_lr = lr / denom
    update = eff_lr * grad
    param_next = param_in + update

    results = (param_next, seq_b_next, eff_lr, update)
    if output_effective_lr_and_update:
        keep = 4
    elif output_effective_lr:
        keep = 3
    else:
        keep = 2
    return tuple(r.astype(np.float32) for r in results[:keep])
def wngrad_sparse_test_helper(parent_test, inputs, seq_b, lr, epsilon,
                              engine, gc, dc):
    """Run SparseWngrad on a random sparsification of ``grad`` and check
    it against a local numpy reference.

    Args:
        parent_test: the test case supplying assertReferenceChecks.
        inputs: (param, grad) pair of equally shaped arrays.
        seq_b, lr, epsilon: scalar optimizer state / hyperparameters.
        engine: operator engine string (may be None).
        gc, dc: hypothesis_test_util device options.
    """
    param, grad = inputs
    seq_b = np.array([seq_b, ], dtype=np.float32)
    lr = np.array([lr], dtype=np.float32)
    # Create an indexing array containing values that are lists of indices,
    # which index into grad; may be empty (zero selected rows).
    indices = np.random.choice(np.arange(grad.shape[0]),
                               size=np.random.randint(grad.shape[0]), replace=False)
    # Sparsify grad
    grad = grad[indices]
    op = core.CreateOperator(
        "SparseWngrad",
        ["param", "seq_b", "indices", "grad", "lr"],
        ["param", "seq_b"],
        epsilon=epsilon,
        engine=engine,
        device_option=gc)

    def ref_sparse(param, seq_b, indices, grad, lr):
        param_out = np.copy(param)
        # NOTE(review): unlike ref_wngrad, epsilon is omitted from the
        # seq_b denominator here -- presumably mirroring the sparse C++
        # kernel; confirm before unifying with the dense reference.
        seq_b_out = seq_b + 1.0 / seq_b * np.sum(grad * grad)
        for i, index in enumerate(indices):
            param_out[index] = param[index] + lr / (seq_b + epsilon) * grad[i]
        return (param_out, seq_b_out)

    # Fixed copy-pasted log message: this is the wngrad, not adagrad, helper.
    logger.info('test_sparse_wngrad with full precision embedding')
    seq_b_i = seq_b.astype(np.float32)
    param_i = param.astype(np.float32)
    parent_test.assertReferenceChecks(
        gc, op, [param_i, seq_b_i, indices, grad, lr],
        ref_sparse
    )
class TestWngrad(serial.SerializedTestCase):
    """Tests for the Wngrad / SparseWngrad optimizer operators, compared
    against the numpy references (ref_wngrad, local ref_sparse)."""

    @given(inputs=hu.tensors(n=2),
           seq_b=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           lr=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_wngrad_dense_base(self, inputs, seq_b, lr, epsilon, gc, dc):
        # Dense Wngrad emitting only the mandatory outputs (param, seq_b).
        param, grad = inputs
        # The operator expects seq_b and lr as 1-element tensors.
        seq_b = np.array([seq_b, ], dtype=np.float32)
        lr = np.array([lr], dtype=np.float32)
        op = core.CreateOperator(
            "Wngrad",
            ["param", "seq_b", "grad", "lr"],
            ["param", "seq_b"],
            epsilon=epsilon,
            device_option=gc,
        )
        self.assertReferenceChecks(
            gc, op,
            [param, seq_b, grad, lr],
            functools.partial(ref_wngrad, epsilon=epsilon))

    @given(inputs=hu.tensors(n=2),
           seq_b=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           lr=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_wngrad_dense_output_effective_lr(self, inputs, seq_b,
                                              lr, epsilon, gc, dc):
        # Same as the base test, but also checks the optional
        # effective_lr output.
        param, grad = inputs
        seq_b = np.array([seq_b, ], dtype=np.float32)
        lr = np.array([lr], dtype=np.float32)
        op = core.CreateOperator(
            "Wngrad",
            ["param", "seq_b", "grad", "lr"],
            ["param", "seq_b", "effective_lr"],
            epsilon=epsilon,
            device_option=gc,
        )
        self.assertReferenceChecks(
            gc, op,
            [param, seq_b, grad, lr],
            functools.partial(ref_wngrad, epsilon=epsilon,
                              output_effective_lr=True))

    @given(inputs=hu.tensors(n=2),
           seq_b=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           lr=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_wngrad_dense_output_effective_lr_and_update(
            self, inputs, seq_b, lr, epsilon, gc, dc):
        # Checks all four outputs: param, seq_b, effective_lr and update.
        param, grad = inputs
        seq_b = np.abs(np.array([seq_b, ], dtype=np.float32))
        lr = np.array([lr], dtype=np.float32)
        op = core.CreateOperator(
            "Wngrad",
            ["param", "seq_b", "grad", "lr"],
            ["param", "seq_b", "effective_lr", "update"],
            epsilon=epsilon,
            device_option=gc,
        )
        self.assertReferenceChecks(
            gc, op,
            [param, seq_b, grad, lr],
            functools.partial(ref_wngrad, epsilon=epsilon,
                              output_effective_lr_and_update=True))

    # Suppress filter_too_much health check.
    # Likely caused by `assume` call falling through too often.
    @settings(suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)
    @given(inputs=hu.tensors(n=2),
           seq_b=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           lr=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    def test_sparse_wngrad(self, inputs, seq_b, lr, epsilon, gc, dc):
        # Delegates to the shared helper, which sparsifies grad randomly.
        return wngrad_sparse_test_helper(self, inputs, seq_b, lr, epsilon,
                                         None, gc, dc)

    @given(inputs=hu.tensors(n=1),
           lr=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           seq_b=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_sparse_wngrad_empty(self, inputs, seq_b, lr, epsilon, gc, dc):
        # Edge case: zero selected indices -- the op must be a no-op.
        param = inputs[0]
        seq_b = np.array([seq_b, ], dtype=np.float32)
        lr = np.array([lr], dtype=np.float32)
        grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
        indices = np.empty(shape=(0,), dtype=np.int64)
        hypothesis.note('indices.shape: %s' % str(indices.shape))
        op = core.CreateOperator(
            "SparseWngrad",
            ["param", "seq_b", "indices", "grad", "lr"],
            ["param", "seq_b"],
            epsilon=epsilon,
            device_option=gc)

        def ref_sparse(param, seq_b, indices, grad, lr):
            # With no indices, both outputs are unchanged copies.
            param_out = np.copy(param)
            seq_b_out = np.copy(seq_b)
            return (param_out, seq_b_out)

        print('test_sparse_adagrad_empty with full precision embedding')
        seq_b_i = seq_b.astype(np.float32)
        param_i = param.astype(np.float32)
        self.assertReferenceChecks(
            gc, op, [param_i, seq_b_i, indices, grad, lr], ref_sparse
        )
|
pytorch-master
|
caffe2/python/operator_test/wngrad_test.py
|
import logging
import caffe2.python.hypothesis_test_util as hu
import numpy as np
from caffe2.python import core
from hypothesis import given, settings, strategies as st
logger = logging.getLogger(__name__)
def get_input_tensors():
    """Hypothesis strategy producing a random 2-D float32 tensor whose
    entries are drawn from the integers 0..100."""
    rows = np.random.randint(1, 10)
    cols = np.random.randint(1, 10)
    return hu.arrays(
        dims=[rows, cols],
        dtype=np.float32,
        elements=st.integers(min_value=0, max_value=100),
    )
class TestCopyRowsToTensor(hu.HypothesisTestCase):
    """Tests for the CopyRowsToTensor operator."""

    @given(input_tensor=get_input_tensors(), **hu.gcs_cpu_only)
    def test_copy_rows_to_tensor(self, input_tensor, gc, dc):
        # Exercise all four supported dtypes via a random choice.
        dtype = np.random.choice([np.float16, np.float32, np.int32, np.int64], 1)[0]
        input_tensor = np.array(input_tensor).astype(dtype)
        height = np.shape(input_tensor)[0]
        width = np.shape(input_tensor)[1]
        row = np.random.rand(width).astype(dtype)
        # Pick a random subset of distinct row indices (possibly empty).
        indices_lengths = np.random.randint(height)
        all_indices = np.arange(height)
        np.random.shuffle(all_indices)
        indices = all_indices[:indices_lengths]

        def ref(input_tensor, indices, row):
            # Reference: overwrite each selected row with `row` in place.
            for idx in indices:
                input_tensor[idx] = row
            return [input_tensor]

        op = core.CreateOperator(
            "CopyRowsToTensor", ["input_tensor", "indices", "row"], ["input_tensor"]
        )
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[input_tensor, indices, row],
            reference=ref,
        )

    @given(input_tensor=get_input_tensors(), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_copy_rows_to_tensor_invalid_input(self, input_tensor, gc, dc):
        # A row one element wider than the tensor must be rejected.
        input_tensor = np.array(input_tensor).astype(np.float32)
        height = np.shape(input_tensor)[0]
        width = np.shape(input_tensor)[1]
        row = np.random.rand(width + 1).astype(np.float32)
        indices_lengths = np.random.randint(height)
        all_indices = np.arange(height)
        np.random.shuffle(all_indices)
        indices = all_indices[:indices_lengths]
        self.assertRunOpRaises(
            device_option=gc,
            op=core.CreateOperator(
                "CopyRowsToTensor", ["input_tensor", "indices", "row"], ["input_tensor"]
            ),
            inputs=[input_tensor, indices, row],
            exception=RuntimeError,
            regexp="width of input tensor should match lengths of row",
        )
|
pytorch-master
|
caffe2/python/operator_test/copy_rows_to_tensor_op_test.py
|
from caffe2.python import core, workspace
from caffe2.python.text_file_reader import TextFileReader
from caffe2.python.test_util import TestCase
from caffe2.python.schema import Struct, Scalar, FetchRecord
import tempfile
import numpy as np
class TestTextFileReader(TestCase):
    """End-to-end test for TextFileReader against a schema-typed TSV file."""

    def test_text_file_reader(self):
        """Write a small TSV file, then read it back with every combination
        of batch_size and num_passes and compare against the source data.
        """
        import os

        schema = Struct(
            ('field1', Scalar(dtype=str)),
            ('field2', Scalar(dtype=str)),
            ('field3', Scalar(dtype=np.float32)))
        num_fields = 3
        col_data = [
            ['l1f1', 'l2f1', 'l3f1', 'l4f1'],
            ['l1f2', 'l2f2', 'l3f2', 'l4f2'],
            [0.456, 0.789, 0.10101, -24342.64],
        ]
        row_data = list(zip(*col_data))
        # delete=False so the reader can reopen the file by name; the
        # original version leaked this file -- it is now removed in the
        # finally block below.
        txt_file = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        try:
            with txt_file:
                txt_file.write(
                    '\n'.join(
                        '\t'.join(str(x) for x in f)
                        for f in row_data
                    ) + '\n'
                )
                txt_file.flush()

                for num_passes in range(1, 3):
                    for batch_size in range(1, len(row_data) + 2):
                        init_net = core.Net('init_net')
                        reader = TextFileReader(
                            init_net,
                            filename=txt_file.name,
                            schema=schema,
                            batch_size=batch_size,
                            num_passes=num_passes)
                        workspace.RunNetOnce(init_net)

                        net = core.Net('read_net')
                        should_stop, record = reader.read_record(net)

                        results = [np.array([])] * num_fields
                        while True:
                            workspace.RunNetOnce(net)
                            arrays = FetchRecord(record).field_blobs()
                            for i in range(num_fields):
                                results[i] = np.append(results[i], arrays[i])
                            if workspace.FetchBlob(should_stop):
                                break
                        for i in range(num_fields):
                            # The reader concatenates num_passes copies of
                            # each source column.
                            col_batch = np.tile(col_data[i], num_passes)
                            if col_batch.dtype in (np.float32, np.float64):
                                np.testing.assert_array_almost_equal(
                                    col_batch, results[i], decimal=3)
                            else:
                                np.testing.assert_array_equal(col_batch, results[i])
        finally:
            os.remove(txt_file.name)
# Allow running this test module directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/text_file_reader_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestFloor(serial.SerializedTestCase):
    """The Floor operator must agree with np.floor on every device."""

    @given(X=hu.tensor(),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_floor(self, X, gc, dc, engine):
        op = core.CreateOperator("Floor", ["X"], ["Y"], engine=engine)

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=lambda X: (np.floor(X),))

        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X], [0])
# Allow running this test module directly (unittest imported at module top).
if __name__ == "__main__":
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/floor_op_test.py
|
from caffe2.python import core
from hypothesis import assume, given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestReduceFrontSum(hu.HypothesisTestCase):
    """Im2Col / Col2Im layout-consistency and gradient tests.

    NOTE(review): the class name looks like a copy-paste from another
    file -- it actually tests Im2Col/Col2Im, not ReduceFrontSum. Left
    unchanged so test discovery/filters keep working.
    """

    @given(batch_size=st.integers(1, 3),
           stride=st.integers(1, 3),
           pad=st.integers(0, 3),
           kernel=st.integers(1, 5),
           dilation=st.integers(1, 3),
           size=st.integers(7, 10),
           channels=st.integers(1, 8),
           **hu.gcs)
    def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
                           size, channels, gc, dc):
        # Effective (dilated) kernel extent; input must be at least this big.
        dkernel = (dilation * (kernel - 1) + 1)
        assume(size >= dkernel)

        # Axis permutations between the two memory layouts.
        NCHW_TO_NHWC = (0, 2, 3, 1)
        NHWC_TO_NCHW = (0, 3, 1, 2)
        # Permutation mapping a per-image NHWC column block
        # (out_h, out_w, kernel, kernel, C) to NCHW (C, kernel, kernel, out_h, out_w).
        COL_NHWC_TO_NCHW = (4, 2, 3, 0, 1)

        N = batch_size
        C = channels
        H = size
        W = size

        # Standard convolution output-size formula.
        out_h = int((H + (2 * pad) - dkernel) / stride + 1)
        out_w = int((W + (2 * pad) - dkernel) / stride + 1)

        im_nchw = np.random.rand(N, C, H, W).astype(np.float32) - 0.5
        im_nhwc = im_nchw.transpose(NCHW_TO_NHWC)

        op_im2col_nchw = core.CreateOperator(
            "Im2Col",
            ["im_nchw"], ["col_nchw"],
            stride=stride,
            kernel=kernel,
            dilation=dilation,
            pad=pad,
            order="NCHW",
            device_option=gc)

        op_im2col_nhwc = core.CreateOperator(
            "Im2Col",
            ["im_nhwc"], ["col_nhwc"],
            stride=stride,
            kernel=kernel,
            dilation=dilation,
            pad=pad,
            order="NHWC",
            device_option=gc)

        self.ws.create_blob("im_nchw").feed(im_nchw, device_option=gc)
        self.ws.create_blob("im_nhwc").feed(im_nhwc, device_option=gc)
        self.ws.run(op_im2col_nchw)
        self.ws.run(op_im2col_nhwc)

        # im2col results in both layouts must describe the same patches.
        # there is probably a clever way to spell this in np
        col_nchw = self.ws.blobs["col_nchw"].fetch()
        col_nhwc = self.ws.blobs["col_nhwc"].fetch()
        col_nchw_ = col_nchw.reshape(N, C, kernel, kernel, out_h, out_w)
        col_nhwc_ = col_nhwc.reshape(N, out_h, out_w, kernel, kernel, C)
        for i in range(0, N):
            np.testing.assert_allclose(
                col_nchw_[i],
                col_nhwc_[i].transpose(COL_NHWC_TO_NCHW),
                atol=1e-4,
                rtol=1e-4)

        # Round-trip through Col2Im: both layouts must reconstruct
        # the same image (the original image is passed only for shape).
        op_col2im_nchw = core.CreateOperator(
            "Col2Im",
            ["col_nchw", "im_nchw"],
            ["out_nchw"],
            stride=stride,
            kernel=kernel,
            dilation=dilation,
            pad=pad,
            order="NCHW",
            device_option=gc)

        op_col2im_nhwc = core.CreateOperator(
            "Col2Im",
            ["col_nhwc", "im_nhwc"],
            ["out_nhwc"],
            stride=stride,
            kernel=kernel,
            dilation=dilation,
            pad=pad,
            order="NHWC",
            device_option=gc)

        self.ws.run(op_col2im_nchw)
        self.ws.run(op_col2im_nhwc)

        out_nchw = self.ws.blobs["out_nchw"].fetch()
        out_nhwc = self.ws.blobs["out_nhwc"].fetch()
        np.testing.assert_allclose(
            out_nchw,
            out_nhwc.transpose(NHWC_TO_NCHW),
            atol=1e-4,
            rtol=1e-4)

    @given(batch_size=st.integers(1, 3),
           stride=st.integers(1, 3),
           pad=st.integers(0, 3),
           kernel=st.integers(1, 5),
           dilation=st.integers(1, 3),
           size=st.integers(7, 10),
           channels=st.integers(1, 8),
           order=st.sampled_from(["NCHW"]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_col2im_gradients(self, batch_size, stride, pad, kernel,
                              dilation, size, channels, order, gc, dc):
        # Gradient check for Im2Col (NCHW only).
        assume(size >= dilation * (kernel - 1) + 1)
        op = core.CreateOperator(
            "Im2Col",
            ["X"], ["Y"],
            stride=stride,
            kernel=kernel,
            dilation=dilation,
            pad=pad,
            order=order,
            device_option=gc)
        X = np.random.rand(batch_size, channels, size, size).astype(np.float32)
        self.assertGradientChecks(gc, op, [X], 0, [0])
        return
|
pytorch-master
|
caffe2/python/operator_test/im2col_col2im_test.py
|
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestScaleOps(serial.SerializedTestCase):
    """ScaleBlobs applied to N blobs must equal N independent Scale ops."""

    @serial.given(dim=st.sampled_from([[1, 386, 1], [386, 1, 1],
                                       [1, 256, 1], [256, 1, 1],
                                       [1024, 256, 1], [1, 1024, 1],
                                       [1, 1, 1]]),
                  scale=st.floats(0.0, 10.0),
                  num_tensors=st.integers(1, 10),
                  **hu.gcs)
    def test_scale_ops(self, dim, scale, num_tensors, gc, dc):
        # Feed num_tensors random blobs into the workspace.
        input_names = []
        output_names = []
        for idx in range(num_tensors):
            name = "X_{}".format(idx)
            data = np.random.rand(*dim).astype(np.float32) - 0.5
            workspace.FeedBlob(name, data, device_option=gc)
            input_names.append(name)
            output_names.append("O_{}".format(idx))

        # Scale all blobs at once with ScaleBlobs.
        batch_op = core.CreateOperator(
            "ScaleBlobs",
            input_names,
            output_names,
            scale=scale,
        )
        batch_op.device_option.CopyFrom(gc)
        workspace.RunOperatorOnce(batch_op)

        # Scale each blob individually and compare against the batched run.
        for idx in range(num_tensors):
            ref_name = "O_ref_{}".format(idx)
            single_op = core.CreateOperator(
                "Scale",
                [input_names[idx]],
                [ref_name],
                scale=scale,
            )
            single_op.device_option.CopyFrom(gc)
            workspace.RunOperatorOnce(single_op)
            np.testing.assert_allclose(
                workspace.FetchBlob(output_names[idx]),
                workspace.FetchBlob(ref_name))
# Allow running this test module directly.
if __name__ == '__main__':
    import unittest
    unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/scale_op_test.py
|
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
class TestCounterOps(TestCase):
    """Exercises the StatRegistry* operators end to end."""

    def test_stats_ops(self):
        # The global StatRegistry isn't reset when the workspace is reset,
        # so there may be existing stats from a previous test
        workspace.RunOperatorOnce(core.CreateOperator(
            'StatRegistryExport', [], ['prev_k', 'prev_v', 'prev_ts']))
        previous_keys = workspace.FetchBlob('prev_k')
        existing = len(previous_keys)

        # Test-specific key prefix so our entries can't collide with
        # leftovers from other tests.
        prefix = '/'.join([__name__, 'TestCounterOps', 'test_stats_ops'])
        keys = [
            (prefix + '/key1').encode('ascii'),
            (prefix + '/key2').encode('ascii')
        ]
        values = [34, 45]
        workspace.FeedBlob('k', np.array(keys, dtype=str))
        workspace.FeedBlob('v', np.array(values, dtype=np.int64))
        # Update the global registry twice with the same key/value pairs.
        for _ in range(2):
            workspace.RunOperatorOnce(core.CreateOperator(
                'StatRegistryUpdate', ['k', 'v'], []))
        workspace.RunOperatorOnce(core.CreateOperator(
            'StatRegistryExport', [], ['k2', 'v2', 't2']))

        # Replay the exported stats into a freshly created local registry,
        # then export that registry for inspection.
        workspace.RunOperatorOnce(core.CreateOperator(
            'StatRegistryCreate', [], ['reg']))
        workspace.RunOperatorOnce(core.CreateOperator(
            'StatRegistryUpdate', ['k2', 'v2', 'reg'], []))

        workspace.RunOperatorOnce(core.CreateOperator(
            'StatRegistryExport', ['reg'], ['k3', 'v3', 't3']))
        k3 = workspace.FetchBlob('k3')
        v3 = workspace.FetchBlob('v3')
        t3 = workspace.FetchBlob('t3')
        # Exactly our two keys were added beyond what already existed,
        # and values/timestamps line up one-to-one with the keys.
        self.assertEqual(len(k3) - existing, 2)
        self.assertEqual(len(v3), len(k3))
        self.assertEqual(len(t3), len(k3))
        for key in keys:
            self.assertIn(key, k3)
|
pytorch-master
|
caffe2/python/operator_test/stats_ops_test.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.