# Dataset columns: python_code (string, 0-1.02M chars) | repo_name (9-48 chars) | file_path (5-114 chars)
from caffe2.python import workspace
from caffe2.python.core import Plan, to_execution_step, Net
from caffe2.python.task import Task, TaskGroup, final_output
from caffe2.python.net_builder import ops, NetBuilder
from caffe2.python.session import LocalSession
import unittest
import threading
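# Shared, lock-protected counters used to verify how many PythonOp
# instances are constructed and how many calls they receive across threads.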
class PythonOpStats(object):
lock = threading.Lock()
num_instances = 0
num_calls = 0
def python_op_builder():
PythonOpStats.lock.acquire()
PythonOpStats.num_instances += 1
PythonOpStats.lock.release()
def my_op(inputs, outputs):
PythonOpStats.lock.acquire()
PythonOpStats.num_calls += 1
PythonOpStats.lock.release()
return my_op
def _test_loop():
x = ops.Const(5)
y = ops.Const(0)
with ops.loop():
ops.stop_if(ops.EQ([x, ops.Const(0)]))
ops.Add([x, ops.Const(-1)], [x])
ops.Add([y, ops.Const(1)], [y])
return y
def _test_inner_stop(x):
ops.stop_if(ops.LT([x, ops.Const(5)]))
def _test_outer():
x = ops.Const(10)
# test stop_if(False)
with ops.stop_guard() as g1:
_test_inner_stop(x)
# test stop_if(True)
y = ops.Const(3)
with ops.stop_guard() as g2:
_test_inner_stop(y)
# test no stop
with ops.stop_guard() as g4:
ops.Const(0)
# test empty clause
with ops.stop_guard() as g3:
pass
return (
g1.has_stopped(), g2.has_stopped(), g3.has_stopped(), g4.has_stopped())
def _test_if(x):
y = ops.Const(1)
with ops.If(ops.GT([x, ops.Const(50)])):
ops.Const(2, blob_out=y)
with ops.If(ops.LT([x, ops.Const(50)])):
ops.Const(3, blob_out=y)
ops.stop()
ops.Const(4, blob_out=y)
return y
class TestNetBuilder(unittest.TestCase):
def test_ops(self):
with NetBuilder() as nb:
y = _test_loop()
z, w, a, b = _test_outer()
p = _test_if(ops.Const(75))
q = _test_if(ops.Const(25))
plan = Plan('name')
plan.AddStep(to_execution_step(nb))
ws = workspace.C.Workspace()
ws.run(plan)
expected = [
(y, 5),
(z, False),
(w, True),
(a, False),
(b, False),
(p, 2),
(q, 3),
]
for blob, expected_value in expected:
    actual = ws.blobs[str(blob)].fetch()
    self.assertEqual(actual, expected_value)
def _expected_loop(self):
total = 0
total_large = 0
total_small = 0
total_tiny = 0
for loop_iter in range(10):
outer = loop_iter * 10
for inner_iter in range(loop_iter):
val = outer + inner_iter
if val >= 80:
total_large += val
elif val >= 50:
total_small += val
else:
total_tiny += val
total += val
return total, total_large, total_small, total_tiny
def _actual_loop(self):
total = ops.Const(0)
total_large = ops.Const(0)
total_small = ops.Const(0)
total_tiny = ops.Const(0)
with ops.loop(10) as loop:
outer = ops.Mul([loop.iter(), ops.Const(10)])
with ops.loop(loop.iter()) as inner:
val = ops.Add([outer, inner.iter()])
with ops.If(ops.GE([val, ops.Const(80)])) as c:
ops.Add([total_large, val], [total_large])
with c.Elif(ops.GE([val, ops.Const(50)])) as c:
ops.Add([total_small, val], [total_small])
with c.Else():
ops.Add([total_tiny, val], [total_tiny])
ops.Add([total, val], [total])
return [
final_output(x)
for x in [total, total_large, total_small, total_tiny]
]
def test_net_multi_use(self):
with Task() as task:
total = ops.Const(0)
net = Net('my_net')
net.Add([total, net.Const(1)], [total])
ops.net(net)
ops.net(net)
result = final_output(total)
with LocalSession() as session:
session.run(task)
self.assertEqual(2, result.fetch())
def test_loops(self):
with Task() as task:
out_actual = self._actual_loop()
with LocalSession() as session:
session.run(task)
expected = self._expected_loop()
actual = [o.fetch() for o in out_actual]
for e, a in zip(expected, actual):
self.assertEqual(e, a)
def test_setup(self):
with Task() as task:
with ops.task_init():
one = ops.Const(1)
two = ops.Add([one, one])
with ops.task_init():
three = ops.Const(3)
accum = ops.Add([two, three])
# here, accum should be 5
with ops.task_exit():
# here, accum should be 6, since this executes after lines below
seven_1 = ops.Add([accum, one])
six = ops.Add([accum, one])
ops.Add([accum, one], [accum])
seven_2 = ops.Add([accum, one])
o6 = final_output(six)
o7_1 = final_output(seven_1)
o7_2 = final_output(seven_2)
with LocalSession() as session:
session.run(task)
self.assertEqual(o6.fetch(), 6)
self.assertEqual(o7_1.fetch(), 7)
self.assertEqual(o7_2.fetch(), 7)
def test_multi_instance_python_op(self):
"""
When task instances are created at runtime, C++ concurrently creates
multiple instances of operators in C++, and concurrently destroys them
once the task is finished. This means that the destructor of PythonOp
will be called concurrently, so the GIL must be acquired. This
test exercises this condition.
"""
with Task(num_instances=64) as task:
with ops.loop(4):
ops.Python((python_op_builder, [], {}))([], [])
with LocalSession() as session:
PythonOpStats.num_instances = 0
PythonOpStats.num_calls = 0
session.run(task)
self.assertEqual(PythonOpStats.num_instances, 64)
self.assertEqual(PythonOpStats.num_calls, 256)
def test_multi_instance(self):
NUM_INSTANCES = 10
NUM_ITERS = 15
with TaskGroup() as tg:
with Task(num_instances=NUM_INSTANCES):
with ops.task_init():
counter1 = ops.CreateCounter([], ['global_counter'])
counter2 = ops.CreateCounter([], ['global_counter2'])
counter3 = ops.CreateCounter([], ['global_counter3'])
# both task_counter and local_counter should be thread local
with ops.task_instance_init():
task_counter = ops.CreateCounter([], ['task_counter'])
local_counter = ops.CreateCounter([], ['local_counter'])
with ops.loop(NUM_ITERS):
ops.CountUp(counter1)
ops.CountUp(task_counter)
ops.CountUp(local_counter)
# gather sum of squares of local counters to make sure that
# each local counter counted exactly up to NUM_ITERS, and
# that there was no false sharing of counter instances.
with ops.task_instance_exit():
count2 = ops.RetrieveCount(task_counter)
with ops.loop(ops.Mul([count2, count2])):
ops.CountUp(counter2)
# This should have the same effect as the above
count3 = ops.RetrieveCount(local_counter)
with ops.loop(ops.Mul([count3, count3])):
ops.CountUp(counter3)
# The code below will only run once
with ops.task_exit():
total1 = final_output(ops.RetrieveCount(counter1))
total2 = final_output(ops.RetrieveCount(counter2))
total3 = final_output(ops.RetrieveCount(counter3))
with LocalSession() as session:
session.run(tg)
self.assertEqual(total1.fetch(), NUM_INSTANCES * NUM_ITERS)
self.assertEqual(total2.fetch(), NUM_INSTANCES * (NUM_ITERS ** 2))
self.assertEqual(total3.fetch(), NUM_INSTANCES * (NUM_ITERS ** 2))
def test_if_net(self):
with NetBuilder() as nb:
x0 = ops.Const(0)
x1 = ops.Const(1)
x2 = ops.Const(2)
y0 = ops.Const(0)
y1 = ops.Const(1)
y2 = ops.Const(2)
# basic logic
first_res = ops.Const(0)
with ops.IfNet(ops.Const(True)):
ops.Const(1, blob_out=first_res)
with ops.Else():
ops.Const(2, blob_out=first_res)
second_res = ops.Const(0)
with ops.IfNet(ops.Const(False)):
ops.Const(1, blob_out=second_res)
with ops.Else():
ops.Const(2, blob_out=second_res)
# nested and sequential ifs,
# empty then/else,
# passing outer blobs into branches,
# writing into outer blobs, incl. into input blob
# using local blobs
with ops.IfNet(ops.LT([x0, x1])):
local_blob = ops.Const(900)
ops.Add([ops.Const(100), local_blob], [y0])
gt = ops.GT([x1, x2])
with ops.IfNet(gt):
# empty then
pass
with ops.Else():
ops.Add([y1, local_blob], [local_blob])
ops.Add([ops.Const(100), y1], [y1])
with ops.IfNet(ops.EQ([local_blob, ops.Const(901)])):
ops.Const(7, blob_out=y2)
ops.Add([y1, y2], [y2])
with ops.Else():
# empty else
pass
plan = Plan('if_net_test')
plan.AddStep(to_execution_step(nb))
ws = workspace.C.Workspace()
ws.run(plan)
first_res_value = ws.blobs[str(first_res)].fetch()
second_res_value = ws.blobs[str(second_res)].fetch()
y0_value = ws.blobs[str(y0)].fetch()
y1_value = ws.blobs[str(y1)].fetch()
y2_value = ws.blobs[str(y2)].fetch()
self.assertEqual(first_res_value, 1)
self.assertEqual(second_res_value, 2)
self.assertEqual(y0_value, 1000)
self.assertEqual(y1_value, 101)
self.assertEqual(y2_value, 108)
self.assertTrue(str(local_blob) not in ws.blobs)
def test_while_net(self):
with NetBuilder() as nb:
x = ops.Const(0)
y = ops.Const(0)
with ops.WhileNet():
with ops.Condition():
ops.Add([x, ops.Const(1)], [x])
ops.LT([x, ops.Const(7)])
ops.Add([x, y], [y])
plan = Plan('while_net_test')
plan.AddStep(to_execution_step(nb))
ws = workspace.C.Workspace()
ws.run(plan)
x_value = ws.blobs[str(x)].fetch()
y_value = ws.blobs[str(y)].fetch()
self.assertEqual(x_value, 7)
self.assertEqual(y_value, 21)
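# Illustrative sketch (not part of the original test file): the minimal
# NetBuilder-to-Plan workflow that the tests above rely on. The function,
# blob, and plan names are hypothetical.
def _example_netbuilder_plan():
    with NetBuilder() as nb:
        total = ops.Const(0)
        with ops.loop(5) as loop:
            ops.Add([total, loop.iter()], [total])  # accumulates 0+1+2+3+4
    plan = Plan('example_plan')
    plan.AddStep(to_execution_step(nb))
    ws = workspace.C.Workspace()
    ws.run(plan)
    return ws.blobs[str(total)].fetch()  # expected value: 10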
# source: pytorch-master | caffe2/python/net_builder_test.py
from hypothesis import given, settings
import hypothesis.strategies as st
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util, workspace
from caffe2.python.core import CreateOperator, GradientRegistry, IR
import numpy as np
# First, we will set up a few gradient registry entries so that we can manually
# construct some test cases.
def NeedAll(op, g_output):
"""A sanity check to make sure that all the gradient are given."""
for name, g in zip(op.output, g_output):
if g is None:
raise RuntimeError(
'Need gradient for "%s" but it is not provided.' % name)
return g_output
def GIS(op):
"""A test util function to generate the gradient name for input."""
return [s + '_grad' for s in op.input]
def CopyDeviceOption(op, src_op):
if src_op.HasField('device_option'):
op.device_option.CopyFrom(src_op.device_option)
return op
# First gradient: (in -> out) leading to (out_grad -> in_grad)
@GradientRegistry.RegisterGradient('Direct')
def AddDirectGradient(op, g_output):
return (
CopyDeviceOption(
CreateOperator('DirectGradient', NeedAll(op, g_output), GIS(op)),
op),
GIS(op)
)
# Second gradient: (in -> out) leading to (out, out_grad -> in_grad)
@GradientRegistry.RegisterGradient('UseOutput')
def AddUseOutputGradient(op, g_output):
return (
CopyDeviceOption(
CreateOperator(
'UseOutputGradient',
list(op.output) + NeedAll(op, g_output), GIS(op)),
op),
GIS(op)
)
@GradientRegistry.RegisterGradient('UseInput')
def AddUseInputGradient(op, g_output):
return (
CopyDeviceOption(
CreateOperator(
'UseInputGradient',
list(op.input) + NeedAll(op, g_output), GIS(op)),
op),
GIS(op)
)
@GradientRegistry.RegisterGradient('Nogradient')
def AddNogradient(op, g_output):
return (
[],
[None for s in op.input]
)
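# Illustrative sketch (not part of the original test file): how the 'Direct'
# registration above is consumed. GetBackwardPass walks the forward operators
# in reverse and invokes the registered gradient builders, chaining
# 'out_grad' back to 'in_grad'. The function name is hypothetical.
def _example_direct_backward_pass():
    forward_ops = [
        CreateOperator('Direct', 'in', 'hidden'),
        CreateOperator('Direct', 'hidden', 'out'),
    ]
    grad_ops, input_to_grad = GradientRegistry.GetBackwardPass(
        forward_ops, {'out': 'out_grad'})
    # grad_ops: DirectGradient('out_grad' -> 'hidden_grad'),
    #           DirectGradient('hidden_grad' -> 'in_grad')
    return grad_ops, input_to_grad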
class TestGradientCalculation(test_util.TestCase):
def assertOperatorListEqual(self, operatorDefList1, operatorDefList2):
for op in operatorDefList1:
op.debug_info = ""
if op.device_option:
del op.device_option.extra_info[:]
for op in operatorDefList2:
op.debug_info = ""
if op.device_option:
del op.device_option.extra_info[:]
self.assertEqual(operatorDefList1, operatorDefList2)
@given(device_option=st.sampled_from([
None,
core.DeviceOption(workspace.GpuDeviceType, 1)]))
@settings(deadline=10000)
def testDirect(self, device_option):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
if device_option:
for op in operators:
op.device_option.CopyFrom(device_option)
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'hidden_grad'),
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
if device_option:
for op in desired_grad_operators:
op.device_option.CopyFrom(device_option)
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testDirectImplicitGradientSource(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
desired_grad_operators = [
CreateOperator(
"ConstantFill", 'out', "out_autogen_grad", value=1.0),
CreateOperator(
'DirectGradient', 'out_autogen_grad', 'hidden_grad'),
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
for op in desired_grad_operators:
op.debug_info = ""
gradients, _ = GradientRegistry.GetBackwardPass(
operators, ['out'])
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testDoesNotGenerateUnnecessaryGradients(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
for op in desired_grad_operators:
op.debug_info = ""
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'hidden': 'hidden_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testDirectButNoOutputGradientGiven(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {})
self.assertOperatorListEqual(gradients, [])
def testDirectInPlace(self):
operators = [
CreateOperator('Direct', 'in', 'in'),
CreateOperator('Direct', 'in', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'in_grad'),
CreateOperator('DirectGradient', 'in_grad', 'in_grad'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testVersionMismatch(self):
operators = [
CreateOperator('Direct', 'x', 'x'),
CreateOperator('Direct', 'y', 'x'),
CreateOperator('Direct', 'x', 'y'),
]
# Reusing 'x' in place creates multiple blob versions; requesting the
# backward pass across the version mismatch must fail.
with self.assertRaisesRegex(RuntimeError, "version"):
    GradientRegistry.GetBackwardPass(operators, {'y': 'y_grad'})
def testUseOutput(self):
operators = [
CreateOperator('UseOutput', 'in', 'hidden'),
CreateOperator('UseOutput', 'hidden', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
CreateOperator(
'UseOutputGradient',
['out', 'out_grad'], 'hidden_grad'
),
CreateOperator(
'UseOutputGradient',
['hidden', 'hidden_grad'], 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testUseOutputInPlace(self):
operators = [
CreateOperator('UseOutput', 'in', 'in'),
CreateOperator('UseOutput', 'in', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
CreateOperator(
'UseOutputGradient',
['out', 'out_grad'], 'in_grad'
),
CreateOperator(
'UseOutputGradient',
['in', 'in_grad'], 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testUseOutputButOutputHasBeenChanged(self):
operators = [
CreateOperator('UseOutput', 'in', 'hidden'),
# Note here: we overwrite hidden, but hidden will be needed by the
# gradient calculation of the first operator, so the gradient
# registry should return an error.
CreateOperator('Direct', 'hidden', 'hidden'),
CreateOperator('UseOutput', 'hidden', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
with self.assertRaises(RuntimeError):
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
def testUseInput(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('UseInput', 'hidden', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
CreateOperator(
'UseInputGradient',
['hidden', 'out_grad'], 'hidden_grad'
),
CreateOperator(
'DirectGradient',
'hidden_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testUseInputButInputHasBeenChanged(self):
"""Test gradient for the following case:
in -> out, with UseInput
in -> in
Since we overwrite in op#1, but in will be needed by the gradient
calculation of op#0, the gradient registry should raise an error.
"""
operators = [
CreateOperator('UseInput', 'in', 'out'),
CreateOperator('Direct', 'in', 'in'),
]
with self.assertRaises(RuntimeError):
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
@given(device_option=st.sampled_from([
None,
core.DeviceOption(workspace.GpuDeviceType, 1)]))
@settings(deadline=10000)
def testMultiUseInput(self, device_option):
"""Test gradient for the following case:
in -> hidden1
in -> hidden2
hidden1, hidden2 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Direct', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
if device_option:
for op in operators:
op.device_option.CopyFrom(device_option)
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden2_grad', 'in_grad'
),
CreateOperator(
'DirectGradient',
'hidden1_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'Sum',
['in_grad', '_in_grad_autosplit_0'], 'in_grad'
),
]
if device_option:
for op in desired_grad_operators:
op.device_option.CopyFrom(device_option)
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {"out": "out_grad"})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testMultiUseInputButWithNoGradient(self):
"""Test gradient for the following case:
in -> hidden1
in -(no gradient)-> hidden2
hidden1, hidden2 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Nogradient', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden1_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testMultiUseInputAndMultipleVersions(self):
"""Test gradient for the following case:
in -> in
in -> hidden1, hidden2
hidden1, hidden2 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'in'),
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Direct', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden2_grad', 'in_grad'
),
CreateOperator(
'DirectGradient',
'hidden1_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'Sum',
['in_grad', '_in_grad_autosplit_0'], 'in_grad'
),
CreateOperator(
'DirectGradient',
'in_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testMultiUseInputAutoGenSumDevice(self):
parallel_tag = "parallelize:shard_by_1"
split_op_device_option_clear_auto_gen_sum = core.DeviceOption(
caffe2_pb2.CPU,
extra_info=[
parallel_tag,
"{}:1".format(IR.ONLY_KEEP_IS_AUTO_GEN_SUM_OPS_TAG),
]
)
split_op_device_option_no_clear_auto_gen_sum = core.DeviceOption(
caffe2_pb2.CPU,
extra_info=[parallel_tag]
)
operators_clear_auto_gen_sum = [
CreateOperator(
'Direct', 'in', 'hidden1',
device_option=split_op_device_option_clear_auto_gen_sum
),
CreateOperator(
'Direct', 'in', 'hidden2',
device_option=split_op_device_option_clear_auto_gen_sum
),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
gradients_clear_auto_gen_sum, _ = GradientRegistry.GetBackwardPass(
operators_clear_auto_gen_sum, {'out': 'out_grad'})
self.assertEqual(gradients_clear_auto_gen_sum[-1].type, "Sum")
self.assertNotIn(
parallel_tag,
gradients_clear_auto_gen_sum[-1].device_option.extra_info
)
operators_no_clear_auto_gen_sum = [
CreateOperator(
'Direct', 'in', 'hidden1',
device_option=split_op_device_option_no_clear_auto_gen_sum
),
CreateOperator(
'Direct', 'in', 'hidden2',
device_option=split_op_device_option_no_clear_auto_gen_sum
),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
gradients_no_clear_auto_gen_sum, _ = GradientRegistry.GetBackwardPass(
operators_no_clear_auto_gen_sum, {'out': 'out_grad'})
self.assertEqual(gradients_no_clear_auto_gen_sum[-1].type, "Sum")
self.assertIn(
parallel_tag,
gradients_no_clear_auto_gen_sum[-1].device_option.extra_info
)
def testMultiUseInputAndMultipleVersionsBig(self):
"""Test gradient for the following case:
in -> in
in -> hidden1, hidden2
hidden1, hidden2 -> in
in -> hidden3, hidden4, hidden5
hidden3, hidden4, hidden5 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'in'),
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Direct', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'in'),
CreateOperator('Direct', 'in', 'hidden3'),
CreateOperator('Direct', 'in', 'hidden4'),
CreateOperator('Direct', 'in', 'hidden5'),
CreateOperator('Direct', ['hidden3', 'hidden4', 'hidden5'], 'out'),
]
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden3_grad', 'hidden4_grad', 'hidden5_grad']
),
CreateOperator(
'DirectGradient',
'hidden5_grad', 'in_grad'
),
CreateOperator(
'DirectGradient',
'hidden4_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'DirectGradient',
'hidden3_grad', '_in_grad_autosplit_1'
),
CreateOperator(
'Sum',
['in_grad', '_in_grad_autosplit_0',
'_in_grad_autosplit_1'],
'in_grad'
),
CreateOperator(
'DirectGradient',
'in_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden2_grad', 'in_grad'
),
CreateOperator(
'DirectGradient',
'hidden1_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'Sum',
['in_grad', '_in_grad_autosplit_0'],
'in_grad'
),
CreateOperator(
'DirectGradient',
'in_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
for s in gradients:
print(str(s))
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testGradientMappingUsingSumOp(self):
"""Since Sum is used in accumulating gradients, we will test if
it is OK to also explicitly use it in the graph."""
operators = [
CreateOperator('FC', ['in', 'w', 'b'], 'fc'),
CreateOperator('Sum', 'fc', 'agg'),
CreateOperator('AveragedLoss', 'agg', 'loss'),
]
# This should run correctly.
gradient_ops, _ = GradientRegistry.GetBackwardPass(
operators, {'loss': 'loss_grad'})
for s in gradient_ops:
print(str(s))
def testGradientCalculationWithPrint(self):
"""Test a common use case where we have Print in the forward pass."""
operators = [
CreateOperator('FC', ['in', 'w', 'b'], 'fc'),
CreateOperator('Print', 'fc', []),
CreateOperator('AveragedLoss', 'fc', 'loss'),
]
desired_grad_operators = [
CreateOperator('AveragedLossGradient',
['fc', 'loss_grad'], 'fc_grad'),
CreateOperator('FCGradient', ['in', 'w', 'fc_grad'],
['w_grad', 'b_grad', 'in_grad']),
]
for g in desired_grad_operators:
g.is_gradient_op = 1
# This should run correctly.
gradient_ops, _ = GradientRegistry.GetBackwardPass(
operators, {'loss': 'loss_grad'})
for s in gradient_ops:
print(str(s))
self.assertOperatorListEqual(gradient_ops, desired_grad_operators)
def testStopGradient(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('StopGradient', 'hidden', 'hidden2'),
CreateOperator('Direct', 'hidden2', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'hidden2_grad'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
def testStopGradientOrphan(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('StopGradient', 'hidden', 'auto_blobx'),
CreateOperator('Direct', 'hidden', 'out'),
]
with self.assertRaises(ValueError):
# This should complain about incorrect use of StopGradient
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
def testStopGradientInplace(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('StopGradient', 'hidden', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'hidden_grad'),
]
gradients, grad_map = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
self.assertEqual(grad_map, {'out': 'out_grad'})
def testStopGradientWithMultiUseOperators(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'hidden2'),
CreateOperator('StopGradient', 'hidden', 'hidden3'),
CreateOperator('Direct', ['hidden2', 'hidden3'], 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad',
['hidden2_grad', 'hidden3_grad']),
CreateOperator('DirectGradient', 'hidden2_grad', 'hidden_grad'),
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
gradients, grad_map = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertOperatorListEqual(gradients, desired_grad_operators)
self.assertEqual(
grad_map, {'out': 'out_grad', 'hidden2': 'hidden2_grad',
'hidden3': 'hidden3_grad', 'hidden': 'hidden_grad',
'in': 'in_grad'})
def test_zero_gradient(self):
net = core.Net("zero_grad_test")
hidden_prev, cell, gates, seq_lengths, timestep =\
net.AddExternalInput("h", "c", "g", "s", "t")
hidden, cell = net.LSTMUnit(
[hidden_prev, cell, gates, seq_lengths, timestep],
["hidden_t", "cell_t"])
with self.assertRaises(Exception):
net.AddGradientOperators([hidden])
net.ZeroGradient(cell, [])
net.AddGradientOperators([hidden])
def test_two_grads(self):
net = core.Net("test_two_grads")
input, two, three = net.AddExternalInput("input", "two", "three")
m1 = net.Mul([input, two], "mul_1")
m2 = net.Mul([m1, three], "mul_2")
grad_map = net.AddGradientOperators([m2, m1])
workspace.ResetWorkspace()
workspace.blobs[input] = np.array([1]).astype(np.float32)
workspace.blobs[two] = np.array([2]).astype(np.float32)
workspace.blobs[three] = np.array([3]).astype(np.float32)
workspace.RunNetOnce(net)
print(net.Proto())
for blob in workspace.blobs:
print(blob, workspace.blobs[blob])
print("Input grad: ", workspace.blobs[grad_map[str(input)]])
assert workspace.blobs[grad_map[str(input)]] == 8.0
# Skip if sparse operators are not available
@unittest.skipIf(not core.IsOperator('SparseFunHash'),
'Sparse operators not available')
class TestSparseGradientsAccumulation(test_util.TestCase):
def testSparseAccumulationWithValues(self):
# The gradient for "Gather" only computes values. indices are directly
# passed from the input
#
# x1-->Gather-->x4-->
# | |
# x2-----+ DotProduct-->x6
# | |
# x3-->Gather-->x5-->
net = core.Net("test_net")
net.Gather(["x2", "x1"], "x4")
net.Gather(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
net.AddGradientOperators(["x6"])
sum_op_i = net.Proto().op[-2]
sum_op_v = net.Proto().op[-1]
self.assertEqual(sum_op_i.input[0], "x3")
self.assertEqual(sum_op_i.input[1], "x1")
self.assertEqual(sum_op_i.output[0], "x2_grad_indices_concat")
self.assertEqual(sum_op_v.input[0], "x5_grad")
self.assertEqual(sum_op_v.input[1], "x4_grad")
self.assertEqual(sum_op_v.output[0], "x2_grad_values_concat")
def testSparseGradientToDense(self):
#
# x1-->Gather-->x4-->
# | |
# x0, w, b-->FC-->x2-->EnsureDenseGradient-->x2---+ DotProduct-->x6
# | |
# x3-->Gather-->x5-->
net = core.Net("test_net")
net.FC(["x0", "w", "b"], "x2")
net.EnsureDense(["x2"], "x2")
net.Gather(["x2", "x1"], "x4")
net.Gather(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
net.AddGradientOperators(["x6"])
ensure_dense_op = net.Proto().op[-2]
self.assertEqual(ensure_dense_op.input[0], "x2_grad_indices_concat")
self.assertEqual(ensure_dense_op.input[1], "x2_grad_values_concat")
self.assertEqual(ensure_dense_op.output[0], "x2_grad")
def testSparseAccumulationWithIndicesAndValues(self):
# The gradient for "SparseFunHash" computes both indices and values
#
# x1-------->
# |
# x2----> |
# | |
# x3---SparseFunHash-->x8
# / \
# x4---+ DotProduct-->x10
# \ /
# x5---SparseFunHash-->x9
# | |
# x6----> |
# |
# x7-------->
net = core.Net("test_net")
net.SparseFunHash(["x1", "x2", "x3", "x4"], "x8")
net.SparseFunHash(["x5", "x6", "x7", "x4"], "x9")
net.DotProduct(["x8", "x9"], "x10")
net.AddGradientOperators(["x10"])
sum_op_i = net.Proto().op[-2]
sum_op_v = net.Proto().op[-1]
self.assertEqual(sum_op_i.input[0], "_x4_grad_indices_autosplit_0")
self.assertEqual(sum_op_i.input[1], "_x4_grad_indices_autosplit_1")
self.assertEqual(sum_op_i.output[0], "x4_grad_indices_concat")
self.assertEqual(sum_op_v.input[0], "_x4_grad_values_autosplit_0")
self.assertEqual(sum_op_v.input[1], "_x4_grad_values_autosplit_1")
self.assertEqual(sum_op_v.output[0], "x4_grad_values_concat")
class TestGradientsAccumulationWithNoGradientOps(test_util.TestCase):
def testNormalAccumulation(self):
# x1-->Relu--x2----------------->DotProduct-->x4
# | |
# -->Softmax-->x3-->
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Softmax("x2", "x3")
net.DotProduct(["x2", "x3"], "x4")
net.AddGradientOperators(["x4"])
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
def testAccumulationWithNoGradientBranch(self):
# -->PRINT
# |
# x1-->Relu--x2----------------->DotProduct-->x4
# | |
# -->Softmax-->x3-->
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Print("x2", [])
net.Softmax("x2", "x3")
net.DotProduct(["x2", "x3"], "x4")
net.AddGradientOperators(["x4"])
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
class TestGradientsAccumulationWithPassThroughGradients(test_util.TestCase):
def testAddOpInMiddle(self):
# x1-->Relu--x2----------------->Add-->x4
# | |
# -->Softmax-->x3-->
#
# Expected gradient graph:
#
# x1_g<--ReluG<--x2_g<--Sum<------------<---------x4_g
# | |
# <--_x2_g_split_0<--SoftmaxG
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Softmax("x2", "x3")
net.Add(["x2", "x3"], "x4")
input_to_grad = net.AddGradientOperators({"x4": "x4_grad"})
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testAddAndDynamicConstant(self):
net = core.Net("test_net")
net.FC(["x1", "x1_w", "x1_b"], ["x2"])
net.Relu("x2", "x2")
net.ConstantFill(["x2"], ["x3"])
net.Add(["x2", "x3"], "x4")
net.FC(["x4", "x4_w", "x4_b"], ["x5"])
net.SoftmaxWithLoss(["x5", "labels"], ["softmax", "loss"])
input_to_grad = net.AddGradientOperators(["loss"])
for op in net.Proto().op:
self.assertFalse(op.type == 'Sum')
self.assertTrue("x4" in input_to_grad)
self.assertTrue("x1" in input_to_grad)
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testAddAndStaticConstant(self):
net = core.Net("test_net")
net.FC(["x1", "x1_w", "x1_b"], ["x2"])
net.Relu("x2", "x2")
net.ConstantFill([], ["x3"], shape=[1])
net.Add(["x2", "x3"], "x4", broadcast=1)
net.FC(["x4", "x4_w", "x4_b"], ["x5"])
net.SoftmaxWithLoss(["x5", "labels"], ["softmax", "loss"])
input_to_grad = net.AddGradientOperators(["loss"])
print(input_to_grad)
self.assertTrue("x1" in input_to_grad)
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testSubOpInMiddle(self):
# x1-->Relu--x2----------------->Sub-->x4
# | |
# -->Softmax-->x3-->
#
# Expected gradient graph:
#
# x1_g<--ReluG<--x2_g<--Sum<------------<-----------------------x4_g
# | |
# <--_x2_g_split_0<--SoftmaxG<--x3_g<--neg
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Softmax("x2", "x3")
net.Sub(["x2", "x3"], "x4")
input_to_grad = net.AddGradientOperators({"x4": "x4_grad"})
print(str(net.Proto()))
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testAddOpAtLeaf(self):
# x1
# \
# -->Add-->x4
# / \
# x2 -->DotProduct-->x6
# \ /
# -->Add-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<--Sum<--x4_g<--DotProductG<--x6_g
# | | |
# <---x5_g<-------
net = core.Net("test_net")
net.Add(["x1", "x2"], "x4")
net.Add(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x1_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x3_grad")
def testSubOpAtLeaf(self):
# x1
# \
# -->Sub-->x4
# / \
# x2 -->DotProduct-->x6
# \ /
# -->Sub-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<-------Sum<--x2_g_split_0<--neg<--x4_g<--DotProductG<--x6_g
# | |
# x3_g<--neg<--<--x5_g<--------------------------------
net = core.Net("test_net")
net.Sub(["x1", "x2"], "x4")
net.Sub(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x1_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x3_grad")
def testMultiLayerAddOps(self):
# x1
# \
# -->Add-->x4
# / \
# x2 -->Add-->x6
# \ /
# -->Add-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<--Sum<-----x6_g
# | |
# <--------
net = core.Net("test_net")
net.Add(["x1", "x2"], "x4")
net.Add(["x2", "x3"], "x5")
net.Add(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x1_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x3_grad")
def testMultiLayerSubOps(self):
# x1
# \
# -->Sub-->x4
# / \
# x2 -->Sub-->x6
# \ /
# -->Sub-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<--Sum<-----x6_g
# | |
# <--------
net = core.Net("test_net")
net.Sub(["x1", "x2"], "x4")
net.Sub(["x2", "x3"], "x5")
net.Sub(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x2_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x1_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x3_grad")
def testAccumulationRuns(self):
net = core.Net("test_net")
input, one, two, three = net.AddExternalInput(
"input", "one", "two", "three")
m1 = net.Mul([input, two], "mul_1")
m2 = net.Mul([input, three], "mul_2")
sub = net.Sub([m1, one])
grad_map = net.AddGradientOperators([m2, sub])
workspace.ResetWorkspace()
workspace.blobs[one] = np.array([1]).astype(np.float32)
workspace.blobs[input] = np.array([1]).astype(np.float32)
workspace.blobs[two] = np.array([2]).astype(np.float32)
workspace.blobs[three] = np.array([3]).astype(np.float32)
workspace.RunNetOnce(net)
print("Input grad: ", workspace.blobs[grad_map[str(input)]])
assert workspace.blobs[grad_map[str(input)]] == 5.0
def testIncorrectOperator(self):
net = core.Net("test_net")
a, b, one = net.AddExternalInput("a", "b", "one")
m1 = net.Mul(a, b) # does not have second output
sub = net.Sub([m1, one])
with self.assertRaisesRegex(Exception, "schema"):
    net.AddGradientOperators([sub])
def testDeviceOptionsPropagation(self):
'''
Verifies that the aggregation operators in the backward pass are placed
on the same device as the parameter.
'''
device_0 = 'node:0'
# init_net.
init_net = core.Net("init_net")
with core.DeviceScope(0, node_name=device_0):
w = init_net.UniformFill([], 'w', shape=[10000, 64])
ids = init_net.GivenTensorFill(
[],
'ids',
values=np.random.randint(low=0, high=10000, size=10),
)
ids_2 = init_net.GivenTensorFill(
[],
'ids_2',
values=np.random.randint(low=0, high=10000, size=10),
)
# train_net.
train_net = core.Net("train_net")
with core.DeviceScope(0, node_name=device_0):
vals = train_net.Gather([w, ids], "gathered")
r_vals = train_net.ReduceSum([vals], 1, axes=0)
vals_2 = train_net.Gather([w, ids_2], "gathered_2")
r_vals_2 = train_net.ReduceSum([vals_2], 1, axes=0)
loss = train_net.Sum([r_vals, r_vals_2], 1)
train_net.AddGradientOperators([loss])
# All concat operators should be on device_0
for op in train_net.Proto().op:
if op.type == 'Concat':
self.assertEqual(op.device_option.node_name, device_0)
if __name__ == '__main__':
unittest.main()
# source: pytorch-master | caffe2/python/core_gradients_test.py
## @package model_helper
# Module caffe2.python.model_helper
from caffe2.python import core, scope, workspace
from caffe2.python.helpers.db_input import db_input
from caffe2.python.modeling import parameter_info
from caffe2.python.modeling.parameter_sharing import (
parameter_sharing_context,
)
from caffe2.python.optimizer_context import (
OptimizerContext,
DEFAULT_OPTIM,
)
from caffe2.python.regularizer_context import RegularizerContext
from future.utils import viewitems, viewkeys
from itertools import chain
import logging
# _known_working_ops are operators that do not need special care.
_known_working_ops = [
"Accuracy",
"Adam",
"Add",
"Adagrad",
"SparseAdagrad",
"Adadelta",
"SparseAdadelta",
"AveragedLoss",
"Cast",
"Checkpoint",
"ConstantFill",
"Copy",
"CopyGPUToCPU",
"CopyCPUToGPU",
"DequeueBlobs",
"EnsureCPUOutput",
"ExpandDims",
"Flatten",
"FlattenToVec",
"LabelCrossEntropy",
"LearningRate",
"MakeTwoClass",
"MatMul",
"NCCLAllreduce",
"NHWC2NCHW",
"PackSegments",
"Print",
"PRelu",
"ReduceFrontSum",
"Scale",
"ScatterWeightedSum",
"Sigmoid",
"SortedSegmentSum",
"Snapshot", # Note: snapshot is deprecated, use Checkpoint
"Softmax",
"SoftmaxWithLoss",
"SquaredL2Distance",
"Squeeze",
"StopGradient",
"Summarize",
"Tanh",
"Transpose",
"UnpackSegments",
"WeightedSum",
"YellowFin"
]
class ModelHelper(object):
"""A helper model so we can manange models more easily. It contains net def
and parameter storages. You can add an Operator yourself, e.g.
model = model_helper.ModelHelper(name="train_net")
# init your weight and bias as w and b
w = model.param_init_net.XavierFill(...)
b = model.param_init_net.ConstantFill(...)
fc1 = model.FC([input, w, b], output, **kwargs)
or you can use helper functions in the brew module without manually
defining parameter initializations and operators.
model = model_helper.ModelHelper(name="train_net")
fc1 = brew.fc(model, input, output, dim_in, dim_out, **kwargs)
"""
def __init__(self, name=None, init_params=True, allow_not_known_ops=True,
skip_sparse_optim=False, param_model=None, arg_scope=None):
self.name = name or "model"
self.net = core.Net(self.name)
if param_model is not None:
self.param_init_net = param_model.param_init_net
self.param_to_grad = param_model.param_to_grad
self.params = param_model.params
self._parameters_info = param_model._parameters_info
self._computed_params = param_model._computed_params
else:
self.param_init_net = core.Net(self.name + '_init')
self.param_to_grad = {}
self.params = []
self._parameters_info = {}
self._computed_params = []
self._param_info_deprecated = []
self._devices = []
self.gradient_ops_added = False
self.init_params = init_params
self.allow_not_known_ops = allow_not_known_ops
self.skip_sparse_optim = skip_sparse_optim
self.weights = []
self.biases = []
self._arg_scope = {
'order': "NCHW",
'use_cudnn': True,
'cudnn_exhaustive_search': False,
}
if arg_scope is not None:
# Note: a value of None is not acceptable. We do not check it here
# because MakeArgument already performs that check.
self._arg_scope.update(arg_scope)
@property
def arg_scope(self):
return self._arg_scope
def get_name(self):
return self.name
def _infer_param_shape(self, param):
for op in self.param_init_net.Proto().op:
if str(param) in op.output:
for arg in op.arg:
if arg.name == "shape":
return list(arg.ints)
return None
def _update_param_info_deprecated(self):
assert len(self._param_info_deprecated) <= len(self.params)
for param in self.params[len(self._param_info_deprecated):]:
if not isinstance(param, core.BlobReference):
raise ValueError(
"Param %s must be a BlobReference!" % str(param))
self._param_info_deprecated.append(parameter_info.ParameterInfo(
param_id=len(self._param_info_deprecated),
param=param,
shape=self._infer_param_shape(param)))
for info in self._param_info_deprecated:
info.grad = self.param_to_grad.get(info.name)
def _normalize_tags(self, tags):
tags = tags or []
return set(tags) if isinstance(tags, list) else set([tags])
def create_param(self, param_name, shape, initializer, tags=None):
"""
Creates parameter with a given name and initializer.
If param_name is an instance of BlobReference, then this blob will be
used to store the parameter (no sharing logic will affect its location).
If param_name is an instance of a string type, then the final blob will
be created in the CurrentNameScope with respect to all parameter
sharing logic, i.e. 'resolved_name_scope/param_name'.
Parameter sharing logic overrides CurrentNameScope according to the
rules specified through ParameterSharing contexts; all ParameterSharing
contexts are applied recursively until there are no extra overrides
present, and on each step the best match is applied first.
The following examples should clarify the way ParameterSharing logic
works:
As an example if this function is called with parameter 'w':
a. Call from some scope 'global_scope' with no Parameter sharing:
'global_scope/w'
b. Call from scope 'scope_b', with override {'scope_b': 'scope_a'}:
'scope_a/w'
c. Call from scope 'scope_a', with override {'scope_a': ''}:
'scope_a/w'
d. Call from scope 'scope_b/shared', with overrides
{'scope_b/shared': 'scope_b', 'scope_b': 'scope_a'}:
'scope_a/w'
e. Call from scope 'scope_b/unshared', with overrides
{'scope_b/shared': 'scope_b', 'scope_b': 'scope_a'}:
'scope_a/unshared/w'
"""
# ParameterSharing works only for case when param_name is instance of
# a string type. If param_name is a BlobReference - no attempt for
# ParameterSharing will be applied.
if isinstance(param_name, core.BlobReference):
param_name = str(param_name)
elif isinstance(param_name, str):
# Parameter name will be equal to current Namescope that got
# resolved with the respect of parameter sharing of the scopes.
param_name = parameter_sharing_context.get_parameter_name(
param_name)
else:
raise TypeError("Unsupported type for param_name")
if param_name in self._parameters_info:
assert self._parameters_info[param_name].shape == shape
return self._parameters_info[param_name].blob
param_info = initializer.create_param(
param_name=core.BlobReference(param_name),
init_net=self.param_init_net,
shape=shape,
)
optim_context = OptimizerContext.current()
for tag in self._normalize_tags(tags):
if optim_context.has_optimizer(tag):
# param_info will check optimizer has not been set
param_info.optimizer = optim_context.get_optimizer(tag)
if not param_info.optimizer and optim_context.has_optimizer(DEFAULT_OPTIM):
param_info.optimizer = optim_context.get_optimizer(DEFAULT_OPTIM)
reg_context = RegularizerContext.current()
param_info.regularizer = reg_context
self._parameters_info[param_name] = param_info
# Add param to legacy structs as well, so all other functions for
# parameters are still working.
self.AddParameter(param_info.blob, tags)
return param_info.blob
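# Illustrative sketch (not part of the original module): resolving a shared
# parameter name per rule (b) in the docstring above. ParameterSharing is
# the context manager from caffe2.python.modeling.parameter_sharing; 'init'
# is a hypothetical initializer.
#
#   with scope.NameScope('scope_b'):
#       with ParameterSharing({'scope_b': 'scope_a'}):
#           w = model.create_param('w', shape=[64], initializer=init)
#   # str(w) == 'scope_a/w'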
def get_param_info(self, param):
assert isinstance(param, core.BlobReference), \
"Param {} is not a BlobReference".format(param)
return self._parameters_info.get(param, None)
# This method is deprecated, use create_param method which
# also does parameter initialization when needed
def add_param_DEPRECATED(self, param, key=None, shape=None, length=None):
logging.warning("add_param method is DEPRECATED")
self._update_param_info_deprecated()
self.AddParameter(param)
if key is not None and self.net.input_record() is not None:
idx = self.net.input_record().field_blobs().index(key)
key = self.net.input_record().field_names()[idx]
shape = shape if shape is not None else self._infer_param_shape(param)
if not isinstance(param, core.BlobReference):
raise ValueError("Param %s must be a BlobReference!" % str(param))
self._param_info_deprecated.append(parameter_info.ParameterInfo(
param_id=len(self._param_info_deprecated),
param=param,
shape=shape,
key=key,
length=length,
))
return self._param_info_deprecated[-1]
def AddParameter(self, param, tags=None):
assert isinstance(param, core.BlobReference)
tags = self._normalize_tags(tags)
if parameter_info.ParameterTags.COMPUTED_PARAM in tags:
self._computed_params.append(param)
else:
self.params.append(param)
if parameter_info.ParameterTags.WEIGHT in tags:
self.weights.append(param)
if parameter_info.ParameterTags.BIAS in tags:
self.biases.append(param)
@staticmethod
def _NormalizeNamescope(namescope):
if namescope is None:
return scope.CurrentNameScope()
elif namescope == '' or namescope.endswith(scope._NAMESCOPE_SEPARATOR):
return namescope
else:
return namescope + scope._NAMESCOPE_SEPARATOR
def GetParams(self, namescope=None, top_scope=False):
'''
Returns the params in current namescope
'''
namescope = ModelHelper._NormalizeNamescope(namescope)
if namescope == '':
return self.params[:]
else:
return [p for p in self.params if
p.GetNameScope().startswith(namescope)]
def Proto(self):
return self.net.Proto()
def InitProto(self):
return self.param_init_net.Proto()
def RunAllOnGPU(self, *args, **kwargs):
self.param_init_net.RunAllOnGPU(*args, **kwargs)
self.net.RunAllOnGPU(*args, **kwargs)
def CreateDB(self, blob_out, db, db_type, **kwargs):
dbreader = self.param_init_net.CreateDB(
[], blob_out, db=db, db_type=db_type, **kwargs)
return dbreader
def AddGradientOperators(self, *args, **kwargs):
if self.gradient_ops_added:
raise RuntimeError("You cannot run AddGradientOperators twice.")
self.Validate()
self.gradient_ops_added = True
self.grad_map = self.net.AddGradientOperators(*args, **kwargs)
self.param_to_grad = self.get_param_to_grad(self.params)
# Populate ParameterInfo for all parameters if missing and add
# gradient blob information, so optimizers can use it.
for param, grad in self.param_to_grad.items():
param_info = self.get_param_info(param)
if param_info:
param_info.grad = grad
else:
self._parameters_info[param] = parameter_info.ParameterInfo(
param_id=None,
param=param,
grad=grad,
)
return self.grad_map
def get_param_to_grad(self, params):
'''
Given a list of parameters returns a dict from a parameter
to a corresponding gradient
'''
param_to_grad = {}
if not self.gradient_ops_added:
raise RuntimeError("You need to run AddGradientOperators first.")
# We need to use empty namescope when creating the gradients
# to prevent duplicating the namescope prefix for gradient blobs.
for p in params:
if str(p) in self.grad_map:
param_to_grad[p] = self.grad_map[str(p)]
return param_to_grad
def GetOptimizationParamInfo(self, params=None):
'''
Returns a map for param => grad.
If params is not specified, all parameters will be considered.
'''
if not self.gradient_ops_added:
raise RuntimeError("Need to call AddGradientOperators first")
param_to_grad = self.param_to_grad
if params:
param_to_grad = self.get_param_to_grad(params)
return [
self.get_param_info(param) for param, grad in viewitems(param_to_grad)
if (
not self.skip_sparse_optim or
not isinstance(grad, core.GradientSlice)
)
]
def _Validate(self):
'''
Check for duplicate params
'''
params_list = [str(p) for p in self.params]
params_set = set(params_list)
dupes = []
if len(params_set) != len(params_list):
params_list = sorted(params_list)
for j, p in enumerate(params_list):
if j > 0 and params_list[j - 1] == p:
if p not in dupes:
dupes.append(p)
return dupes
def Validate(self):
dupes = self._Validate()
assert dupes == [], "Duplicate params: {}".format(dupes)
def GetComputedParams(self, namescope=None):
'''
Returns the computed params in current namescope. 'Computed params'
are such parameters that are not optimized via gradient descent but are
directly computed from data, such as the running mean and variance
of Spatial Batch Normalization.
'''
namescope = ModelHelper._NormalizeNamescope(namescope)
if namescope == '':
return self._computed_params[:]
else:
return [p for p in self._computed_params
if p.GetNameScope().startswith(namescope)]
def GetAllParams(self, namescope=None):
return self.GetParams(namescope) + self.GetComputedParams(namescope)
def TensorProtosDBInput(
self, unused_blob_in, blob_out, batch_size, db, db_type, **kwargs
):
"""TensorProtosDBInput."""
assert len(unused_blob_in) == 0, \
"""You cannot pass reader to model_helper.TensorProtosDBInput.
Use model.net.TensorProtosDBInput instead to create the op."""
return db_input(
self, blob_out, batch_size, db, db_type, **kwargs)
def GetDevices(self):
assert len(self._devices) > 0, \
"Use data_parallel_model to run model on multiple GPUs."
return self._devices
def __getattr__(self, op_type):
"""Catch-all for all other operators, mostly those without params."""
if op_type.startswith('__'):
raise AttributeError(op_type)
if not core.IsOperator(op_type):
raise AttributeError(
'Method ' + op_type + ' is not a registered operator.' +
' Did you mean: [' +
','.join(workspace.C.nearby_opnames(op_type)) + ']'
)
if op_type not in _known_working_ops:
if not self.allow_not_known_ops:
raise AttributeError(
"Operator {} is not known to be safe".format(op_type))
logging.warning("You are creating an op that the ModelHelper "
"does not recognize: {}.".format(op_type))
return self.net.__getattr__(op_type)
def __dir__(self):
return sorted(set(chain(
dir(type(self)),
viewkeys(self.__dict__),
_known_working_ops
)))
def GetCompleteNet(self):
r""" Return param_init_net + net Net.
Returns:
'core.Net' containing param_init_net and net
"""
new_net = self.param_init_net.Clone(
self.name + "_complete_net", keep_schema=True)
# add init net info to debug info
for op in new_net.Proto().op:
op.debug_info = op.debug_info + "/param_init_net"
new_net.AppendNet(self.net)
# keep the execution optimization
if self.net.Proto().HasField("type"):
new_net.Proto().type = self.net.Proto().type
return new_net
def ConstructInitTrainNetfromNet(self, net):
r""" construct init net and train net from complete_net
Inputs:
net: 'core.Net' containing param_init_net and train net
"""
param_op_mask = []
train_op_mask = []
for idx, op in enumerate(net.Proto().op):
if op.debug_info.endswith("/param_init_net"):
param_op_mask.append(idx)
else:
train_op_mask.append(idx)
self.param_init_net = net.Clone(
net.Name() + "/generated_param_init_net",
keep_schema=True,
op_id_mask=param_op_mask,
update_external_list=True,
)
self.net = net.Clone(
net.Name() + "/generated_net",
keep_schema=True,
op_id_mask=train_op_mask,
update_external_list=True,
)
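# Illustrative sketch (not part of the original module): the typical
# ModelHelper flow described in the class docstring. The function name,
# blob names, shapes, and the brew import are assumptions for the example.
def _example_model_helper_usage():
    from caffe2.python import brew
    model = ModelHelper(name="example_train_net")
    fc1 = brew.fc(model, "data", "fc1", dim_in=16, dim_out=8)
    pred = model.net.Softmax(fc1, "pred")
    xent = model.net.LabelCrossEntropy([pred, "label"], "xent")
    loss = model.net.AveragedLoss(xent, "loss")
    model.AddGradientOperators([loss])
    # param_to_grad maps each registered parameter blob to its gradient blob.
    return model.param_to_grad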
def ExtractPredictorNet(
net_proto,
input_blobs,
output_blobs,
device=None,
renames=None,
disabled_inputs=None,
):
'''
Takes a model net used for training and returns a net suitable for
prediction. In particular, all gradient operators and input operators
are removed.
@param net_proto protobuf of the net you want to process (net.Proto())
@param input_blobs list/set of blob names that are the inputs of predictor
@param output_blobs list/set of blob names that are outputs of predictor
@param device optional device option that is assigned
@param renames dictionary of blob name to a new name (optional)
@param disabled_inputs optional set of blobs that are 'switched off'. This
will cause branches with those blobs as inputs to be removed
'''
predict_net = core.Net(net_proto.name + "_predict")
predict_proto = predict_net.Proto()
orig_external_inputs = set(net_proto.external_input)
orig_external_outputs = set(net_proto.external_output)
input_blobs = {str(b) for b in input_blobs}
known_blobs = set(orig_external_inputs).union(input_blobs)
output_blobs = {str(b) for b in output_blobs}
external_inputs = set(input_blobs)
external_outputs = set(output_blobs)
if renames is None:
renames = {}
if disabled_inputs is not None:
known_blobs = known_blobs - set(disabled_inputs)
ops = list(net_proto.op)
# Find the range of ops that we should include
try:
first_op_with_input = min(
[
j for j in range(len(ops))
if input_blobs.intersection(ops[j].input) and ops[j].type !=
'StopGradient'
]
)
except ValueError:
raise Exception("No ops with input={}".format(input_blobs))
try:
last_op_with_output = max(
[
j for j in range(len(ops))
if output_blobs.intersection(ops[j].output)
]
)
except ValueError:
raise Exception("No ops with output={}".format(output_blobs))
def validate_op(op):
# Check that the op does not have is_test = 0 set. This is a common
# pitfall with the SpatialBN op, at least.
for arg in op.arg:
if arg.name == "is_test" and arg.i == 0:
raise Exception(
"An operator had is_test=0, did you try to extract a " +
"predictor from a train model (instead of test model)?" +
" Op was: {}".format(str(op))
)
def rename_list(proto_list):
# proto lists don't support assignments
new_list = proto_list[:]
for j, b in enumerate(new_list):
if b in renames:
new_list[j] = renames[b]
del proto_list[:]
proto_list.extend(new_list)
# Iterate through the ops and only include those whose inputs
# we can satisfy.
for op in ops[first_op_with_input:(last_op_with_output + 1)]:
if known_blobs.issuperset(op.input):
# Special handling for recurrent nets
# TODO: when standard argument type for "nets" is introduced,
# this can be more general
if op.type == 'RecurrentNetwork':
for arg in op.arg:
if arg.name == 'backward_step_net':
arg.ClearField('n')
elif arg.name == 'step_net':
for step_op in arg.n.op:
rename_list(step_op.input)
rename_list(step_op.output)
if device is not None:
step_op.device_option.device_type = device.device_type
step_op.device_option.device_id = device.device_id
rename_list(arg.n.external_input)
rename_list(arg.n.external_output)
# Add additional external inputs
external_inputs.update(
set(arg.n.external_input).intersection(
orig_external_inputs
)
)
if device is not None:
op.device_option.device_type = device.device_type
op.device_option.device_id = device.device_id
validate_op(op)
predict_proto.op.extend([op])
known_blobs.update(op.output)
external_inputs.update(
set(op.input).intersection(orig_external_inputs)
)
external_outputs.update(
set(op.output).intersection(orig_external_outputs)
)
else:
logging.debug(
"Op {} had unknown inputs: {}".format(
op.type, set(op.input).difference(known_blobs)
)
)
# Predictor net's external inputs and outputs include only those
# that are part of this net.
predict_proto.external_input.extend(external_inputs)
predict_proto.external_output.extend(external_outputs)
rename_list(predict_proto.external_input)
rename_list(predict_proto.external_output)
renamed_input_blobs = []
for b in input_blobs:
if b in renames:
renamed_input_blobs.append(renames[b])
else:
renamed_input_blobs.append(b)
for op in predict_proto.op:
rename_list(op.input)
rename_list(op.output)
return predict_net, list(
set(predict_proto.external_input) - set(renamed_input_blobs)
)
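# Usage sketch (not part of the original module; the function and blob names
# are hypothetical): given a trained ModelHelper whose net maps 'data' to
# 'softmax', extract the inference-only net plus the extra external inputs
# (typically the learned parameters).
def _example_extract_predictor(model):
    predict_net, extra_inputs = ExtractPredictorNet(
        model.net.Proto(),
        input_blobs=["data"],
        output_blobs=["softmax"],
    )
    return predict_net, extra_inputs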
# source: pytorch-master | caffe2/python/model_helper.py
# @package optimizer
# Module caffe2.python.optimizer
import copy
import logging
from collections import defaultdict, namedtuple
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, scope, utils, workspace
from caffe2.python.modeling import parameter_info
from past.builtins import basestring
_LEARNING_RATE_INJECTION = "lr_injection"
AuxOptimizerParams = namedtuple("AuxOptimizerParams", ["local", "shared"])
_optimizer_instance_count = defaultdict(int)
FP16_ENGINES = ["SIMD_Q_FP16", "SIMD_Q_STOC_FP16", "SIMD_Q_STOC_MKL_FP16"]
logger = logging.getLogger(__name__)
def reset_optimizer_instance_count():
"""
This function clears the _optimizer_instance_count. And keeps it
empty. This functionality is needed in some situations where
optimizer instance count might not reset even though the workplace is reset.
"""
_optimizer_instance_count.clear()
class Optimizer(object):
def __init__(self):
self._aux_params = AuxOptimizerParams(local=[], shared=[])
self._instance_num = _optimizer_instance_count[self.__class__.__name__]
_optimizer_instance_count[self.__class__.__name__] += 1
self._lr_multiplier = None
self._local_lr_multiplier = None
self._local_lr_multiplier_on_gpu = False
"""
Adds optimization operators to the net for given parameter and its gradient
Parameter is specified by either 'param' being a ParameterInfo object.
In this case param.grad has to be set
Or by 'param' being a BlobReference and 'grad' being a BlobReference for its
gradient.
"""
def __call__(self, net, param_init_net, param, grad=None):
if grad is None:
assert isinstance(
param, parameter_info.ParameterInfo
), "Expected parameter to be of type ParameterInfo, got {}".format(param)
assert param.grad is not None
else:
if isinstance(param, basestring):
param = core.BlobReference(param)
param = parameter_info.ParameterInfo(param_id=None, param=param, grad=grad)
self._run(net, param_init_net, param)
def _run(self, net, param_init_net, param_info):
raise Exception("Not Implemented")
def get_cpu_blob_name(self, base_str, node_name=""):
classname = self.__class__.__name__
return "%s_%d_%s%s_cpu" % (classname, self._instance_num, base_str, node_name)
def get_gpu_blob_name(self, base_str, gpu_id, node_name):
classname = self.__class__.__name__
return "%s_%d_%s%s_gpu%d" % (
classname,
self._instance_num,
base_str,
node_name,
gpu_id,
)
@property
def attributes(self):
# return a dict that contains attributes related to init args only
attr = copy.deepcopy(self.__dict__)
del attr["_instance_num"]
return attr
def make_unique_blob_name(self, base_str):
"""
Returns a blob name that will be unique to the current device
and optimizer instance.
"""
current_scope = scope.CurrentDeviceScope()
if current_scope is None:
return self.get_cpu_blob_name(base_str)
if core.IsGPUDeviceType(current_scope.device_type):
return self.get_gpu_blob_name(
base_str, current_scope.device_id, current_scope.node_name
)
else:
return self.get_cpu_blob_name(base_str, current_scope.node_name)
def build_lr(
self,
net,
param_init_net,
base_learning_rate,
learning_rate_blob=None,
policy="fixed",
iter_val=0,
**kwargs
):
if learning_rate_blob is None:
learning_rate_blob = self.make_unique_blob_name("lr")
iteration = utils.BuildUniqueMutexIter(param_init_net, net, iter_val=iter_val)
if not net.BlobIsDefined(learning_rate_blob):
            # There is one interesting thing here: since we are minimizing, we
            # are doing "descent", so the learning rate is set to be negative.
lr = net.LearningRate(
[iteration],
learning_rate_blob,
base_lr=-base_learning_rate,
policy=policy,
**kwargs
)
else:
lr = net.GetBlobRef(learning_rate_blob)
if self._lr_multiplier is not None:
lr_multiplier = net.CopyFromCPUInput(
self._lr_multiplier, self.make_unique_blob_name("lr_multiplier")
)
lr = net.Mul(
[lr, lr_multiplier],
self.make_unique_blob_name("scaled_lr"),
broadcast=1,
)
if self._local_lr_multiplier is not None:
current_scope = scope.CurrentDeviceScope()
if (
current_scope is not None
and core.IsGPUDeviceType(current_scope.device_type)
and not self._local_lr_multiplier_on_gpu
):
local_lr_multiplier = net.CopyFromCPUInput(
self._local_lr_multiplier,
self.make_unique_blob_name("local_lr_multiplier"),
)
else:
local_lr_multiplier = self._local_lr_multiplier
lr = net.Mul(
[lr, local_lr_multiplier],
self.make_unique_blob_name("local_scaled_lr"),
broadcast=1,
)
return lr, iteration
def add_lr_multiplier(self, lr_multiplier):
"""
Set the global learning rate multiplier. If a multiplier already
existed, this will overwrite the existing multiplier. The multiplier is
used for all future calls to _run(), unless it is overwritten.
"""
self._lr_multiplier = lr_multiplier
def _add_local_lr_multiplier(self, local_lr_multiplier, is_gpu_blob=False):
"""
Set the local learning rate multiplier. This local multiplier is
multiplied with the global learning rate multiplier if it exists. As
with the global learning rate multiplier, this multiplier will be
used for all future calls to _run(), so please call
_clear_local_lr_multiplier() at the beginning of the optimizer's _run()
before optionally calling this function.
"""
self._local_lr_multiplier = local_lr_multiplier
self._local_lr_multiplier_on_gpu = is_gpu_blob
def _clear_local_lr_multiplier(self):
self._local_lr_multiplier = None
self._local_lr_multiplier_on_gpu = False
@staticmethod
def dedup(net, sparse_dedup_aggregator, grad):
assert isinstance(
grad, core.GradientSlice
), "Dedup only works for sparse gradient, got {}".format(grad)
if sparse_dedup_aggregator:
return net.DeduplicateGradientSlices(
grad, aggregator=sparse_dedup_aggregator
)
else:
return grad
def get_auxiliary_parameters(self):
"""Returns a list of auxiliary parameters.
Returns:
aux_params: A namedtuple, AuxParams.
aux_params.local stores a list of blobs. Each blob is a local
auxiliary parameter. A local auxiliary parameter is a parameter in
parallel to a learning rate parameter. Take adagrad as an example,
the local auxiliary parameter is the squared sum parameter, because
every learning rate has a squared sum associated with it.
aux_params.shared also stores a list of blobs. Each blob is a shared
auxiliary parameter. A shared auxiliary parameter is a parameter
that is shared across all the learning rate parameters. Take adam as
an example, the iteration parameter is a shared parameter, because
all the learning rates share the same iteration parameter.
"""
return self._aux_params
    # TODO(xlwang): In transfer learning, parameters initialized from a
    # pretrained model might require a different learning rate than parameters
    # initialized from scratch. To this end, we implement a Python solution
    # here where `base_learning_rate` is scaled by `scale`, by calling
    # `scale_learning_rate`; alternatively, the same effect can be achieved by
    # rewriting the LearningRate operator in C++.
    # Note that it is the responsibility of the specific optimizer to decide
    # what logic should be used for `scale_learning_rate`.
def scale_learning_rate(self, *args, **kwargs):
raise NotImplementedError(
"Optimizer Need to Implement `scale_learning_rate` method."
)
def create_lars_inputs(self, param_init_net, weight_decay, trust, lr_max):
wd = param_init_net.ConstantFill(
[], "weight_decay", shape=[1], value=weight_decay
)
trust = param_init_net.ConstantFill([], "trust", shape=[1], value=trust)
lr_max = param_init_net.ConstantFill([], "lr_max", shape=[1], value=lr_max)
return wd, trust, lr_max
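# Minimal subclass sketch (illustrative, not part of the original module): a
# custom optimizer only has to implement _run(). build_lr() negates the base
# learning rate internally (base_lr=-base_learning_rate), so the lr blob is
# negative and WeightedSum performs descent, mirroring the dense,
# momentum-free path of SgdOptimizer below.
class _ExamplePlainSgd(Optimizer):
    def __init__(self, base_learning_rate=0.01, policy="fixed", **kwargs):
        super(_ExamplePlainSgd, self).__init__()
        self.base_learning_rate = base_learning_rate
        self.policy = policy
        self.init_kwargs = kwargs
    def _run(self, net, param_init_net, param_info):
        param = param_info.blob
        grad = param_info.grad
        lr, _ = self.build_lr(
            net,
            param_init_net,
            base_learning_rate=self.base_learning_rate,
            policy=self.policy,
            **(self.init_kwargs)
        )
        ONE = param_init_net.ConstantFill(
            [], str(param) + "_example_one", shape=[1], value=1.0
        )
        # param <- 1 * param + lr * grad (lr is negative, so this descends)
        net.WeightedSum([param, ONE, grad, lr], param)
    def scale_learning_rate(self, scale):
        self.base_learning_rate *= scale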
class SgdOptimizer(Optimizer):
def __init__(
self,
base_learning_rate=0.01,
policy="fixed",
momentum=0.0,
nesterov=True,
sparse_dedup_aggregator=None,
lars=None,
**kwargs
):
super(SgdOptimizer, self).__init__()
self.base_learning_rate = base_learning_rate
self.policy = policy
self.momentum = momentum
self.nesterov = nesterov
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.lars = lars
self.init_kwargs = kwargs
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.base_learning_rate == 0:
return
assert (
self.base_learning_rate > 0
), "Expect positive base learning rate, got {}".format(self.base_learning_rate)
self._clear_local_lr_multiplier()
# TODO(zqq): support LARS for sparse parameters
if self.lars is not None and not isinstance(grad, core.GradientSlice):
assert self.lars >= 0, "Lars offset must be nonnegative, got {}".format(
self.lars
)
wd, trust, lr_max = self.create_lars_inputs(
param_init_net, 0.0, 1.0, np.finfo(np.float32).max
)
lr_lars_multiplier = net.Lars(
[param, grad, wd, trust, lr_max],
self.make_unique_blob_name(str(param) + "_lars"),
offset=self.lars,
lr_min=0.0,
)
current_scope = scope.CurrentDeviceScope()
self._add_local_lr_multiplier(
lr_lars_multiplier,
is_gpu_blob=(
current_scope is not None
and core.IsGPUDeviceType(current_scope.device_type)
),
)
        # We need a negative sign for LR when it is used directly with
        # WeightedSum below.
lr_sign = -1 if self.momentum else 1
lr, _ = self.build_lr(
net,
param_init_net,
base_learning_rate=self.base_learning_rate * lr_sign,
policy=self.policy,
**(self.init_kwargs)
)
dev = scope.CurrentDeviceScope()
if dev is None:
dev = core.DeviceOption(caffe2_pb2.CPU)
# Each GPU/CPU must have its own ONE blob, thus modify the name
# to include device information.
ONE = param_init_net.ConstantFill(
[],
"ONE_{}_{}{}".format(dev.device_type, dev.device_id, dev.node_name),
shape=[1],
value=1.0,
)
self._aux_params.shared.append(ONE)
if self.momentum > 0:
momentum_data = param_init_net.ConstantFill(
param, str(param) + "_momentum", value=0.0
)
self._aux_params.local.append(momentum_data)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
if self.momentum > 0.0:
net.SparseMomentumSGDUpdate(
[grad.values, momentum_data, lr, param, grad.indices],
[grad.values, momentum_data, param],
momentum=self.momentum,
nesterov=self.nesterov,
)
else:
net.ScatterWeightedSum(
[param, ONE, grad.indices, grad.values, lr], param
)
else:
if self.momentum > 0.0:
net.MomentumSGDUpdate(
[grad, momentum_data, lr, param],
[grad, momentum_data, param],
momentum=self.momentum,
nesterov=self.nesterov,
)
else:
coeff = lr
net.WeightedSum([param, ONE, grad, coeff], param)
def scale_learning_rate(self, scale):
self.base_learning_rate *= scale
return
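# Usage sketch (illustrative, not part of the original module): the usual
# entry point is build_sgd() (defined later in this module), which wraps
# SgdOptimizer and attaches it to every optimizable parameter of a ModelHelper
# model; extra keyword arguments such as stepsize and gamma are forwarded to
# the LearningRate operator.
def _example_build_sgd(model):
    return build_sgd(
        model,
        base_learning_rate=0.1,
        policy="step",
        stepsize=1000,
        gamma=0.999,
        momentum=0.9,
    )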
class MultiPrecisionSgdOptimizer(SgdOptimizer):
def __init__(
self,
base_learning_rate=0.1,
momentum=0.0,
policy="fixed",
nesterov=True,
sparse_dedup_aggregator=None,
**kwargs
):
super(MultiPrecisionSgdOptimizer, self).__init__(
base_learning_rate=base_learning_rate,
policy=policy,
momentum=momentum,
nesterov=nesterov,
sparse_dedup_aggregator=sparse_dedup_aggregator,
**kwargs
)
def _run(self, net, param_init_net, param_info):
param = param_info.blob
param_fp32 = (
param_info.blob_copy[core.DataType.FLOAT]
if param_info.blob_copy is not None
else None
)
# If we have a straight fp32 parameter, run the base class
if param_fp32 is None:
return SgdOptimizer._run(self, net, param_init_net, param_info)
grad = param_info.grad
if self.base_learning_rate == 0:
return
assert (
self.base_learning_rate > 0
), "Expect positive base learning rate, got {}".format(self.base_learning_rate)
lr, _ = self.build_lr(
net,
param_init_net,
base_learning_rate=-self.base_learning_rate,
policy=self.policy,
**(self.init_kwargs)
)
momentum_data = param_init_net.ConstantFill(
param_fp32, str(param) + "_momentum", value=0.0
)
self._aux_params.local.append(momentum_data)
assert not isinstance(
grad, core.GradientSlice
), "MultiPrecisionSgd does not support sparse gradients"
# Copy gradient to fp32
grad_fp32 = net.HalfToFloat(grad, grad + "_fp32")
# update (fused) in fp32
net.MomentumSGDUpdate(
[grad_fp32, momentum_data, lr, param_fp32],
[grad_fp32, momentum_data, param_fp32],
momentum=self.momentum,
nesterov=self.nesterov,
)
# Copy updated param back to fp16
net.FloatToHalf(param_fp32, param)
class FP16SgdOptimizer(SgdOptimizer):
def __init__(
self,
base_learning_rate=0.1,
momentum=0.0,
policy="fixed",
nesterov=True,
weight_decay=0.0001,
sparse_dedup_aggregator=None,
**kwargs
):
super(FP16SgdOptimizer, self).__init__(
base_learning_rate=base_learning_rate,
policy=policy,
momentum=momentum,
nesterov=nesterov,
sparse_dedup_aggregator=sparse_dedup_aggregator,
**kwargs
)
self.weight_decay = weight_decay
def _run(self, net, param_init_net, param_info, fp32_update=False):
fp32_update_flag = 0
param_name = str(param_info.blob)
# should only be triggered in FP16 training by SpatialBN, which
# requires FP32 params in CuDNN.
if param_name.find("spatbn") != -1:
fp32_update = True
if fp32_update:
# doing a 32bit update
            # Have to assume param_info.blob is FP32, as there is currently
            # no way to query a blob's type from Python
fp32_update_flag = 1
param = param_info.blob
param_fp32 = param_info.blob
else:
if param_info.blob_copy is None:
# doing a 32bit update
                # Have to assume param_info.blob is FP32, as there is
                # currently no way to query a blob's type from Python
fp32_update_flag = 1
param = param_info.blob
param_fp32 = param_info.blob
else:
if core.DataType.FLOAT in param_info.blob_copy:
param = param_info.blob
param_fp32 = param_info.blob_copy[core.DataType.FLOAT]
elif core.DataType.FLOAT16 in param_info.blob_copy:
param = param_info.blob_copy[core.DataType.FLOAT16]
param_fp32 = param_info.blob
else:
                    raise AssertionError(
"Unrecognized parameter format to be updated "
"by FP16 Optimizer. Parameter: {}".format(param_info.name)
)
grad = param_info.grad
if self.base_learning_rate == 0:
return
assert (
self.base_learning_rate > 0
), "Expect positive base learning rate, got {}".format(self.base_learning_rate)
lr, _ = self.build_lr(
net,
param_init_net,
base_learning_rate=-self.base_learning_rate,
policy=self.policy,
**(self.init_kwargs)
)
momentum_data_fp32 = param_init_net.ConstantFill(
param_fp32, str(param) + "_momentum_fp32", value=0.0
)
momentum_data = param_init_net.FloatToHalf(
momentum_data_fp32, str(param) + "_momentum"
)
self._aux_params.local.append(momentum_data)
assert not isinstance(
grad, core.GradientSlice
), "FP16Sgd does not support sparse gradients"
if fp32_update_flag == 0:
net.FP16MomentumSGDUpdate(
[grad, momentum_data, lr, param],
[grad, momentum_data, param],
momentum=self.momentum,
nesterov=self.nesterov,
weight_decay=self.weight_decay,
)
else:
# flag set to 1, therefore doing FP32 update
net.FP32MomentumSGDUpdate(
[grad, momentum_data_fp32, lr, param],
[grad, momentum_data_fp32, param],
momentum=self.momentum,
nesterov=self.nesterov,
weight_decay=self.weight_decay,
)
class WeightDecayBuilder(Optimizer):
def __init__(self, weight_decay):
self.weight_decay = weight_decay
def _run(self, net, param_init_net, param_info):
dev = scope.CurrentDeviceScope()
if dev is None:
dev = core.DeviceOption(caffe2_pb2.CPU)
ONE = param_init_net.ConstantFill(
[], "ONE_{}_{}".format(dev.device_type, dev.device_id), shape=[1], value=1.0
)
WD = param_init_net.ConstantFill(
[],
"wd_{}_{}".format(dev.device_type, dev.device_id),
shape=[1],
value=self.weight_decay,
)
if isinstance(param_info.grad, core.GradientSlice):
raise ValueError("Weight decay does not yet support sparse gradients")
else:
net.WeightedSum(
[param_info.grad, ONE, param_info.blob, WD], param_info.grad
)
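# Worked sketch (plain numpy, illustrative, not part of the original module):
# the WeightedSum above rewrites the gradient as
# grad <- 1 * grad + weight_decay * param, which is the standard L2-penalty
# gradient applied before the optimizer step.
def _example_weight_decay_numpy():
    param = np.array([1.0, -2.0], dtype=np.float32)
    grad = np.array([0.5, 0.5], dtype=np.float32)
    weight_decay = 0.01
    return grad + weight_decay * param  # -> [0.51, 0.48]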
class AdagradOptimizer(Optimizer):
def __init__(
self,
alpha=0.01,
epsilon=1e-4,
decay=1,
weight_decay=0.0,
policy="fixed",
sparse_dedup_aggregator=None,
rowWise=False,
engine="",
lars=None,
output_effective_lr=False,
output_effective_lr_and_update=False,
pruning_options=None,
swa_options=None,
ema_options=None,
weight_scale=None,
counter_halflife=-1,
**kwargs
):
super(AdagradOptimizer, self).__init__()
self.alpha = alpha
self.epsilon = epsilon
self.decay = decay
self.weight_decay = float(weight_decay)
self.policy = policy
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.rowWise = rowWise
self.engine = engine
self.lars = lars
self.output_effective_lr = output_effective_lr
self.output_effective_lr_and_update = output_effective_lr_and_update
self.counter_halflife = counter_halflife
self.init_kwargs = kwargs
self.weight_scale = weight_scale
self._process_pruning_options(pruning_options)
self._process_swa_options(swa_options)
self._process_ema_options(ema_options)
def _process_swa_options(self, swa_options):
self.swa_enabled = True if swa_options else False
if self.swa_enabled:
self.swa_avg_start_it = swa_options.get("swa_avg_start_it", None)
self.swa_avg_end_it = swa_options.get("swa_avg_end_it", None)
self.swa_feedback_start_it = swa_options.get("swa_feedback_start_it", None)
self.swa_feedback_step = swa_options.get("swa_feedback_step", None)
self.swa_feedback_end_it = swa_options.get("swa_feedback_end_it", None)
def _process_ema_options(self, ema_options):
self.ema_enabled = True if ema_options else False
if self.ema_enabled:
self.ema_start = ema_options.get("ema_start", None)
self.ema_end = ema_options.get("ema_end", None)
self.ema_step = ema_options.get("ema_step", None)
self.ema_alpha = ema_options.get("ema_alpha", None)
def _process_pruning_options(self, pruning_options):
self.use_mask = False
if pruning_options is None:
pruning_options = {}
else:
assert isinstance(pruning_options, dict), (
"pruning_options can only "
"be provided as a dictionary, currently: {}".format(pruning_options)
)
self.mask_tensor = pruning_options.get("mask_tensor", None)
self.mask_db_path = pruning_options.get("mask_db_path", None)
self.mask_db_type = pruning_options.get("mask_db_type", None)
self.mask_blob_name = pruning_options.get("mask_blob_name", None)
self.prune_delays = pruning_options.get("prune_delays", [])
self.prune_ratios = pruning_options.get("prune_ratios", [])
self.prune_block_size = pruning_options.get("prune_block_size", 1)
if self.mask_tensor is not None:
assert (
type(self.mask_tensor) is np.ndarray
), "mask_tensor must be a numpy array!"
assert self.mask_db_path is None, (
"mask can be provided through either a numpy array "
"or a db path, not both"
)
assert self.mask_db_type is None, (
"mask can be provided through either a numpy array "
"or a db path, not both"
)
assert self.mask_blob_name is None, (
"mask can be provided through either a numpy array "
"or a db path, not both"
)
self.use_mask = True
if self.mask_db_path is not None or self.mask_db_type is not None:
assert self.mask_db_path is not None, (
"when mask is provided through db, "
"db path, db type, and blob name are all needed"
)
assert self.mask_db_type is not None, (
"when mask is provided through db, "
"db path, db type, and blob name are all needed"
)
assert self.mask_tensor is None, (
"mask can be provided through either a numpy array "
"or a db path, not both"
)
self.use_mask = True
if self.prune_delays:
assert self.prune_ratios is not None and len(self.prune_delays) == len(
self.prune_ratios
), "Prune Delays and prune ratios should be of the same length"
assert (
self.mask_tensor is None
), "Mask Tensor should be None with prune ratios"
assert (
self.mask_db_path is None
), "Mask DB Path should be None with prune ratios"
self.use_mask = True
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.alpha <= 0:
return
self._clear_local_lr_multiplier()
if self.lars is not None and not isinstance(grad, core.GradientSlice):
assert (
self.weight_decay == 0
), "weight decay is not implemented for LARS yet"
assert self.lars >= 0, "Lars offset must be nonnegative, got {}".format(
self.lars
)
wd, trust, lr_max = self.create_lars_inputs(
param_init_net, 0.0, 1.0, np.finfo(np.float32).max
)
lr_lars_multiplier = net.Lars(
[param, grad, wd, trust, lr_max],
self.make_unique_blob_name(str(param) + "_lars"),
offset=self.lars,
lr_min=0.0,
)
current_scope = scope.CurrentDeviceScope()
self._add_local_lr_multiplier(
lr_lars_multiplier,
is_gpu_blob=(
current_scope is not None
and core.IsGPUDeviceType(current_scope.device_type)
),
)
lr, lr_iteration = self.build_lr(
net,
param_init_net,
base_learning_rate=self.alpha,
policy=self.policy,
**(self.init_kwargs)
)
iteration = lr_iteration
if self.counter_halflife > 0:
self._aux_params.shared.append(iteration)
if self.rowWise:
logger.debug(
"Using engine {} for rowWise Adagrad to train param {}".format(
self.engine, param
)
)
shapes, types = workspace.InferShapesAndTypes([param_init_net])
if str(param) not in shapes:
                # Type/shape inference is not available for this param; fall
                # back on Shape/Slice logic
shape = param_init_net.Shape(param, str(param) + "_shape")
num_rows = param_init_net.Slice(
[shape], str(shape) + "_numrows", starts=[0], ends=[1]
)
param_squared_sum = param_init_net.ConstantFill(
num_rows,
str(param) + "_avg_squared_sum",
input_as_shape=1,
value=0.0,
)
else:
param_squared_sum = param_init_net.ConstantFill(
[],
str(param) + "_avg_squared_sum",
shape=[shapes[str(param)][0]],
value=0.0,
)
else:
logger.debug(
"Using engine {} for regular Adagrad to train param {}".format(
self.engine, param
)
)
if self.engine in FP16_ENGINES:
assert (
self.weight_decay == 0
), "weight decay is not tested for engine: {}".format(self.engine)
shapes, types = workspace.InferShapesAndTypes([param_init_net])
assert str(param) in shapes, shapes
shape = shapes[str(param)]
param_squared_sum = param_init_net.Float16ConstantFill(
[], str(param) + "_squared_sum", value=0.0, shape=shape
)
else:
param_squared_sum = param_init_net.ConstantFill(
[param], str(param) + "_squared_sum", value=0.0
)
if self.use_mask is True:
assert (
self.weight_decay == 0
), "weight decay is not implemented for use_mask yet"
if self.mask_tensor is not None:
if not isinstance(grad, core.GradientSlice):
mask_blob = param_init_net.GivenTensorFill(
[],
[str(param) + "_mask"],
values=self.mask_tensor,
shape=self.mask_tensor.shape,
)
else:
self.mask_tensor = self.mask_tensor.astype(np.uint8)
mask_blob = param_init_net.GivenTensorBoolFill(
[],
[str(param) + "_mask"],
values=self.mask_tensor,
shape=self.mask_tensor.shape,
)
mask_blob = param_init_net.Cast(mask_blob, to=core.DataType.UINT8)
mask_changed_blob = param_init_net.ConstantFill(
[],
[str(param) + "_mask_changed_blob"],
value=False,
dtype=core.DataType.BOOL,
shape=[1],
)
elif (
self.mask_db_path is not None or self.mask_db_type is not None
): # mask is provided through a db file
                # if mask_blob_name is not given, use the param name to derive the mask name
self.mask_blob_name = self.mask_blob_name or str(param) + "_mask"
mask_blob = param_init_net.Load(
[],
self.mask_blob_name,
db=self.mask_db_path,
db_type=self.mask_db_type,
absolute_path=True,
)
if isinstance(grad, core.GradientSlice):
mask_changed_blob = param_init_net.ConstantFill(
[],
[str(param) + "_mask_changed_blob"],
value=False,
dtype=core.DataType.BOOL,
shape=[1],
)
elif self.prune_delays:
last_mask_updated_iter = param_init_net.ConstantFill(
[],
[str(param) + "_last_mask_updated_iter"],
value=-1,
dtype=core.DataType.INT64,
shape=[1],
)
if isinstance(grad, core.GradientSlice):
                    raise AssertionError(
                        "Prune Delays and Prune Ratios are currently not supported "
                        "for sparse operators"
                    )
else:
mask_blob = param_init_net.GivenTensorFill(
[],
[str(param) + "_empty_mask"],
values=[],
dtype=core.DataType.FLOAT,
shape=[0],
)
else:
raise NotImplementedError(
"If mask is used, it needs a numpy array or a db file or"
"a delay iter needs to be provided"
)
self._aux_params.local.append(param_squared_sum)
if self.counter_halflife > 0:
shapes, types = workspace.InferShapesAndTypes([param_init_net])
if str(param) not in shapes:
shape = param_init_net.Shape(param, str(param) + "_shape")
num_rows = param_init_net.Slice(
[shape], str(shape) + "_numrows", starts=[0], ends=[1]
)
update_counter = param_init_net.ConstantFill(
num_rows,
str(param) + "_update_counter",
input_as_shape=1,
value=0.0,
dtype=core.DataType.DOUBLE,
)
prev_update_iter = param_init_net.ConstantFill(
num_rows,
str(param) + "_prev_update_iter",
input_as_shape=1,
value=0,
dtype=core.DataType.INT64,
)
else:
update_counter = param_init_net.ConstantFill(
[],
str(param) + "_update_counter",
shape=[shapes[str(param)][0]],
value=0.0,
dtype=core.DataType.DOUBLE,
)
prev_update_iter = param_init_net.ConstantFill(
[],
str(param) + "_prev_update_iter",
shape=[shapes[str(param)][0]],
value=0,
dtype=core.DataType.INT64,
)
self._aux_params.local.append(update_counter)
self._aux_params.local.append(prev_update_iter)
if self.rowWise:
assert isinstance(grad, core.GradientSlice), (
"If SparseAdagrad with rowWise=True, gradient must be "
"a gradientslice. PLease ensure that rowWise is not enabled "
"for the dense Adagrad optimizer, as it is not supported."
)
shapes, _ = workspace.InferShapesAndTypes([param_init_net])
param_shape = shapes[str(param)]
weight_decay = 0.0
if isinstance(grad, core.GradientSlice):
if len(param_shape) == 1:
weight_decay = 0.0
                logger.warning(
"SKIPPING weight decay on 1d sparse param: {}.shape is {}".format(
str(param), param_shape
)
)
else:
weight_decay = self.weight_decay
else:
# Skip weight decay for 1d parameters
if len(param_shape) == 1:
weight_decay = 0.0
logger.warning(
"SKIPPING weight decay on 1d dense param: {}.shape is {}".format(
str(param), param_shape
)
)
else:
weight_decay = self.weight_decay
logger.debug(
"weight_decay for {} (shape:{}): {}".format(
str(param), param_shape, weight_decay
)
)
if isinstance(grad, core.GradientSlice):
assert (
self.decay == 1.0
), "Decay is not implemented for SparseAdagrad and must be set to 1"
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
input_args = [param, param_squared_sum, grad.indices, grad.values, lr]
output_args = [param, param_squared_sum]
if self.rowWise:
if self.use_mask is True:
op = "MaskedRowWiseSparseAdagrad"
assert (
weight_decay == 0
), "weight decay is not implemented for {} yet".format(op)
input_args += [mask_blob, mask_changed_blob]
else:
if self.counter_halflife > 0:
input_args += [update_counter]
op = "RowWiseSparseAdagrad"
else:
if self.use_mask is True:
op = "MaskedSparseAdagrad"
assert (
weight_decay == 0
), "weight decay is not implemented for {} yet".format(op)
input_args += [mask_blob, mask_changed_blob]
else:
op = "SparseAdagrad"
logger.debug("using {} for {}".format(op, str(param)))
if self.prune_delays:
input_args += [lr_iteration, last_mask_updated_iter]
output_args += [mask_blob, last_mask_updated_iter]
if weight_decay > 0 and self.counter_halflife == -1:
net.__getattr__(op)(
input_args,
output_args,
epsilon=self.epsilon,
weight_decay=weight_decay,
engine=self.engine,
)
elif weight_decay > 0 and self.counter_halflife != -1:
net.__getattr__(op)(
input_args,
output_args,
epsilon=self.epsilon,
weight_decay=weight_decay,
engine=self.engine,
counter_halflife=self.counter_halflife,
)
else:
net.__getattr__(op)(
input_args, output_args, epsilon=self.epsilon, engine=self.engine
)
if self.counter_halflife > 0:
net.RowWiseCounter(
[prev_update_iter, update_counter, grad.indices, iteration],
[prev_update_iter, update_counter],
counter_halflife=self.counter_halflife,
)
else:
input_args = [param, param_squared_sum, grad, lr]
output_args = [param, param_squared_sum]
if self.output_effective_lr_and_update:
assert (
self.use_mask is False
), "MaskedAdagrad doesn't support outputting effective_lr_and_update"
output_args.append(str(param) + "_effective_lr")
output_args.append(str(param) + "_update")
elif self.output_effective_lr:
assert (
self.use_mask is False
), "MaskedAdagrad doesn't support outputting effective_lr"
output_args.append(str(param) + "_effective_lr")
if self.use_mask is True:
input_args += [mask_blob]
if self.prune_delays:
input_args += [lr_iteration, last_mask_updated_iter]
output_args += [mask_blob, last_mask_updated_iter]
if self.use_mask:
assert (
weight_decay == 0
), "weight decay is not implemented for use_mask yet"
net.MaskedAdagrad(
input_args,
output_args,
epsilon=self.epsilon,
decay=float(self.decay),
block_size=self.prune_block_size,
delays=self.prune_delays,
prune_ratios=self.prune_ratios,
engine=self.engine,
)
else:
if weight_decay > 0:
net.Adagrad(
input_args,
output_args,
epsilon=self.epsilon,
decay=float(self.decay),
weight_decay=weight_decay,
engine=self.engine,
)
else:
net.Adagrad(
input_args,
output_args,
epsilon=self.epsilon,
decay=float(self.decay),
engine=self.engine,
)
if self.swa_enabled:
param_swa = str(param) + "_swa"
if not param_init_net.BlobIsDefined(param_swa):
param_init_net.ConstantFill([param], param_swa, value=0.0)
self._aux_params.local.append(param_swa)
net.SWA(
[param, param_swa, lr_iteration],
[param, param_swa],
avg_start=self.swa_avg_start_it,
avg_end=self.swa_avg_end_it,
feedback_start=self.swa_feedback_start_it,
feedback_step=self.swa_feedback_step,
feedback_end=self.swa_feedback_end_it,
)
if self.ema_enabled:
param_ema = str(param) + "_ema"
if not param_init_net.BlobIsDefined(param_ema):
param_init_net.ConstantFill([param], param_ema, value=0.0)
self._aux_params.local.append(param_ema)
net.EMA(
[param, param_ema, lr_iteration],
[param, param_ema],
ema_start=self.ema_start,
ema_end=self.ema_end,
ema_step=self.ema_step,
ema_alpha=self.ema_alpha,
)
if self.weight_scale:
net.WeightScale(
[param, lr_iteration],
[param],
stepsize=self.weight_scale.stepsize,
upper_bound_iter=self.weight_scale.upper_bound_iter,
scale=float(self.weight_scale.scale),
)
if self.weight_scale.to_aux:
net.WeightScale(
[param_squared_sum, lr_iteration],
[param_squared_sum],
stepsize=self.weight_scale.stepsize,
upper_bound_iter=self.weight_scale.upper_bound_iter,
scale=float(self.weight_scale.scale),
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
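# Usage sketch (illustrative, not part of the original module): build_adagrad()
# (defined later in this module) is the common entry point; rowWise=True
# requires sparse gradients and keeps a single accumulator value per embedding
# row instead of one per element.
def _example_build_rowwise_adagrad(model):
    return build_adagrad(
        model,
        base_learning_rate=0.05,
        epsilon=1e-6,
        rowWise=True,
    )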
class WngradOptimizer(Optimizer):
def __init__(
self,
alpha=1.0,
epsilon=1e-9,
policy="fixed",
sparse_dedup_aggregator=None,
engine="",
moment_init=100.0,
lars=None,
output_effective_lr=False,
output_effective_lr_and_update=False,
**kwargs
):
super(WngradOptimizer, self).__init__()
self.alpha = alpha
self.epsilon = epsilon
self.policy = policy
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.engine = engine
self.moment_init = moment_init
self.lars = lars
self.output_effective_lr = output_effective_lr
self.output_effective_lr_and_update = output_effective_lr_and_update
self.init_kwargs = kwargs
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.alpha <= 0:
return
self._clear_local_lr_multiplier()
if self.lars is not None and not isinstance(grad, core.GradientSlice):
assert self.lars >= 0, "Lars offset must be nonnegative, got {}".format(
self.lars
)
wd, trust, lr_max = self.create_lars_inputs(
param_init_net, 0.0, 1.0, np.finfo(np.float32).max
)
lr_lars_multiplier = net.Lars(
[param, grad, wd, trust, lr_max],
self.make_unique_blob_name(str(param) + "_lars"),
offset=self.lars,
lr_min=0.0,
)
current_scope = scope.CurrentDeviceScope()
self._add_local_lr_multiplier(
lr_lars_multiplier,
is_gpu_blob=(
current_scope is not None
and core.IsGPUDeviceType(current_scope.device_type)
),
)
lr, _ = self.build_lr(
net,
param_init_net,
base_learning_rate=self.alpha,
policy=self.policy,
**(self.init_kwargs)
)
moment = param_init_net.ConstantFill(
[], str(param) + "_moment", shape=[1], value=self.moment_init
)
self._aux_params.local.append(moment)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
net.SparseWngrad(
[param, moment, grad.indices, grad.values, lr],
[param, moment],
epsilon=self.epsilon,
engine=self.engine,
)
else:
output_args = [param, moment]
if self.output_effective_lr_and_update:
output_args.append(str(param) + "_effective_lr")
output_args.append(str(param) + "_update")
elif self.output_effective_lr:
output_args.append(str(param) + "_effective_lr")
net.Wngrad(
[param, moment, grad, lr],
output_args,
epsilon=self.epsilon,
engine=self.engine,
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
class StormOptimizer(Optimizer):
def __init__(
self,
lr=0.1,
momentum=10.0,
beta=0.1,
grad_sq_init=0.01,
policy="fixed",
sparse_dedup_aggregator=None,
lars=None,
**kwargs
):
"""Constructor function to add STORM Optimizer
Args:
lr: learning rate scaling (called k in the original paper)
momentum: momentum scaling (called c in the original paper)
beta: initial value of denominator in adaptive learning rate (
                called w in the original paper)
grad_sq_init: initial value of gradient squared accumulator.
policy: specifies how learning rate should be applied, options are
'fixed', 'step', 'exp', etc.
sparse_dedup_aggregator: specifies deduplication strategy for
gradient slices. Works while using sparse gradients. Options
include 'mean' and 'sum'.
lars: lars offset.
"""
super(StormOptimizer, self).__init__()
self.lr = lr
self.momentum = momentum
self.beta = beta
self.grad_sq_init = grad_sq_init
self.policy = policy
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.lars = lars
self.init_kwargs = kwargs
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.lr <= 0:
return
self._clear_local_lr_multiplier()
if self.lars is not None and not isinstance(grad, core.GradientSlice):
assert self.lars >= 0, "Lars offset must be nonnegative, got {}".format(
self.lars
)
wd, trust, lr_max = self.create_lars_inputs(
param_init_net, 0.0, 1.0, np.finfo(np.float32).max
)
lr_lars_multiplier = net.Lars(
[param, grad, wd, trust, lr_max],
self.make_unique_blob_name(str(param) + "_lars"),
offset=self.lars,
lr_min=0.0,
)
current_scope = scope.CurrentDeviceScope()
self._add_local_lr_multiplier(
lr_lars_multiplier,
is_gpu_blob=(
current_scope is not None
and core.IsGPUDeviceType(current_scope.device_type)
),
)
lr, _ = self.build_lr(
net,
param_init_net,
base_learning_rate=self.lr,
policy=self.policy,
**(self.init_kwargs)
)
moment = param_init_net.ConstantFill(param, str(param) + "_moment", value=0.0)
self._aux_params.local.append(moment)
grad_sq_sum = param_init_net.ConstantFill(
[], str(param) + "_grad_sq_sum", shape=[1], value=self.grad_sq_init
)
self._aux_params.local.append(grad_sq_sum)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
net.SparseStorm(
[param, moment, grad_sq_sum, grad.values, grad.indices, lr],
[param, moment, grad_sq_sum],
momentum=self.momentum,
beta=self.beta,
)
else:
net.Storm(
[param, moment, grad_sq_sum, grad, lr],
[param, moment, grad_sq_sum],
momentum=self.momentum,
beta=self.beta,
)
def scale_learning_rate(self, scale):
self.lr *= scale
class AdadeltaOptimizer(Optimizer):
def __init__(
self,
alpha=0.01,
epsilon=1e-4,
decay=0.95,
policy="fixed",
sparse_dedup_aggregator=None,
engine="",
**kwargs
):
"""Constructor function to add Adadelta Optimizer
Args:
alpha: learning rate
epsilon: attribute of Adadelta to avoid numerical issues
decay: attribute of Adadelta to decay the squared gradient sum
policy: specifies how learning rate should be applied, options are
"fixed", "step", "exp", etc.
sparse_dedup_aggregator: specifies deduplication strategy for
gradient slices. Works while using sparse gradients. Options
include "mean" and "sum".
engine: the engine used, options include "", "CUDNN", etc.
"""
super(AdadeltaOptimizer, self).__init__()
self.alpha = alpha
self.epsilon = epsilon
self.decay = decay
self.policy = policy
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.engine = engine
self.init_kwargs = kwargs
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.alpha <= 0:
return
lr, _ = self.build_lr(
net,
param_init_net,
base_learning_rate=self.alpha,
policy=self.policy,
**(self.init_kwargs)
)
moment = param_init_net.ConstantFill(
[param], str(param) + "_squared_moment", value=0.0
)
moment_update = param_init_net.ConstantFill(
[param], str(param) + "_squared_moment_update", value=0.0
)
self._aux_params.local.append(moment)
self._aux_params.local.append(moment_update)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
net.SparseAdadelta(
[param, moment, moment_update, grad.indices, grad.values, lr],
[param, moment, moment_update],
epsilon=self.epsilon,
decay=self.decay,
engine=self.engine,
)
else:
net.Adadelta(
[param, moment, moment_update, grad, lr],
[param, moment, moment_update],
epsilon=self.epsilon,
decay=self.decay,
engine=self.engine,
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
class FtrlOptimizer(Optimizer):
def __init__(
self,
alpha=0.01,
beta=1e-4,
lambda1=0,
lambda2=0,
sparse_dedup_aggregator=None,
engine="",
):
super(FtrlOptimizer, self).__init__()
self.alpha = alpha
self.beta = beta
self.lambda1 = lambda1
self.lambda2 = lambda2
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.engine = engine
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.alpha <= 0:
return
nz = param_init_net.ConstantFill(
[param], str(param) + "_ftrl_nz", extra_shape=[2], value=0.0
)
self._aux_params.local.append(nz)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
net.SparseFtrl(
[param, nz, grad.indices, grad.values],
[param, nz],
engine=self.engine,
alpha=self.alpha,
beta=self.beta,
lambda1=self.lambda1,
lambda2=self.lambda2,
)
else:
net.Ftrl(
[param, nz, grad],
[param, nz],
engine=self.engine,
alpha=self.alpha,
beta=self.beta,
lambda1=self.lambda1,
lambda2=self.lambda2,
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
class GFtrlOptimizer(Optimizer):
"""Group Lasso FTRL Optimizer."""
def __init__(
self,
alpha=0.01,
beta=1e-4,
lambda1=0,
lambda2=0,
sparse_dedup_aggregator=None,
engine="",
):
super(GFtrlOptimizer, self).__init__()
self.alpha = alpha
self.beta = beta
self.lambda1 = lambda1
self.lambda2 = lambda2
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.engine = engine
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.alpha <= 0:
return
nz = param_init_net.ConstantFill(
[param], str(param) + "_gftrl_nz", extra_shape=[2], value=0.0
)
self._aux_params.local.append(nz)
net.GFtrl(
[param, nz, grad],
[param, nz],
engine=self.engine,
alpha=self.alpha,
beta=self.beta,
lambda1=self.lambda1,
lambda2=self.lambda2,
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
class AdamOptimizer(Optimizer):
def __init__(
self,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
policy="fixed",
use_lr_adaption=False,
lr_alpha=0.01,
normalized_lr_adaption=True,
sparse_dedup_aggregator=None,
rowWise=False,
engine="",
enableRAdam=False,
use_smart_decay=False, # See https://fburl.com/2jdiwrhy for context.
**kwargs
):
super(AdamOptimizer, self).__init__()
self.alpha = alpha
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.policy = policy
self.use_lr_adaption = use_lr_adaption
self.lr_alpha = lr_alpha
self.normalized_lr_adaption = normalized_lr_adaption
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.rowWise = rowWise
self.engine = engine
self.enableRAdam = enableRAdam
if use_smart_decay:
if rowWise:
raise NotImplementedError(('Smart decay is not implemented for rowWise Adam. '
'Set rowWise or use_smart_decay to False.'))
if enableRAdam:
raise NotImplementedError(('Smart decay is not implemented for RAdam. '
'Set enableRAdam or use_smart_decay to False.'))
if use_lr_adaption:
raise NotImplementedError(('Smart decay is not implemented with lr_adaption. '
'Set use_lr_adaption or use_smart_decay to False.'))
self.use_smart_decay = use_smart_decay
self.init_kwargs = kwargs
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.alpha <= 0:
return
lr, iteration = self.build_lr(
net,
param_init_net,
base_learning_rate=self.alpha,
policy=self.policy,
**(self.init_kwargs)
)
m1 = param_init_net.ConstantFill([param], param + "_first_moment", value=0.0)
if self.rowWise:
shapes, types = workspace.InferShapesAndTypes([param_init_net])
m2 = param_init_net.ConstantFill(
[], param + "_avg_second_moment", shape=[shapes[param][0]], value=0.0
)
else:
m2 = param_init_net.ConstantFill(
[param], param + "_second_moment", value=0.0
)
# Initialize "minibatch in which this parameter was last seen" for smart decay.
if self.use_smart_decay:
shapes, _ = workspace.InferShapesAndTypes([param_init_net])
last_seen = param_init_net.ConstantFill(
[], param + "_last_seen", shape=[shapes[param][0]], value=0, dtype=core.DataType.INT64
)
self._aux_params.local.append(last_seen)
self._aux_params.shared.append(iteration)
self._aux_params.local.append(m1)
self._aux_params.local.append(m2)
if self.rowWise:
assert isinstance(grad, core.GradientSlice), (
"If SparseAdam with rowWise=True, gradient must be "
"a gradientslice. PLease ensure that rowWise is not enabled "
"for the dense Adam optimizer, as it is not supported."
)
output_blobs = [param, m1, m2]
if self.use_smart_decay:
output_blobs.append(last_seen)
if self.use_lr_adaption:
effective_grad = str(param) + "_effective_grad"
output_blobs.append(effective_grad)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
if self.rowWise:
op = "RowWiseSparseAdam"
elif self.use_smart_decay:
op = "SmartDecaySparseAdam"
else:
op = "SparseAdam"
            # Currently, only SparseAdam supports RAdam; other Adam ops will
            # add support later
if op == "SparseAdam":
net.__getattr__(op)(
[param, m1, m2, grad.indices, grad.values, lr, iteration],
output_blobs,
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon,
enableRAdam=self.enableRAdam,
)
elif op == "SmartDecaySparseAdam":
net.__getattr__(op)(
[param, m1, m2, last_seen, grad.indices, grad.values, lr, iteration],
output_blobs,
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon,
)
else:
assert (
not self.enableRAdam
), "Currently, RowWiseSparseAdam is not supported by RAdam!"
net.__getattr__(op)(
[param, m1, m2, grad.indices, grad.values, lr, iteration],
output_blobs,
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon,
)
if self.use_lr_adaption:
net.LearningRateAdaption(
[lr, grad.values, effective_grad],
[lr],
lr_alpha=self.lr_alpha,
normalized_lr_adaption=self.normalized_lr_adaption,
)
else:
net.Adam(
[param, m1, m2, grad, lr, iteration],
output_blobs,
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon,
)
if self.use_lr_adaption:
net.LearningRateAdaption(
[lr, grad, effective_grad],
[lr],
lr_alpha=self.lr_alpha,
normalized_lr_adaption=self.normalized_lr_adaption,
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
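# Usage sketch (illustrative, not part of the original module): build_adam()
# (defined later in this module) wires AdamOptimizer into a model; the
# iteration blob created by build_lr() is shared across parameters and drives
# the bias-correction terms.
def _example_build_adam(model):
    return build_adam(
        model,
        base_learning_rate=0.001,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-8,
    )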
class DecayAdagradOptimizer(Optimizer):
def __init__(
self,
alpha=0.01,
beta1=0.0,
beta2=0.999,
epsilon=0.1,
weight_decay=0.0,
ema_options=None,
bias_correction_first=True,
policy="fixed",
engine="",
**kwargs
):
super(DecayAdagradOptimizer, self).__init__()
self.alpha = alpha
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.weight_decay = weight_decay
self.bias_correction_first = bias_correction_first
self.policy = policy
self.engine = engine
self.init_kwargs = kwargs
self._process_ema_options(ema_options)
def _process_ema_options(self, ema_options):
self.ema_enabled = True if ema_options else False
if self.ema_enabled:
self.ema_start = ema_options.get("ema_start", None)
self.ema_end = ema_options.get("ema_end", None)
self.ema_step = ema_options.get("ema_step", None)
self.ema_alpha = ema_options.get("ema_alpha", None)
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.alpha <= 0:
return
lr, iteration = self.build_lr(
net,
param_init_net,
base_learning_rate=self.alpha,
policy=self.policy,
**(self.init_kwargs)
)
if isinstance(grad, core.GradientSlice):
# hack for position weighted.
param_squared_sum = param_init_net.ConstantFill([param], param + "_squared_sum", value=0.0)
self._aux_params.local.append(param_squared_sum)
output_blobs = [param, param_squared_sum]
net.SparseAdagrad(
[param, param_squared_sum, grad.indices, grad.values, lr],
output_blobs,
epsilon=self.epsilon,
)
else:
m1 = param_init_net.ConstantFill([param], param + "_first_mo1ment", value=0.0)
m2 = param_init_net.ConstantFill([param], param + "_second_moment", value=0.0)
self._aux_params.shared.append(iteration)
self._aux_params.local.append(m1)
self._aux_params.local.append(m2)
output_blobs = [param, m1, m2]
net.DecayAdagrad(
[param, m1, m2, grad, lr, iteration],
output_blobs,
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon,
weight_decay=self.weight_decay,
bias_correction_first=self.bias_correction_first,
)
if self.ema_enabled:
param_ema = str(param) + "_ema"
if not param_init_net.BlobIsDefined(param_ema):
param_init_net.ConstantFill([param], param_ema, value=0.0)
self._aux_params.local.append(param_ema)
net.EMA(
[param, param_ema, iteration],
[param, param_ema],
ema_start=self.ema_start,
ema_end=self.ema_end,
ema_step=self.ema_step,
ema_alpha=self.ema_alpha,
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
class YellowFinOptimizer(Optimizer):
"""YellowFin: An automatic tuner for momentum SGD
    See https://arxiv.org/abs/1706.03471 for more details. This implementation
    maintains a separate learning rate and momentum for each parameter."""
def __init__(
self,
alpha=0.1,
mu=0.0,
beta=0.999,
curv_win_width=20,
zero_debias=True,
epsilon=0.1 ** 6,
policy="fixed",
sparse_dedup_aggregator=None,
**kwargs
):
super(YellowFinOptimizer, self).__init__()
self.alpha = alpha
self.mu = mu
self.beta = beta
self.curv_win_width = curv_win_width
self.zero_debias = zero_debias
self.epsilon = epsilon
self.policy = policy
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.init_kwargs = kwargs
def _run(self, net, param_init_net, param_info):
        # Note: this is the number of persistent scalars in the YellowFin
        # optimizer. It should always equal the number of scalars being used,
        # and the same number must be used by the YellowFin operator.
SCALARS_MEMORY_SIZE = 5
param = param_info.blob
grad = param_info.grad
moment = param_init_net.ConstantFill([param], param + "_moment", value=0.0)
curv_win = param_init_net.ConstantFill(
[], param + "_curv_win", shape=[self.curv_win_width], value=0.0
)
g_avg = param_init_net.ConstantFill([param], param + "_g_avg", value=0.0)
g2_avg = param_init_net.ConstantFill([param], param + "_g2_avg", value=0.0)
lr_avg = param_init_net.ConstantFill(
[], param + "_lr_avg", shape=[1], value=self.alpha
)
mu_avg = param_init_net.ConstantFill(
[], param + "_mu_avg", shape=[1], value=self.mu
)
scalars_memory = param_init_net.ConstantFill(
[], param + "_scalars_memory", shape=[SCALARS_MEMORY_SIZE], value=0.0
)
assert self.alpha > 0
assert not isinstance(
grad, core.GradientSlice
), "YellowFin does not support sparse gradients"
iteration = utils.BuildUniqueMutexIter(param_init_net, net, iter_val=0)
self._aux_params.shared.append(iteration)
self._aux_params.local.append(moment)
self._aux_params.local.append(lr_avg)
self._aux_params.local.append(mu_avg)
self._aux_params.local.append(curv_win)
self._aux_params.local.append(g_avg)
self._aux_params.local.append(g2_avg)
self._aux_params.local.append(scalars_memory)
yf_in_out_args = [
param,
moment,
lr_avg,
mu_avg,
curv_win,
g_avg,
g2_avg,
scalars_memory,
]
net.YellowFin(
yf_in_out_args + [grad, iteration],
yf_in_out_args,
beta=self.beta,
epsilon=self.epsilon,
curv_win_width=self.curv_win_width,
zero_debias=self.zero_debias,
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
class RmsPropOptimizer(Optimizer):
def __init__(
self,
alpha=0.01,
decay=0.9,
momentum=0.0,
epsilon=1e-5,
policy="fixed",
engine="",
**kwargs
):
super(RmsPropOptimizer, self).__init__()
self.alpha = alpha
self.decay = decay
self.momentum = momentum
self.epsilon = epsilon
self.policy = policy
self.engine = engine
self.init_kwargs = kwargs
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
assert self.alpha > 0
assert not isinstance(
grad, core.GradientSlice
), "RmsPropOptimizer doesn't support sparse gradients"
dev = scope.CurrentDeviceScope()
if dev is None:
dev = core.DeviceOption(caffe2_pb2.CPU)
ONE = param_init_net.ConstantFill(
[], "ONE_{}_{}".format(dev.device_type, dev.device_id), shape=[1], value=1.0
)
lr, _ = self.build_lr(
net,
param_init_net,
base_learning_rate=-self.alpha,
policy=self.policy,
**(self.init_kwargs)
)
        grad_o = param_init_net.ConstantFill(
            [param], str(param) + "_grad_o", value=0.0
        )
        ms = param_init_net.ConstantFill(
            [param], str(param) + "_mean_squares", value=0.0
        )
        mom = param_init_net.ConstantFill([param], str(param) + "_momentum", value=0.0)
self._aux_params.local.append(ms)
self._aux_params.local.append(mom)
net.RmsProp(
[grad, ms, mom, ONE],
[grad_o, ms, mom],
decay=self.decay,
momentum=self.momentum,
epsilon=self.epsilon,
engine=self.engine,
)
net.MomentumSGDUpdate([grad_o, mom, lr, param], [grad_o, mom, param])
def scale_learning_rate(self, scale):
self.alpha *= scale
return
def _get_param_to_device(model):
# Infer blob devices by going through the net and param_init_net
# ops and observing the device used to create or use the blob.
param_to_device = core.InferBlobDevices(model.net)
param_to_device.update(core.InferBlobDevices(model.param_init_net))
return param_to_device
def get_param_device(param_name, grad, param_to_device=None, default_device=None):
device = default_device
param_to_device = param_to_device or {}
    # We first check whether the parameter's device has been inferred. If not,
    # we check the gradient. This can happen if the parameter is not output
    # by any op but was created by a FetchBlob.
if param_name in param_to_device:
device = param_to_device[param_name]
else:
if isinstance(grad, core.GradientSlice):
if str(grad.values) in param_to_device:
device = param_to_device[str(grad.values)]
elif str(grad.indices) in param_to_device:
device = param_to_device[str(grad.indices)]
else:
grad_name = str(grad)
if grad_name in param_to_device:
device = param_to_device[grad_name]
assert device is not None, "Cannot infer device for {}: no op creates it".format(
param_name
)
return device
def get_lr_injection():
"""
Gets current value for lr_injection, a multiplier for all base
learning rates.
Must set allow_lr_injection=True when building optimizer, as it
relies on synchronization over CPU.
"""
return workspace.FetchBlob(_LEARNING_RATE_INJECTION)
def set_lr_injection(lr_injection_value):
"""
Sets lr_injection, a multiplier for all base learning rates.
Must set allow_lr_injection=True when building optimizer, as it
relies on synchronization over CPU.
"""
workspace.FeedBlob(
_LEARNING_RATE_INJECTION,
np.array([float(lr_injection_value)], dtype=np.float32),
)
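# Usage sketch (illustrative, not part of the original module): lr_injection
# rescales every base learning rate at runtime without rebuilding the net; it
# only takes effect if the optimizer was built with allow_lr_injection=True.
def _example_halve_all_learning_rates():
    current = float(get_lr_injection()[0])
    set_lr_injection(0.5 * current)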
def _calc_norm_ratio(model, params, name_scope, param_to_device, max_gradient_norm):
with core.NameScope(name_scope):
grad_squared_sums = []
for i, param in enumerate(params):
device = get_param_device(str(param.blob), param.grad, param_to_device)
with core.DeviceScope(device):
grad = (
param.grad
if not isinstance(param.grad, core.GradientSlice)
else param.grad.values
)
grad_squared_sum_name = "grad_{}_squared_sum".format(i)
grad_squared_sum = model.net.SumSqrElements(grad, grad_squared_sum_name)
grad_squared_sum_cpu = model.net.EnsureCPUOutput(grad_squared_sum)
grad_squared_sums.append(grad_squared_sum_cpu)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
grad_squared_full_sum = model.net.Sum(
grad_squared_sums, "grad_squared_full_sum"
)
global_norm = model.net.Pow(
grad_squared_full_sum, "global_norm", exponent=0.5
)
clip_norm = model.param_init_net.ConstantFill(
[], "clip_norm", shape=[], value=float(max_gradient_norm)
)
max_norm = model.net.Max([global_norm, clip_norm], "max_norm")
norm_ratio = model.net.Div([clip_norm, max_norm], "norm_ratio")
return norm_ratio
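# Worked sketch (plain numpy, illustrative, not part of the original module):
# the norm_ratio computed above is clip_norm / max(global_norm, clip_norm),
# which is exactly 1.0 while the global gradient norm stays within
# max_gradient_norm, and < 1.0 (shrinking the effective learning rate) once
# the norm exceeds the bound.
def _example_norm_ratio(grads, max_gradient_norm):
    global_norm = np.sqrt(sum(float((g * g).sum()) for g in grads))
    return max_gradient_norm / max(global_norm, max_gradient_norm)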
def _build(
model,
optimizer,
weights_only=False,
use_param_info_optim=True,
max_gradient_norm=None,
allow_lr_injection=False,
):
param_to_device = _get_param_to_device(model)
# Validate there are no duplicate params
model.Validate()
params = []
for param_info in model.GetOptimizationParamInfo():
if weights_only and param_info.blob not in model.weights:
continue
params.append(param_info)
lr_multiplier = None
if max_gradient_norm is not None:
lr_multiplier = _calc_norm_ratio(
model,
params,
"norm_clipped_grad_update",
param_to_device,
max_gradient_norm,
)
if allow_lr_injection:
if not model.net.BlobIsDefined(_LEARNING_RATE_INJECTION):
lr_injection = model.param_init_net.ConstantFill(
[], _LEARNING_RATE_INJECTION, shape=[1], value=1.0
)
else:
lr_injection = _LEARNING_RATE_INJECTION
if lr_multiplier is None:
lr_multiplier = lr_injection
else:
lr_multiplier = model.net.Mul(
[lr_multiplier, lr_injection], "lr_multiplier", broadcast=1
)
optimizer.add_lr_multiplier(lr_multiplier)
for param_info in params:
param_name = str(param_info.blob)
device = get_param_device(param_name, param_info.grad, param_to_device)
with core.DeviceScope(device):
if param_info.optimizer and use_param_info_optim:
param_info.optimizer(model.net, model.param_init_net, param_info)
else:
optimizer(model.net, model.param_init_net, param_info)
return optimizer
def add_weight_decay(model, weight_decay):
"""Adds a decay to weights in the model.
This is a form of L2 regularization.
Args:
weight_decay: strength of the regularization
"""
_build(
model,
WeightDecayBuilder(weight_decay=weight_decay),
weights_only=True,
use_param_info_optim=False,
)
def build_sgd(
model,
base_learning_rate,
max_gradient_norm=None,
allow_lr_injection=False,
**kwargs
):
sgd_optimizer = SgdOptimizer(base_learning_rate, **kwargs)
return _build(
model,
sgd_optimizer,
max_gradient_norm=max_gradient_norm,
allow_lr_injection=allow_lr_injection,
)
def build_multi_precision_sgd(
model,
base_learning_rate,
max_gradient_norm=None,
allow_lr_injection=False,
**kwargs
):
multi_prec_sgd_optimizer = MultiPrecisionSgdOptimizer(base_learning_rate, **kwargs)
return _build(
model,
multi_prec_sgd_optimizer,
max_gradient_norm=max_gradient_norm,
allow_lr_injection=allow_lr_injection,
)
def build_fp16_sgd(model, base_learning_rate, **kwargs):
fp16_sgd_optimizer = FP16SgdOptimizer(base_learning_rate, **kwargs)
return _build(model, fp16_sgd_optimizer)
def build_ftrl(model, engine="SIMD", **kwargs):
if engine == "SIMD":
assert core.IsOperator("Ftrl_ENGINE_SIMD")
assert core.IsOperator("SparseFtrl_ENGINE_SIMD")
ftrl_optimizer = FtrlOptimizer(engine=engine, **kwargs)
return _build(model, ftrl_optimizer)
def build_gftrl(model, engine="", **kwargs):
if engine == "SIMD":
assert core.IsOperator("GFtrl_ENGINE_SIMD")
gftrl_optimizer = GFtrlOptimizer(engine=engine, **kwargs)
return _build(model, gftrl_optimizer)
def build_adagrad(
model,
base_learning_rate,
parameters=None,
max_gradient_norm=None,
allow_lr_injection=False,
**kwargs
):
adagrad_optimizer = AdagradOptimizer(alpha=base_learning_rate, **kwargs)
return _build(
model,
adagrad_optimizer,
max_gradient_norm=max_gradient_norm,
allow_lr_injection=allow_lr_injection,
)
def build_wngrad(
model,
base_learning_rate,
parameters=None,
max_gradient_norm=None,
allow_lr_injection=False,
**kwargs
):
wngrad_optimizer = WngradOptimizer(alpha=base_learning_rate, **kwargs)
return _build(
model,
wngrad_optimizer,
max_gradient_norm=max_gradient_norm,
allow_lr_injection=allow_lr_injection,
)
def build_storm(
model,
base_learning_rate,
parameters=None,
max_gradient_norm=None,
allow_lr_injection=False,
**kwargs
):
storm_optimizer = StormOptimizer(lr=base_learning_rate, **kwargs)
return _build(
model,
storm_optimizer,
max_gradient_norm=max_gradient_norm,
allow_lr_injection=allow_lr_injection,
)
def build_adadelta(
model,
base_learning_rate,
parameters=None,
max_gradient_norm=None,
allow_lr_injection=False,
**kwargs
):
adadelta_optimizer = AdadeltaOptimizer(alpha=base_learning_rate, **kwargs)
return _build(
model,
adadelta_optimizer,
max_gradient_norm=max_gradient_norm,
allow_lr_injection=allow_lr_injection,
)
def build_adam(
model,
base_learning_rate,
max_gradient_norm=None,
allow_lr_injection=False,
**kwargs
):
adam_optimizer = AdamOptimizer(alpha=base_learning_rate, **kwargs)
return _build(
model,
adam_optimizer,
max_gradient_norm=max_gradient_norm,
allow_lr_injection=allow_lr_injection,
)
def build_decay_adagrad(
model,
base_learning_rate,
max_gradient_norm=None,
allow_lr_injection=False,
**kwargs
):
decay_adagrad_optimizer = DecayAdagradOptimizer(alpha=base_learning_rate, **kwargs)
return _build(
model,
decay_adagrad_optimizer,
max_gradient_norm=max_gradient_norm,
allow_lr_injection=allow_lr_injection,
)
def build_yellowfin(model, base_learning_rate=0.1, **kwargs):
yellowfin_optimizer = YellowFinOptimizer(alpha=base_learning_rate, **kwargs)
return _build(model, yellowfin_optimizer)
def build_rms_prop(
model,
base_learning_rate,
max_gradient_norm=None,
allow_lr_injection=False,
**kwargs
):
rms_prop_optimizer = RmsPropOptimizer(alpha=base_learning_rate, **kwargs)
return _build(
model,
rms_prop_optimizer,
max_gradient_norm=max_gradient_norm,
allow_lr_injection=allow_lr_injection,
)
|
pytorch-master
|
caffe2/python/optimizer.py
|
"""unittest for ModelHelper class"""
import unittest
from caffe2.python import brew, model_helper
class ModelHelperTest(unittest.TestCase):
def test_get_complete_net_type(self):
model = model_helper.ModelHelper("test_orig")
brew.conv(
model,
"input",
"conv",
dim_in=3,
dim_out=16,
weight_init=("MSRAFill", {}),
kernel=3,
stride=1,
pad=0,
)
model.net.Proto().type = "async_scheduling"
net = model.GetCompleteNet()
model2 = model_helper.ModelHelper("test_new")
model2.ConstructInitTrainNetfromNet(net)
self.assertTrue(model2.net.Proto().type, "async_scheduling")
self.assertTrue(model2.param_init_net.Proto().type, "async_scheduling")
def test_get_complete_net(self):
model = model_helper.ModelHelper("test_orig")
conv = brew.conv(
model,
"input",
"conv",
dim_in=3,
dim_out=16,
weight_init=("MSRAFill", {}),
kernel=3,
stride=1,
pad=0,
)
conv = brew.spatial_bn(model, conv, "conv_bn", 16, epsilon=1e-3, is_test=False)
conv = brew.relu(model, conv, "conv_relu")
pred = brew.fc(model, conv, "pred", dim_in=16 * 3 * 3, dim_out=10)
brew.softmax(model, pred, "softmax")
net = model.GetCompleteNet()
model2 = model_helper.ModelHelper("test_new")
model2.ConstructInitTrainNetfromNet(net)
net = model.param_init_net
net2 = model2.param_init_net
for op1, op2 in zip(net.Proto().op, net2.Proto().op):
op1.debug_info = op1.debug_info + "/param_init_net"
self.assertEqual(
op1, op2, "op mismatch between {}\n and {}\n".format(op1, op2)
)
net = model.net
net2 = model2.net
for op1, op2 in zip(net.Proto().op, net2.Proto().op):
self.assertEqual(
op1, op2, "op mismatch between {}\n and {}\n".format(op1, op2)
)
        # this is not guaranteed in other situations where the user defines their own net
self.assertEqual(
sorted(map(str, net.external_inputs)),
sorted(map(str, net2.external_inputs)),
)
|
pytorch-master
|
caffe2/python/model_helper_test.py
|
# @package optimizer
# Module caffe2.python.regularizer
from caffe2.python import core, utils
import numpy as np
class RegularizationBy(object):
AFTER_OPTIMIZER = "after_optimizer"
ON_LOSS = "on_loss"
class Regularizer(object):
def __init__(self):
self.kEpsilon = 1e-9
"""
    Adds regularization to train_net for the given parameter. The
    regularization factor is given at initialization time.
The param should be a BlobReference.
"""
def __call__(self, net, param_init_net, param, grad=None, by=None):
assert isinstance(param, core.BlobReference)
by_enum = utils.EnumClassKeyVals(RegularizationBy)
assert by in by_enum.values(), (
"Regularizer of type {} is called with invalid by={}, "
"not in {}".format(self.__class__, by, by_enum.values())
)
run_func = "_run_" + by
assert hasattr(
self, run_func
), "Regularizer of type {} does not implement function {}".format(
self.__class__, run_func
)
return getattr(self, run_func)(net, param_init_net, param, grad)
def _run_on_loss(self, net, param_init_net, param, grad=None):
return None
def _run_after_optimizer(self, net, param_init_net, param, grad):
return None
def _feature_grouping(self, param, net):
# Possible alternative grouping method via summing over absolute values
# Compute l2norm over feature weights
        # pow( sum_i { pow(theta_i, 2) } , 0.5)
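        # NumPy sketch of the grouping below (an assumption for illustration):
        #   grouped = np.sqrt((param ** 2).sum(axis=0))
        # ReduceFrontSum reduces over the leading dimension, so the Mul/Pow
        # chain computes a per-column l2 norm of the weight matrix.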
param_mul = net.Mul([param, param], [net.NextScopedBlob("param_mul")])
param_reduced = net.ReduceFrontSum(
[param_mul], [net.NextScopedBlob("param_reduced")]
)
grouped_feature_weight_vec = net.Pow(
[param_reduced],
[net.NextScopedBlob("grouped_feature_weight_vec")],
exponent=0.5,
)
return grouped_feature_weight_vec
def _ensure_clipped(
self,
net,
param,
grad=None,
min=None,
max=None,
open_range=False,
left_open=False,
right_open=False,
):
min = (
min + self.kEpsilon
if min is not None and (open_range or left_open)
else min
)
max = (
max - self.kEpsilon
if max is not None and (open_range or right_open)
else max
)
input_blobs = (
[param, grad.indices, grad.values]
if isinstance(grad, core.GradientSlice)
else [param]
)
net.EnsureClipped(input_blobs, [param], min=min, max=max)
class L1Norm(Regularizer):
def __init__(self, reg_lambda):
super(L1Norm, self).__init__()
assert reg_lambda >= 0, "factor ahead of regularization should be 0 or positive"
self.reg_lambda = reg_lambda
def _run_on_loss(self, net, param_init_net, param, grad=None):
output_blob = net.NextScopedBlob(param + "_l1_regularization")
net.LpNorm([param], [output_blob], p=1)
net.Scale([output_blob], [output_blob], scale=self.reg_lambda)
return output_blob
class LpNorm(Regularizer):
def __init__(self, reg_lambda, p_value=0.5):
"""
reg_lambda: parameter to scale regularization by
p_value: determines what type of Lp norm to calculate. If p > 0,
we will calculate Lp norm with the formula:
            pow( sum_i { pow(theta_i, p) } , 1/p)
"""
super(LpNorm, self).__init__()
assert reg_lambda > 0, "factor ahead of regularization should be greater than 0"
assert p_value > 0, "p_value factor should be greater than 0"
self.p_value = p_value
self.reg_lambda = reg_lambda
def _run_on_loss(self, net, param_init_net, param, grad=None):
# TODO: the second dim (num of input nodes) of param is after feature preproc,
# and does not correspond to the original num of dense features.
# In the future, will want to create a util to reduce the input dim of param to
# match the num of dense features.
output_blob = net.NextScopedBlob(param + "_dense_feature_regularization")
grouped_feature_weight_vec = self._feature_grouping(param, net)
# Compute Lpnorm:
        # pow( sum_i { pow(theta_i, p) } , 1/p)
lp_vec_raised = net.Pow(
[grouped_feature_weight_vec],
[net.NextScopedBlob("lp_vec_raised")],
exponent=self.p_value,
)
lp_vec_summed = net.ReduceFrontSum(
[lp_vec_raised], [net.NextScopedBlob("lp_vec_summed")]
)
lp_norm = net.Pow(
[lp_vec_summed],
[net.NextScopedBlob("lp_vec")],
exponent=(1 / self.p_value),
)
net.Scale([lp_norm], [output_blob], scale=self.reg_lambda)
return output_blob
class L0ApproxNorm(Regularizer):
def __init__(self, reg_lambda, alpha=0.01, budget=0):
"""
reg_lambda: parameter to scale regularization by
alpha: hyper parameter to tune that is only used in the calculation
of approximate L0 norm
budget: desired number of features. If the number of features is greater
than the budget amount, then the least important features will
be penalized. If there are fewer features than the desired
budget, no penalization will be applied. Optional parameter, if
0, then no budget is used
"""
super(L0ApproxNorm, self).__init__()
assert reg_lambda > 0, "factor ahead of regularization should be greater than 0"
assert alpha > 0, "alpha factor must be a positive value greater than 0"
assert budget >= 0, "budget factor must be greater than or equal to 0"
self.reg_lambda = reg_lambda
self.alpha = alpha
self.budget = float(budget) # budget must be float for future calculations
def _run_on_loss(self, net, param_init_net, param, grad=None):
# TODO: the second dim (num of input nodes) of param is after feature preproc,
# and does not correspond to the original num of dense features.
# In the future, will want to create a util to reduce the input dim of param to
# match the num of dense features.
output_blob = net.NextScopedBlob(param + "_dense_feature_regularization")
grouped_feature_weight_vec = self._feature_grouping(param, net)
# compute approximate L0 norm
        # sum_i min( abs(theta_i), alpha ) / alpha
l0_abs = net.Abs([grouped_feature_weight_vec], [net.NextScopedBlob("l0_abs")])
l0_min = net.Clip([l0_abs], [net.NextScopedBlob("l0_min")], max=self.alpha)
l0_summed = net.ReduceFrontSum([l0_min], [net.NextScopedBlob("l0_summed")])
l0_norm = net.Scale(
[l0_summed], [net.NextScopedBlob("l0_norm")], scale=(1 / self.alpha)
)
# incorporate budget factor
# regularization = reg_lambda * max(0, l0_norm - budget)
if self.budget:
budget_blob = net.ConstantFill([], "budget", shape=[1], value=self.budget)
l0_sub_budget = net.Sub(
[l0_norm, budget_blob], [net.NextScopedBlob("l0_budget")]
)
relu_l0_sub_budget = net.Relu(
[l0_sub_budget], [net.NextScopedBlob("relu_l0_sub_budget")]
)
net.Scale([relu_l0_sub_budget], [output_blob], scale=self.reg_lambda)
else:
net.Scale([l0_norm], [output_blob], scale=self.reg_lambda)
return output_blob
class L1NormTrimmed(Regularizer):
"""
The Trimmed Lasso: Sparsity and Robustness. https://arxiv.org/abs/1708.04527
"""
def __init__(self, reg_lambda, k):
super(L1NormTrimmed, self).__init__()
assert reg_lambda >= 0, "factor ahead of regularization should be 0 or positive"
        assert isinstance(k, int), "k should be an integer giving the expected number of weights kept after trimming"
        assert k >= 1, "k should be at least 1"
self.reg_lambda = reg_lambda
self.k = k
def _run_on_loss(self, net, param_init_net, param, grad=None):
output_blob = net.NextScopedBlob(param + "_l1_trimmed_regularization")
abs = net.Abs([param], [net.NextScopedBlob("abs")])
sum_abs = net.SumElements([abs], [net.NextScopedBlob("sum_abs")], average=False)
topk, _, _ = net.TopK([abs], [net.NextScopedBlob("topk"), net.NextScopedBlob("id"), net.NextScopedBlob("flat_id")], k=self.k)
topk_sum = net.SumElements([topk], [net.NextScopedBlob("topk_sum")], average=False)
net.Sub([sum_abs, topk_sum], [output_blob])
net.Scale([output_blob], [output_blob], scale=self.reg_lambda)
return output_blob
class L2Norm(Regularizer):
def __init__(self, reg_lambda):
super(L2Norm, self).__init__()
assert reg_lambda >= 0, "factor ahead of regularization should be 0 or positive"
self.reg_lambda = reg_lambda
def _run_on_loss(self, net, param_init_net, param, grad=None):
output_blob = net.NextScopedBlob(param + "_l2_regularization")
net.LpNorm([param], [output_blob], p=2)
net.Scale([output_blob], [output_blob], scale=self.reg_lambda)
return output_blob
class ElasticNet(Regularizer):
def __init__(self, l1, l2):
super(ElasticNet, self).__init__()
self.l1 = l1
self.l2 = l2
def _run_on_loss(self, net, param_init_net, param, grad=None):
output_blob = net.NextScopedBlob(param + "_elastic_net_regularization")
l2_blob = net.NextScopedBlob(param + "_l2_blob")
l1_blob = net.NextScopedBlob(param + "_l1_blob")
net.LpNorm([param], [l2_blob], p=2)
net.LpNorm([param], [l1_blob], p=1)
net.Scale([l2_blob], [l2_blob], scale=self.l2)
net.Scale([l1_blob], [l1_blob], scale=self.l1)
net.Add([l1_blob, l2_blob], [output_blob])
return output_blob
class ElasticNetL1NormTrimmed(Regularizer):
def __init__(self, l1, l2, k):
super(ElasticNetL1NormTrimmed, self).__init__()
self.l1 = l1
self.l2 = l2
self.k = k
def _run_on_loss(self, net, param_init_net, param, grad=None):
output_blob = net.NextScopedBlob(param + "_elastic_net_l1_trimmed_regularization")
l2_blob = net.NextScopedBlob(param + "_l2_blob")
net.LpNorm([param], [l2_blob], p=2)
net.Scale([l2_blob], [l2_blob], scale=self.l2)
l1_blob = net.NextScopedBlob(param + "_l1_blob")
abs = net.Abs([param], [net.NextScopedBlob("abs")])
sum_abs = net.SumElements([abs], [net.NextScopedBlob("sum_abs")], average=False)
topk, _, _ = net.TopK([abs], [net.NextScopedBlob("topk"), net.NextScopedBlob("id"), net.NextScopedBlob("flat_id")], k=self.k)
topk_sum = net.SumElements([topk], [net.NextScopedBlob("topk_sum")], average=False)
net.Sub([sum_abs, topk_sum], [l1_blob])
net.Scale([l1_blob], [l1_blob], scale=self.l1)
net.Add([l1_blob, l2_blob], [output_blob])
return output_blob
class MaxNorm(Regularizer):
def __init__(self, norm=1.0, dtype=None):
super(MaxNorm, self).__init__()
self.norm = norm
self.dtype = dtype
def _run_after_optimizer(self, net, param_init_net, param, grad):
assert self.norm > 0, "norm should be bigger than 0."
if isinstance(grad, core.GradientSlice):
if self.dtype and self.dtype == 'fp16':
net.Float16SparseNormalize(
[param, grad.indices],
[param],
use_max_norm=True,
norm=self.norm,
)
else:
net.SparseNormalize(
[param, grad.indices],
[param],
use_max_norm=True,
norm=self.norm,
)
else:
raise NotImplementedError("MaxNorm is not supported for dense parameters")
class ConstantNorm(Regularizer):
def __init__(self, norm=1.0):
super(ConstantNorm, self).__init__()
self.norm = norm
def _run_after_optimizer(self, net, param_init_net, param, grad):
assert self.norm > 0, "norm should be bigger than 0."
if isinstance(grad, core.GradientSlice):
net.SparseNormalize(
[param, grad.indices],
[param],
use_max_norm=False,
norm=self.norm,
)
else:
raise NotImplementedError(
"ConstantNorm is not supported for dense parameters"
)
class SparseLpNorm(Regularizer):
def __init__(self, p, reg_lambda):
super(SparseLpNorm, self).__init__()
assert p in (1.0, 2.0), "Sparse Lp regularization only implemented for p = 1.0 and p = 2.0."
assert reg_lambda > 0, "factor ahead of regularization should be greater than 0."
self.p = p
self.reg_lambda = reg_lambda
def _run_after_optimizer(self, net, param_init_net, param, grad):
if isinstance(grad, core.GradientSlice):
net.SparseLpRegularizer(
[param, grad.indices],
[param],
p=self.p,
reg_lambda=self.reg_lambda,
)
else:
raise NotImplementedError("SparseLpNorm is not supported for dense parameters")
class SparseL1Norm(SparseLpNorm):
def __init__(self, reg_lambda):
super(SparseL1Norm, self).__init__(p=1.0, reg_lambda=reg_lambda)
class SparseL2Norm(SparseLpNorm):
def __init__(self, reg_lambda):
super(SparseL2Norm, self).__init__(p=2.0, reg_lambda=reg_lambda)
class LogBarrier(Regularizer):
"""
    Wright, S., & Nocedal, J. (1999). Numerical Optimization. Springer.
    Chapter 19.
"""
def __init__(self, reg_lambda, discount_policy="inv", discount_options=None):
"""
        discount is a positive, decreasing weight; here it is implemented
        similarly to the learning rate. It is specified by a learning rate
        policy and corresponding options.
"""
super(LogBarrier, self).__init__()
        assert reg_lambda > 0, "factor ahead of regularization should be greater than 0"
self.reg_lambda = reg_lambda
self.discount_policy = discount_policy
self.discount_options = discount_options or {"gamma": 1.0, "power": 1.0}
def _run_on_loss(self, net, param_init_net, param, grad=None):
iteration = utils.BuildUniqueMutexIter(param_init_net, net)
        # Since we are most likely doing minimization, the discount is negated
        # (base_lr < 0) so the log-barrier term is subtracted from the loss.
discount = net.NextScopedBlob(param + "_log_barrier_discount")
net.LearningRate(
[iteration],
[discount],
base_lr=-self.reg_lambda,
policy=self.discount_policy,
**self.discount_options
)
# TODO(xlwang): param might still be negative at the initialization time or
        # slightly negative due to the distributed training. Enforce its non-negativity
# for now (at least above machine epsilon)
param_non_neg = net.NextScopedBlob(param + "_non_neg")
net.Clip([param], [param_non_neg], min=self.kEpsilon)
param_log = net.NextScopedBlob(param + "_log")
net.Log([param_non_neg], [param_log])
param_log_sum = net.NextScopedBlob(param + "_log_sum")
net.SumElements([param_log], [param_log_sum])
output_blob = net.NextScopedBlob(param + "_log_barrier")
net.Mul([param_log_sum, discount], [output_blob], broadcast=1)
return output_blob
def _run_after_optimizer(self, net, param_init_net, param, grad):
self._ensure_clipped(net, param, grad, min=0, open_range=True)
class BoundedGradientProjection(Regularizer):
"""
    Wright, S., & Nocedal, J. (1999). Numerical Optimization. Springer.
    Chapter 16.
"""
def __init__(
self, lb=None, ub=None, left_open=False, right_open=False, epsilon=None
):
super(BoundedGradientProjection, self).__init__()
lb = float(lb) if lb is not None else None
ub = float(ub) if ub is not None else None
epsilon = float(epsilon) if epsilon is not None else self.kEpsilon
assert epsilon > 0, "Bounded Gradient Projection with invalid eps={eps}".format(
eps=epsilon
)
assert (
(lb is None)
or (ub is None)
or (
lb + (epsilon if left_open else 0.)
<= ub - (epsilon if right_open else 0.)
)
), (
"Bounded Gradient Projection with invalid "
"{lp}ub={ub}, lb={lb}{rp}, eps={eps}".format(
lb=lb,
ub=ub,
lp="(" if left_open else "[",
rp=")" if right_open else "]",
eps=epsilon,
)
)
self.left_open = left_open
self.right_open = right_open
self.kEpsilon = epsilon
self.lb = lb
self.ub = ub
def _run_after_optimizer(self, net, param_init_net, param, grad):
self._ensure_clipped(
net,
param,
grad,
min=self.lb,
max=self.ub,
left_open=self.left_open,
right_open=self.right_open,
)
class GroupL1Norm(Regularizer):
"""
Scardapane, Simone, et al. "Group sparse regularization for deep neural networks."
Neurocomputing 241 (2017): 81-89.
This regularizer computes l1 norm of a weight matrix based on groups.
There are essentially three stages in the computation:
1. Compute the l2 norm on all the members of each group
2. Scale each l2 norm by the size of each group
3. Compute the l1 norm of the scaled l2 norms
"""
def __init__(self, reg_lambda, groups, stabilizing_val=0):
"""
Args:
reg_lambda: The weight of the regularization term.
groups: A list of integers describing the size of each group.
The length of the list is the number of groups.
Optional Args:
            stabilizing_val: The computation of GroupL1Norm involves the Sqrt
                operator. When values are small, its gradient can be numerically
                unstable, causing gradient explosion. Adding this term stabilizes
                the gradient calculation. The recommended value is 1e-8, but it
                depends on the specific scenario. If the implementation of the
                Sqrt gradient operator already takes stability into
                consideration, this term is unnecessary.
"""
super(GroupL1Norm, self).__init__()
assert (
(reg_lambda) >= 0
), "regularization weight should be 0 or positive"
assert isinstance(groups, list), "groups needs to be a list"
self.reg_lambda = (reg_lambda)
self.groups = groups
self.stabilizing_val = stabilizing_val
def _run_on_loss(self, net, param_init_net, param, grad=None):
"""
Args:
param: The input blob to regularize. It should be a weight matrix
blob with shape (output_dim, input_dim). input_dim should be
equal to the sum of self.groups.
Returns:
group_l1_norm: The output blob after applying regularization.
These are the steps of computation:
1. square all elements
2. sum by row
        3. sum by group (LengthsSum)
4. square_root all elements
5. normalize each group based on group size
6. compute l1 norm of each group
7. scale the result with the regularization lambda
"""
squared = net.Sqr(param)
reduced_sum = net.ReduceSum(squared, axes=[0], keepdims=0)
lengths_sum = net.LengthsSum(
[
reduced_sum,
net.GivenTensorIntFill(
[], 1, shape=[len(self.groups)], values=self.groups
),
]
)
if self.stabilizing_val:
net.Add(
[lengths_sum, net.ConstantFill([], 1, value=self.stabilizing_val)],
[lengths_sum],
broadcast=1,
)
sqrt = net.Sqrt(lengths_sum)
# Here we combine step 5 and step 7 into one operator call to
# improve efficiency: values = np.sqrt(self.groups) * self.reg_lambda
l2_scaled = net.Mul(
[
sqrt,
net.GivenTensorFill(
[],
shape=[len(self.groups)],
values=np.sqrt(self.groups) * self.reg_lambda
)
],
['normalized_l2_norm_scaled']
)
        group_l1_norm = net.LpNorm(l2_scaled, ['group_l1_norm'], p=1)
return group_l1_norm
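# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the on-loss calling convention of the Regularizer API above;
# the net and parameter names are assumptions made for the example.
def _example_l1_regularizer():
    net = core.Net("reg_example")
    param_init_net = core.Net("reg_example_init")
    w = param_init_net.XavierFill([], "w", shape=[16, 8])
    reg = L1Norm(reg_lambda=0.01)
    # ON_LOSS returns a scalar blob holding reg_lambda * ||w||_1 that the
    # caller is expected to add to the training loss.
    penalty = reg(net, param_init_net, w, by=RegularizationBy.ON_LOSS)
    return penalty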
|
pytorch-master
|
caffe2/python/regularizer.py
|
## @package ideep_test_util
# Module caffe2.python.ideep_test_util
"""
The IDEEP test utils is a small addition on top of the hypothesis test utils
under caffe2/python, which allows one to more easily test IDEEP related
operators.
"""
import hypothesis.strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import hypothesis_test_util as hu
cpu_do = hu.cpu_do
ideep_do = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.IDEEP)
device_options = hu.device_options + ([ideep_do])
def device_checker_device_options():
return st.just(device_options)
def gradient_checker_device_option():
return st.sampled_from(device_options)
gcs = dict(
gc=gradient_checker_device_option(),
dc=device_checker_device_options()
)
gcs_cpu_only = dict(gc=st.sampled_from([cpu_do]), dc=st.just([cpu_do]))
gcs_ideep_only = dict(gc=st.sampled_from([ideep_do]), dc=st.just([ideep_do]))
gcs_cpu_ideep = dict(gc=st.sampled_from([cpu_do, ideep_do]), dc=st.just([cpu_do, ideep_do]))
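# --- Hedged usage sketch (not part of the original module) ---
# How the strategies above are typically consumed in an operator test; the
# operator choice and test class are assumptions made for illustration:
#
#     from caffe2.python import core
#     from hypothesis import given
#
#     class TestReluIDEEP(hu.HypothesisTestCase):
#         @given(X=hu.tensor(), **gcs)
#         def test_relu(self, X, gc, dc):
#             op = core.CreateOperator("Relu", ["X"], ["Y"], device_option=gc)
#             self.assertDeviceChecks(dc, op, [X], [0])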
|
pytorch-master
|
caffe2/python/ideep_test_util.py
|
from caffe2.python import context, test_util
from threading import Thread
class MyContext(context.Managed):
pass
class DefaultMyContext(context.DefaultManaged):
pass
class ChildMyContext(MyContext):
pass
class TestContext(test_util.TestCase):
def use_my_context(self):
try:
for _ in range(100):
with MyContext() as a:
for _ in range(100):
self.assertTrue(MyContext.current() == a)
except Exception as e:
self._exceptions.append(e)
def testMultiThreaded(self):
threads = []
self._exceptions = []
for _ in range(8):
thread = Thread(target=self.use_my_context)
thread.start()
threads.append(thread)
for t in threads:
t.join()
for e in self._exceptions:
raise e
@MyContext()
def testDecorator(self):
self.assertIsNotNone(MyContext.current())
def testNonDefaultCurrent(self):
with self.assertRaises(AssertionError):
MyContext.current()
ctx = MyContext()
self.assertEqual(MyContext.current(value=ctx), ctx)
self.assertIsNone(MyContext.current(required=False))
def testDefaultCurrent(self):
self.assertIsInstance(DefaultMyContext.current(), DefaultMyContext)
def testNestedContexts(self):
with MyContext() as ctx1:
with DefaultMyContext() as ctx2:
self.assertEqual(DefaultMyContext.current(), ctx2)
self.assertEqual(MyContext.current(), ctx1)
def testChildClasses(self):
with ChildMyContext() as ctx:
self.assertEqual(ChildMyContext.current(), ctx)
self.assertEqual(MyContext.current(), ctx)
|
pytorch-master
|
caffe2/python/context_test.py
|
# @package layer_model_helper
# Module caffe2.python.layer_model_helper
from caffe2.python import core, model_helper, schema, scope, utils, muji
from caffe2.python.modeling.parameter_info import (
ParameterInfo,
)
from caffe2.python.modeling.parameter_sharing import (
parameter_sharing_context,
)
from caffe2.python.modeling.net_modifier import NetModifier
from caffe2.python.optimizer import get_param_device, Optimizer
from caffe2.python.regularizer import Regularizer, RegularizationBy
from caffe2.python.layers import layers
from future.utils import viewitems, viewvalues
import logging
import numpy as np
import copy
logger = logging.getLogger(__name__)
class LayerModelHelper(model_helper.ModelHelper):
"""
Model helper for building models on top of layers abstractions.
Each layer is the abstraction that is higher level than Operator. Layer
is responsible for ownership of it's own parameters and can easily be
instantiated in multiple nets possible with different sets of ops.
As an example: one can easily instantiate predict and train nets from
the same set of layers, where predict net will have subset of the
operators from train net.
"""
def __init__(self, name, input_feature_schema, trainer_extra_schema,
keep_blobs=False,
use_attribution=True):
        ''' TODO(amalevich): more documentation on input args
        use_attribution:
            if True, will generate the attribution net for feature importance
            calculation; needs to be turned off when FC is quantized as FP16.
            This attribute access will be consistent with the MTML model.
'''
super(LayerModelHelper, self).__init__(name=name)
self._layer_names = set()
self._layers = []
self._param_to_shape = {}
# seed default
self._seed = None
self._sequence_seed = True
# optimizer bookkeeping
self.param_to_optim = {}
self.param_to_reg = {}
self._default_optimizer = None
self._loss = None
self._prediction = []
self._output_schema = None
self._post_grad_net_modifiers = []
self._final_net_modifiers = []
# breakdown map; breakdown features are categorical (like dense) but not
# necessarily used to represent data for training
self._breakdown_map = None
        # Connect Schema to self.net. That particular instance of schema will be
        # used for generation of the Layers across the network and for
        # connection with Readers.
self._input_feature_schema = schema.NewRecord(
self.net,
input_feature_schema
) if not keep_blobs else input_feature_schema.clone()
self._trainer_extra_schema = schema.NewRecord(
self.net,
trainer_extra_schema
) if not keep_blobs else trainer_extra_schema.clone()
self._metrics_schema = schema.Struct()
self._preproc_output_schema = None
self._init_global_constants()
self.param_init_net = self.create_init_net('param_init_net')
self._initialize_params = True
self._transfer_learning_blob_name_mappings = None
# additional (hard-coded) diagnose_options to report based on the model
        # TODO(xlwang): it's a hack!
self.ad_hoc_diagnose_blobs_and_operations = []
self.ad_hoc_plot_blobs = []
self.use_attribution = use_attribution
def clear_output_schema(self):
self._output_schema = None
def set_initialize_params(self, initialize_params):
self._initialize_params = initialize_params
def add_metric_field(self, name, value):
assert name not in self._metrics_schema.fields, (
"Try to add metric field twice: {}".format(name))
self._metrics_schema = self._metrics_schema + schema.Struct(
(name, value)
)
# an empty white_set will skip everything
def filter_metrics_schema(self, white_set):
logger.info("Filter metric schema with white_set {}".format(white_set))
field_names = self._metrics_schema.field_names()
for name in field_names:
if name not in white_set:
self._metrics_schema = self._metrics_schema - schema.Struct((name, schema.Scalar()))
def add_ad_hoc_plot_blob(self, blob, dtype=None):
assert isinstance(
blob, (str, core.BlobReference)
), "expect type str or BlobReference, but got {}".format(type(blob))
dtype = dtype or (np.float, (1, ))
self.add_metric_field(str(blob), schema.Scalar(dtype, blob))
self.ad_hoc_plot_blobs.append(blob)
@staticmethod
def _get_global_constant_initializer_op(
blob_name, array=None, dtype=None, initializer=None
):
        # to add a global constant to the model, one first needs to get the
        # initializer
if array is not None:
assert initializer is None,\
"Only one from array and initializer should be specified"
if dtype is None:
array = np.array(array)
else:
array = np.array(array, dtype=dtype)
# TODO: make GivenTensor generic
op_name = None
if array.dtype == np.int32:
op_name = 'GivenTensorIntFill'
elif array.dtype == np.int64:
op_name = 'GivenTensorInt64Fill'
elif array.dtype == np.str:
op_name = 'GivenTensorStringFill'
elif array.dtype == np.bool:
op_name = 'GivenTensorBoolFill'
else:
op_name = 'GivenTensorFill'
def initializer(blob_name):
return core.CreateOperator(
op_name, [],
blob_name,
shape=array.shape,
values=array.flatten().tolist()
)
else:
assert initializer is not None
initializer_op = initializer(blob_name)
return initializer_op
def add_global_constant(
self, name, array=None, dtype=None, initializer=None
):
assert isinstance(name, str), (
'name should be a string as we are using it as map key')
# This is global namescope for constants. They will be created in all
# init_nets and there should be very few of them.
assert name not in self.global_constants, \
"%s already added in global_constants" % name
blob_name = self.net.NextBlob(name)
self.global_constants[name] = blob_name
initializer_op = LayerModelHelper._get_global_constant_initializer_op(
blob_name, array, dtype, initializer
)
assert blob_name not in self.global_constant_initializers, \
"there is already a initializer op associated with blob %s" % \
blob_name
self.global_constant_initializers[blob_name] = initializer_op
return blob_name
def maybe_add_global_constant(self, name, *args, **kwargs):
        # To add new global constants ad hoc without duplication:
# if the name was already registered in global_constants, it will not be
# added even if the intended value is different from its original value
if name in self.global_constants:
blob_name = self.global_constants[name]
initializer_op = \
LayerModelHelper._get_global_constant_initializer_op(
blob_name, *args, **kwargs
)
# check if the original initializer is the same as the one intended
# now
assert utils.OpAlmostEqual(
initializer_op,
self.global_constant_initializers[blob_name],
'debug_info'
), \
"conflict initializers for global constant %s, " \
"previous %s, now %s" % (
blob_name, str(initializer_op),
str(self.global_constant_initializers[blob_name]))
return blob_name
return self.add_global_constant(name, *args, **kwargs)
def _init_global_constants(self):
self.global_constants = {}
self.global_constant_initializers = {}
self.add_global_constant('ONE', 1.0)
self.add_global_constant('NAN', float("NaN"))
self.add_global_constant('ZERO', 0.0)
self.add_global_constant('ZERO_RANGE', [0, 0], dtype='int32')
def _add_global_constants(self, init_net):
for initializer_op in viewvalues(self.global_constant_initializers):
init_net._net.op.extend([initializer_op])
def create_init_net(self, name):
init_net = core.Net(name)
self._add_global_constants(init_net)
return init_net
def _validate_param_shape(self, param_name, shape):
if param_name not in self._param_to_shape:
return
ref_shape = self._param_to_shape[param_name]
if shape != ref_shape:
raise ValueError(
"Got inconsistent shapes between shared parameters "
"when trying to map a blob in scope {0} to {1}. ref_shape : "
" {2}, shape : {3}".format(
scope.CurrentNameScope(), param_name, ref_shape, shape)
)
def _validate_param_optim(self, param_name, optim):
# there are three possible values for optim:
# 1) None (which will use self._default_optimizer after this layer is instantiated)
# 2) self.NoOptim
# 3) an instance of Optimizer class such as AdagradOptimizer
# this implies this parameter is not shared with any other parameter so far
if param_name not in self.param_to_optim:
return
logger.info("{} shares the same parameter with another parameter. "
"Validating if the same optimizer has been specified for them.".format(
param_name,
))
ref_optim = self.param_to_optim[param_name]
if optim is None:
assert ref_optim == self._default_optimizer, (
"Optim for {} is None which will fall back to use default_optimizer. "
"However, the optimizer that has been specified for this shared parameter "
"is {} which is different from default_optimizer {}. "
"Please check the optimizers specified for parameters shared "
"with {} and the default_optimizer to ensure the consistency.".format(
param_name, ref_optim, self._default_optimizer, param_name
)
)
elif optim == self.NoOptim:
assert ref_optim == self.NoOptim, (
"Optim for {} is NoOptim. However, the optimizer for the parameters "
"shared with {} is {} which is different from NoOptim. "
"Please check the optimizer specified for other parameters in the "
"shared group to ensure consistency.".format(
param_name, param_name, ref_optim
)
)
elif isinstance(optim, Optimizer):
assert isinstance(ref_optim, Optimizer), (
"Optim for {} is an instance of Optimizer. However, the optimizer "
"for the parameters shared with {} is {} which is not an instance "
"of Optimizer. Please check the optimizer specified for other "
" parameters in the shared group to ensure consistency.".format(
param_name, param_name, ref_optim, optim
)
)
assert type(optim) is type(ref_optim) and optim.attributes == ref_optim.attributes, (
"Optim for {} is an instance of Optimizer. However, the optimizer "
"for the parameters shared with {} is {}. "
"This optimizer either doesn't have the same type as the current optimizer: "
"{} vs {}, or its attributes such as learning rate are different from "
"that of current optimizer which is {} vs {}. "
"Please check the optimizer specified for other parameters in the "
"shared group to ensure consistency.".format(
param_name, param_name, ref_optim, type(optim), type(ref_optim), optim.attributes, ref_optim.attributes
)
)
else:
raise ValueError("optim should be either None, NoOptim, or an instance of Optimizer, Got {} ".format(optim))
def create_param(self, param_name, shape, initializer, optimizer=None,
ps_param=None, regularizer=None):
if isinstance(param_name, core.BlobReference):
param_name = str(param_name)
elif isinstance(param_name, str):
            # Parameter name will be equal to the current Namescope, resolved
            # with respect to the parameter sharing of the scopes.
param_name = parameter_sharing_context.get_parameter_name(
param_name)
else:
raise ValueError("Unsupported type for param_name")
param_blob = core.BlobReference(param_name)
if len(initializer) == 1:
init_op_args = {}
else:
assert len(initializer) == 2
init_op_args = copy.deepcopy(initializer[1])
if shape is not None:
assert 'shape' not in init_op_args
init_op_args.update({'shape': shape})
initializer_op = None
if self._initialize_params:
initializer_op = core.CreateOperator(
initializer[0],
[],
param_blob,
**init_op_args
)
param = layers.LayerParameter(
parameter=param_blob,
initializer=initializer_op,
optimizer=optimizer,
ps_param=ps_param,
regularizer=regularizer
)
self._validate_param_shape(param_name, shape)
self._validate_param_optim(param_name, optimizer)
self._param_to_shape[param_name] = shape
return param
def next_layer_name(self, prefix):
base_name = core.ScopedName(prefix)
name = base_name
index = 0
while name in self._layer_names:
name = base_name + '_auto_' + str(index)
index += 1
self._layer_names.add(name)
return name
def add_layer(self, layer):
self._layers.append(layer)
for param in layer.get_parameters():
assert isinstance(param.parameter, core.BlobReference)
self.param_to_optim[str(param.parameter)] = \
param.optimizer or self.default_optimizer
self.params.append(param.parameter)
if isinstance(param, layers.LayerParameter):
logger.info("Add parameter regularizer {0}".format(param.parameter))
self.param_to_reg[param.parameter] = param.regularizer
elif isinstance(param, ParameterInfo):
# TODO:
# Currently, LSTM and RNNcells, which use ModelHelper instead of
# LayerModelHelper as super class, are called in pooling_methods
# In ModelHelper, regularization is not supported in create_param
# We will unify the way of create_param of ModelHelper and
# LayerModelHelper in the future.
logger.info('regularization is unsupported for ParameterInfo object')
else:
raise ValueError(
'unknown object type besides ParameterInfo and LayerParameter: {}'
.format(param)
)
        # The primary value of adding everything to self.net is that the
        # operators are generated right away, i.e. if an error happens it'll be
        # detected immediately. Beyond this, create_x_net should be called.
layer.add_operators(self.net, self.param_init_net)
return layer.output_schema
def get_parameter_blobs(self):
param_blobs = []
for layer in self._layers:
for param in layer.get_parameters():
param_blobs.append(param.parameter)
return param_blobs
def add_post_grad_net_modifiers(self, modifier):
assert modifier not in self._post_grad_net_modifiers,\
"{0} is already in {1}".format(modifier, self._post_grad_net_modifiers)
assert isinstance(modifier, NetModifier),\
"{} has to be a NetModifier instance".format(modifier)
self._post_grad_net_modifiers.append(modifier)
def add_final_net_modifiers(self, modifier):
assert modifier not in self._final_net_modifiers,\
"{0} is already in {1}".format(modifier, self._final_net_modifiers)
assert isinstance(modifier, NetModifier),\
"{} has to be a NetModifier instance".format(modifier)
self._final_net_modifiers.append(modifier)
@property
def seed(self):
return self._seed
@property
def sequence_seed(self):
return self._sequence_seed
def store_seed(self, seed, sequence_seed=True):
# Store seed config that will be applied to each op in the net.
self._seed = seed
# If sequence_seed is True, the i-th op has rand_seed=`seed + i`
self._sequence_seed = sequence_seed
def apply_seed(self, net):
if self._seed:
net.set_rand_seed(self._seed, self._sequence_seed)
@property
def default_optimizer(self):
return self._default_optimizer
@default_optimizer.setter
def default_optimizer(self, optimizer):
self._default_optimizer = optimizer
@property
def input_feature_schema(self):
return self._input_feature_schema
@property
def trainer_extra_schema(self):
return self._trainer_extra_schema
@property
def metrics_schema(self):
"""
Returns the schema that represents model output that should be used for
metric reporting.
During the training/evaluation this schema will be appended to the
schema that represents model output.
"""
return self._metrics_schema
@property
def output_schema(self):
assert self._output_schema is not None
return self._output_schema
@output_schema.setter
def output_schema(self, schema):
assert self._output_schema is None
self._output_schema = schema
@property
def preproc_output_schema(self):
assert self._preproc_output_schema is not None
return self._preproc_output_schema
@preproc_output_schema.setter
def preproc_output_schema(self, schema):
assert self._preproc_output_schema is None
self._preproc_output_schema = schema
@property
def prediction(self):
assert self._prediction, "model prediction is empty"
return self._prediction
def add_prediction(self, prediction, weight=1.0):
assert prediction is not None, "Added prediction should not be None"
self._prediction.append((prediction, weight))
@property
def transfer_learning_blob_name_mappings(self):
return self._transfer_learning_blob_name_mappings
@transfer_learning_blob_name_mappings.setter
def transfer_learning_blob_name_mappings(self, blob_name_mappings):
assert blob_name_mappings is not None, "Transfer learning blob name mappings should not be None"
self._transfer_learning_blob_name_mappings = blob_name_mappings
@property
def loss(self):
assert self._loss is not None
return self._loss
@loss.setter
def loss(self, loss):
assert self._loss is None
self._loss = loss
def has_loss(self):
return self._loss is not None
def add_loss(self, loss, name='unnamed'):
assert loss is not None, "Added loss should not be None"
assert isinstance(loss, schema.Scalar) or isinstance(
loss, schema.Struct
), "Added loss should be a scalar or a struct"
if self._loss is None:
self._loss = schema.Struct((name, loss))
else:
# loss could've been set through model.loss directly which could be
# a scalar
if isinstance(self._loss, schema.Scalar):
self._loss = schema.Struct(('unnamed', self._loss))
prefix_base = name + '_auto_'
index = 0
prefix = name
while prefix in self._loss:
prefix = prefix_base + str(index)
index += 1
loss_struct = schema.Struct((prefix, loss))
self._loss = self._loss + loss_struct
def add_output_schema(self, name, value):
assert value is not None, \
'Added output schema {} should not be None'.format(name)
assert isinstance(value, schema.Scalar) or \
isinstance(value, schema.Struct), \
'Added output schema {} should be a scalar or a struct.\n\
Now it is {}.'.format(name, type(value))
if self._output_schema is None: # be the first field
self._output_schema = schema.Struct((name, value))
else: # merge with other fields
assert name not in self._output_schema.fields, \
'Output Schema Field {} already exists'.format(name)
self._output_schema = \
self._output_schema + schema.Struct((name, value))
def add_trainer_extra_schema(self, trainer_extra_schema):
trainer_extra_record = schema.NewRecord(self.net, trainer_extra_schema)
self._trainer_extra_schema += trainer_extra_record
def __getattr__(self, layer):
def is_functional_layer(layer):
if core.IsOperator(layer):
return True
elif layer.startswith('FunctionalLayer'):
return True
else:
return False
def resolve_functional_layer(layer):
if core.IsOperator(layer):
return layer
elif layer.startswith('FunctionalLayer'):
return layer[len('FunctionalLayer'):]
else:
raise ValueError(
'%s cannot be resolved as functional layer' % layer
)
if layer.startswith('__'):
raise AttributeError(layer)
        # TODO(amalevich): Add support for ifbpy inline documentation
if layers.layer_exists(layer):
def wrapper(*args, **kwargs):
new_layer = layers.create_layer(layer, self, *args, **kwargs)
if kwargs.get("output_to_metrics", False):
new_layer.export_output_for_metrics()
if kwargs.get("params_to_metrics", False):
new_layer.export_params_for_metrics()
return self.add_layer(new_layer)
return wrapper
elif is_functional_layer(layer):
            # TODO(xlwang): Designated layer shadows the usage of an op as a
# single layer. To enforce using an op (e.g. Split) as functional
# layer, one can call 'model.FunctionalLayerSplit'
layer = resolve_functional_layer(layer)
def wrapper(*args, **kwargs):
def apply_operator(net, in_record, out_record, **kwargs):
# TODO(amalevich): Switch to net.operator as soon as it gets
# landed
net.__getattr__(layer)(in_record.field_blobs(),
out_record.field_blobs(),
**kwargs)
if 'name' not in kwargs:
kwargs['name'] = layer
new_layer = layers.create_layer(
'Functional',
self, *args, function=apply_operator,
**kwargs
)
if kwargs.get("output_to_metrics", False):
new_layer.export_output_for_metrics()
if kwargs.get("params_to_metrics", False):
new_layer.export_params_for_metrics()
return self.add_layer(new_layer)
return wrapper
else:
# this needs to be an AttributeError to fit hasattr semantics
raise AttributeError(
"Trying to create non-registered layer: {}".format(layer))
@property
def layers(self):
return self._layers
def apply_regularizers_on_loss(
self,
train_net,
train_init_net,
blob_to_device=None,
):
logger.info("apply regularizer on loss")
for param, regularizer in viewitems(self.param_to_reg):
if regularizer is None:
continue
logger.info("add regularizer {0} for param {1} to loss".format(regularizer, param))
assert isinstance(regularizer, Regularizer)
added_loss_blob = regularizer(train_net, train_init_net, param, grad=None,
by=RegularizationBy.ON_LOSS)
logger.info(added_loss_blob)
if added_loss_blob is not None:
self.add_loss(
schema.Scalar(blob=added_loss_blob),
str(added_loss_blob)
)
def apply_regularizers_after_optimizer(
self,
train_net,
train_init_net,
grad_map,
blob_to_device=None,
):
logger.info("apply regularizer after optimizer")
CPU = muji.OnCPU()
# if given, blob_to_device is a map from blob to device_option
blob_to_device = blob_to_device or {}
for param, regularizer in viewitems(self.param_to_reg):
if regularizer is None:
continue
assert isinstance(regularizer, Regularizer)
logger.info("add regularizer {0} for param {1} to optimizer".format(regularizer, param))
device = get_param_device(
param,
grad_map.get(str(param)),
param_to_device=blob_to_device,
default_device=CPU,
)
with core.DeviceScope(device):
regularizer(
train_net, train_init_net, param, grad=grad_map.get(str(param)),
by=RegularizationBy.AFTER_OPTIMIZER
)
def apply_post_grad_net_modifiers(
self,
trainer_net,
trainer_init_net,
grad_map,
blob_to_device=None,
modify_output_record=False,
):
param_grad_map = {param: grad_map[param]
for param in self.param_to_optim.keys() if param in grad_map}
for modifier in self._post_grad_net_modifiers:
modifier(trainer_net, trainer_init_net, param_grad_map,
blob_to_device=blob_to_device,
modify_output_record=modify_output_record)
def apply_final_net_modifiers(
self,
trainer_net,
trainer_init_net,
grad_map,
blob_to_device=None,
modify_output_record=False,
):
for modifier in self._final_net_modifiers:
modifier(trainer_net, trainer_init_net, grad_map,
blob_to_device=blob_to_device,
modify_output_record=modify_output_record)
def apply_optimizers(
self,
train_net,
train_init_net,
grad_map,
blob_to_device=None,
):
CPU = muji.OnCPU()
# if given, blob_to_device is a map from blob to device_option
blob_to_device = blob_to_device or {}
for param, optimizer in viewitems(self.param_to_optim):
assert optimizer is not None, \
"default optimizer must have been set in add_layer"
            # note that not all params have a gradient, and thus we send None
            # if the gradient does not exist
device = get_param_device(
param,
grad_map.get(str(param)),
param_to_device=blob_to_device,
default_device=CPU,
)
if device is not None:
# extra info is not applicable for optimizers
del device.extra_info[:]
with core.DeviceScope(device):
optimizer(
train_net, train_init_net, param, grad_map.get(str(param)))
def _GetOne(self):
return self.global_constants['ONE']
# An optimizer which allows us to do NO optimization
def NoOptim(self, *args, **kwargs):
pass
@property
def breakdown_map(self):
return self._breakdown_map
@breakdown_map.setter
def breakdown_map(self, breakdown_map):
# TODO(xlwang): provide more rich feature information in breakdown_map;
# and change the assertion accordingly
assert isinstance(breakdown_map, dict)
assert all(isinstance(k, str) for k in breakdown_map)
assert sorted(breakdown_map.values()) == list(range(len(breakdown_map)))
self._breakdown_map = breakdown_map
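# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of driving LayerModelHelper with a schema; the
# feature name and layer choice are assumptions made for the example.
def _example_layer_model():
    input_record = schema.Struct(
        ('dense', schema.Scalar((np.float32, (4, )))),
    )
    model = LayerModelHelper(
        'example', input_record, schema.Struct(), keep_blobs=False)
    # __getattr__ above resolves 'FC' to the registered FC layer and adds it.
    fc_output = model.FC(model.input_feature_schema.dense, 2)
    model.output_schema = fc_output
    return model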
|
pytorch-master
|
caffe2/python/layer_model_helper.py
|
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
import time
SHAPE_LEN = 4096
NUM_ITER = 1000
GB = 1024 * 1024 * 1024
NUM_REPLICAS = 48
def build_net(net_name, cross_socket):
init_net = core.Net(net_name + "_init")
init_net.Proto().type = "async_scheduling"
numa_device_option = caffe2_pb2.DeviceOption()
numa_device_option.device_type = caffe2_pb2.CPU
numa_device_option.numa_node_id = 0
for replica_id in range(NUM_REPLICAS):
init_net.XavierFill([], net_name + "/input_blob_" + str(replica_id),
shape=[SHAPE_LEN, SHAPE_LEN], device_option=numa_device_option)
net = core.Net(net_name)
net.Proto().type = "async_scheduling"
if cross_socket:
numa_device_option.numa_node_id = 1
for replica_id in range(NUM_REPLICAS):
net.Copy(net_name + "/input_blob_" + str(replica_id),
net_name + "/output_blob_" + str(replica_id),
device_option=numa_device_option)
return init_net, net
def main():
assert workspace.IsNUMAEnabled() and workspace.GetNumNUMANodes() >= 2
single_init, single_net = build_net("single_net", False)
cross_init, cross_net = build_net("cross_net", True)
workspace.CreateNet(single_init)
workspace.RunNet(single_init.Name())
workspace.CreateNet(cross_init)
workspace.RunNet(cross_init.Name())
workspace.CreateNet(single_net)
workspace.CreateNet(cross_net)
for _ in range(4):
t = time.time()
workspace.RunNet(single_net.Name(), NUM_ITER)
dt = time.time() - t
print("Single socket time:", dt)
single_bw = 4 * SHAPE_LEN * SHAPE_LEN * NUM_REPLICAS * NUM_ITER / dt / GB
print("Single socket BW: {} GB/s".format(single_bw))
t = time.time()
workspace.RunNet(cross_net.Name(), NUM_ITER)
dt = time.time() - t
print("Cross socket time:", dt)
cross_bw = 4 * SHAPE_LEN * SHAPE_LEN * NUM_REPLICAS * NUM_ITER / dt / GB
print("Cross socket BW: {} GB/s".format(cross_bw))
print("Single BW / Cross BW: {}".format(single_bw / cross_bw))
if __name__ == '__main__':
core.GlobalInit(["caffe2", "--caffe2_cpu_numa_enabled=1"])
main()
|
pytorch-master
|
caffe2/python/numa_benchmark.py
|
from caffe2.python import workspace, crf
from caffe2.python.cnn import CNNModelHelper
from caffe2.python.crf_predict import crf_update_predictions
from caffe2.python.test_util import TestCase
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
class TestCrfDecode(TestCase):
@given(num_tags=st.integers(2, 4), num_words=st.integers(2, 15))
@settings(deadline=2000)
def test_crf_viterbi(self, num_tags, num_words):
model = CNNModelHelper(name='external')
predictions = np.random.randn(num_words, num_tags).astype(np.float32)
transitions = np.random.uniform(
low=-1, high=1, size=(num_tags + 2, num_tags + 2)
).astype(np.float32)
predictions_blob, transitions_blob = (
model.net.AddExternalInputs('predictions', 'crf_transitions')
)
workspace.FeedBlob(str(transitions_blob), transitions)
workspace.FeedBlob(str(predictions_blob), predictions)
crf_layer = crf.CRFWithLoss(model, num_tags, transitions_blob)
updated_predictions = crf_update_predictions(
model, crf_layer, predictions_blob
)
ref_predictions = crf_layer.update_predictions(predictions_blob)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
updated_predictions = workspace.FetchBlob(str(updated_predictions))
ref_predictions = workspace.FetchBlob(str(ref_predictions))
np.testing.assert_allclose(
updated_predictions,
ref_predictions,
atol=1e-4, rtol=1e-4, err_msg='Mismatch in CRF predictions'
)
|
pytorch-master
|
caffe2/python/crf_viterbi_test.py
|
import numpy as np
import unittest
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import brew, core, model_helper, rnn_cell
import caffe2.python.workspace as ws
class TestObservers(unittest.TestCase):
def setUp(self):
core.GlobalInit(["python", "caffe2"])
ws.ResetWorkspace()
self.model = model_helper.ModelHelper()
brew.fc(self.model, "data", "y",
dim_in=4, dim_out=2,
weight_init=('ConstantFill', dict(value=1.0)),
bias_init=('ConstantFill', dict(value=0.0)),
axis=0)
ws.FeedBlob("data", np.zeros([4], dtype='float32'))
ws.RunNetOnce(self.model.param_init_net)
ws.CreateNet(self.model.net)
def testObserver(self):
ob = self.model.net.AddObserver("TimeObserver")
ws.RunNet(self.model.net)
print(ob.average_time())
num = self.model.net.NumObservers()
self.model.net.RemoveObserver(ob)
assert(self.model.net.NumObservers() + 1 == num)
@given(
num_layers=st.integers(1, 4),
forward_only=st.booleans()
)
@settings(deadline=1000)
def test_observer_rnn_executor(self, num_layers, forward_only):
'''
        Test that the RNN executor produces the same results as
        the non-executor (i.e. running step nets as a sequence of simple nets).
'''
Tseq = [2, 3, 4]
batch_size = 10
input_dim = 3
hidden_dim = 3
run_cnt = [0] * len(Tseq)
avg_time = [0] * len(Tseq)
for j in range(len(Tseq)):
T = Tseq[j]
ws.ResetWorkspace()
ws.FeedBlob(
"seq_lengths",
np.array([T] * batch_size, dtype=np.int32)
)
ws.FeedBlob("target", np.random.rand(
T, batch_size, hidden_dim).astype(np.float32))
ws.FeedBlob("hidden_init", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
ws.FeedBlob("cell_init", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
model = model_helper.ModelHelper(name="lstm")
model.net.AddExternalInputs(["input"])
init_blobs = []
for i in range(num_layers):
hidden_init, cell_init = model.net.AddExternalInputs(
"hidden_init_{}".format(i),
"cell_init_{}".format(i)
)
init_blobs.extend([hidden_init, cell_init])
output, last_hidden, _, last_state = rnn_cell.LSTM(
model=model,
input_blob="input",
seq_lengths="seq_lengths",
initial_states=init_blobs,
dim_in=input_dim,
dim_out=[hidden_dim] * num_layers,
drop_states=True,
forward_only=forward_only,
return_last_layer_only=True,
)
loss = model.AveragedLoss(
model.SquaredL2Distance([output, "target"], "dist"),
"loss"
)
# Add gradient ops
if not forward_only:
model.AddGradientOperators([loss])
# init
for init_blob in init_blobs:
ws.FeedBlob(init_blob, np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
ws.RunNetOnce(model.param_init_net)
# Run with executor
self.enable_rnn_executor(model.net, 1, forward_only)
np.random.seed(10022015)
input_shape = [T, batch_size, input_dim]
ws.FeedBlob(
"input",
np.random.rand(*input_shape).astype(np.float32)
)
ws.FeedBlob(
"target",
np.random.rand(
T,
batch_size,
hidden_dim
).astype(np.float32)
)
ws.CreateNet(model.net, overwrite=True)
time_ob = model.net.AddObserver("TimeObserver")
run_cnt_ob = model.net.AddObserver("RunCountObserver")
ws.RunNet(model.net)
avg_time[j] = time_ob.average_time()
run_cnt[j] = int(''.join(x for x in run_cnt_ob.debug_info() if x.isdigit()))
model.net.RemoveObserver(time_ob)
model.net.RemoveObserver(run_cnt_ob)
print(avg_time)
print(run_cnt)
self.assertTrue(run_cnt[1] > run_cnt[0] and run_cnt[2] > run_cnt[1])
self.assertEqual(run_cnt[1] - run_cnt[0], run_cnt[2] - run_cnt[1])
def enable_rnn_executor(self, net, value, forward_only):
num_found = 0
for op in net.Proto().op:
if op.type.startswith("RecurrentNetwork"):
for arg in op.arg:
if arg.name == 'enable_rnn_executor':
arg.i = value
num_found += 1
# This sanity check is so that if someone changes the
# enable_rnn_executor parameter name, the test will
# start failing as this function will become defective.
self.assertEqual(1 if forward_only else 2, num_found)
|
pytorch-master
|
caffe2/python/observer_test.py
|
import numpy as np
import unittest
from caffe2.python import core, workspace, tt_core
import caffe2.python.hypothesis_test_util as hu
class TestTTSVD(hu.HypothesisTestCase):
def test_full_tt_svd(self):
size = 256
np.random.seed(1234)
X = np.expand_dims(
np.random.rand(size).astype(np.float32), axis=0)
W = np.random.rand(size, size).astype(np.float32)
b = np.zeros(size).astype(np.float32)
inp_sizes = [4, 4, 4, 4]
out_sizes = [4, 4, 4, 4]
op_fc = core.CreateOperator(
"FC",
["X", "W", "b"],
["Y"],
)
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.RunOperatorOnce(op_fc)
Y_fc = workspace.FetchBlob("Y").flatten()
# Testing TT-decomposition with high ranks
full_tt_ranks = [1, 16, 256, 16, 1]
full_cores = tt_core.matrix_to_tt(W, inp_sizes, out_sizes,
full_tt_ranks)
full_op_tt = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=full_tt_ranks,
)
workspace.FeedBlob("X", X)
workspace.FeedBlob("b", b)
workspace.FeedBlob("cores", full_cores)
workspace.RunOperatorOnce(full_op_tt)
Y_full_tt = workspace.FetchBlob("Y").flatten()
assert(len(Y_fc) == len(Y_full_tt))
self.assertAlmostEquals(np.linalg.norm(Y_fc - Y_full_tt), 0, delta=1e-3)
# Testing TT-decomposition with minimal ranks
sparse_tt_ranks = [1, 1, 1, 1, 1]
sparse_cores = tt_core.matrix_to_tt(W, inp_sizes, out_sizes,
sparse_tt_ranks)
sparse_op_tt = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=sparse_tt_ranks,
)
workspace.FeedBlob("X", X)
workspace.FeedBlob("b", b)
workspace.FeedBlob("cores", sparse_cores)
workspace.RunOperatorOnce(sparse_op_tt)
Y_sparse_tt = workspace.FetchBlob("Y").flatten()
assert(len(Y_fc) == len(Y_sparse_tt))
self.assertAlmostEquals(np.linalg.norm(Y_fc - Y_sparse_tt),
39.974, delta=1e-3)
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
caffe2/python/tt_core_test.py
|
## @package lstm_benchmark
# Module caffe2.python.lstm_benchmark
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, utils, rnn_cell, model_helper
from caffe2.python import recurrent
import argparse
import numpy as np
import time
import logging
logging.basicConfig()
log = logging.getLogger("lstm_bench")
log.setLevel(logging.DEBUG)
def generate_data(T, shape, num_labels, fixed_shape):
'''
Fill a queue with input data
'''
log.info("Generating T={} sequence batches".format(T))
generate_input_init_net = core.Net('generate_input_init')
queue = generate_input_init_net.CreateBlobsQueue(
[], "inputqueue", num_blobs=1, capacity=T,
)
label_queue = generate_input_init_net.CreateBlobsQueue(
[], "labelqueue", num_blobs=1, capacity=T,
)
workspace.RunNetOnce(generate_input_init_net)
generate_input_net = core.Net('generate_input')
generate_input_net.EnqueueBlobs([queue, "scratch"], ["scratch"])
generate_input_net.EnqueueBlobs([label_queue, "label_scr"], ["label_scr"])
np.random.seed(2603)
entry_counts = []
for t in range(T):
if (t % (max(10, T // 10)) == 0):
print("Generating data {}/{}".format(t, T))
# Randomize the seqlength
random_shape = (
[np.random.randint(1, shape[0])] + shape[1:]
if t > 0 and not fixed_shape else shape
)
X = np.random.rand(*random_shape).astype(np.float32)
batch_size = random_shape[1]
L = num_labels * batch_size
labels = (np.random.rand(random_shape[0]) * L).astype(np.int32)
workspace.FeedBlob("scratch", X)
workspace.FeedBlob("label_scr", labels)
workspace.RunNetOnce(generate_input_net.Proto())
entry_counts.append(random_shape[0] * random_shape[1])
log.info("Finished data generation")
return queue, label_queue, entry_counts
def create_model(args, queue, label_queue, input_shape):
model = model_helper.ModelHelper(name="LSTM_bench")
seq_lengths, target = \
model.net.AddExternalInputs(
'seq_lengths',
'target',
)
input_blob = model.net.DequeueBlobs(queue, "input_data")
labels = model.net.DequeueBlobs(label_queue, "label")
init_blobs = []
if args.implementation in ["own", "static", "static_dag"]:
T = None
if "static" in args.implementation:
assert args.fixed_shape, \
"Random input length is not static RNN compatible"
T = args.seq_length
print("Using static RNN of size {}".format(T))
for i in range(args.num_layers):
hidden_init, cell_init = model.net.AddExternalInputs(
"hidden_init_{}".format(i),
"cell_init_{}".format(i)
)
init_blobs.extend([hidden_init, cell_init])
output, last_hidden, _, last_state = rnn_cell.LSTM(
model=model,
input_blob=input_blob,
seq_lengths=seq_lengths,
initial_states=init_blobs,
dim_in=args.input_dim,
dim_out=[args.hidden_dim] * args.num_layers,
scope="lstm1",
memory_optimization=args.memory_optimization,
forward_only=args.forward_only,
drop_states=True,
return_last_layer_only=True,
static_rnn_unroll_size=T,
)
if "dag" in args.implementation:
print("Using DAG net type")
model.net.Proto().type = 'dag'
model.net.Proto().num_workers = 4
elif args.implementation == "cudnn":
# We need to feed a placeholder input so that RecurrentInitOp
# can infer the dimensions.
init_blobs = model.net.AddExternalInputs("hidden_init", "cell_init")
model.param_init_net.ConstantFill([], input_blob, shape=input_shape)
output, last_hidden, _ = rnn_cell.cudnn_LSTM(
model=model,
input_blob=input_blob,
initial_states=init_blobs,
dim_in=args.input_dim,
dim_out=args.hidden_dim,
scope="cudnnlstm",
num_layers=args.num_layers,
)
else:
assert False, "Unknown implementation"
weights = model.net.UniformFill(labels, "weights")
softmax, loss = model.net.SoftmaxWithLoss(
[model.Flatten(output), labels, weights],
['softmax', 'loss'],
)
if not args.forward_only:
model.AddGradientOperators([loss])
# carry states over
for init_blob in init_blobs:
model.net.Copy(last_hidden, init_blob)
sz = args.hidden_dim
if args.implementation == "cudnn":
sz *= args.num_layers
workspace.FeedBlob(init_blob, np.zeros(
[1, args.batch_size, sz], dtype=np.float32
))
if args.rnn_executor:
for op in model.net.Proto().op:
if op.type.startswith('RecurrentNetwork'):
recurrent.set_rnn_executor_config(
op,
num_threads=args.rnn_executor_num_threads,
max_cuda_streams=args.rnn_executor_max_cuda_streams,
)
return model, output
def Caffe2LSTM(args):
T = args.data_size // args.batch_size
input_blob_shape = [args.seq_length, args.batch_size, args.input_dim]
queue, label_queue, entry_counts = generate_data(T // args.seq_length,
input_blob_shape,
args.hidden_dim,
args.fixed_shape)
workspace.FeedBlob(
"seq_lengths",
np.array([args.seq_length] * args.batch_size, dtype=np.int32)
)
model, output = create_model(args, queue, label_queue, input_blob_shape)
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
start_time = time.time()
num_iters = T // args.seq_length
total_iters = 0
# Run the Benchmark
log.info("------ Warming up ------")
workspace.RunNet(model.net.Proto().name)
if (args.gpu):
log.info("Memory stats:")
stats = utils.GetGPUMemoryUsageStats()
log.info("GPU memory:\t{} MB".format(stats['max_total'] / 1024 / 1024))
log.info("------ Starting benchmark ------")
start_time = time.time()
last_time = time.time()
for iteration in range(1, num_iters, args.iters_to_report):
iters_once = min(args.iters_to_report, num_iters - iteration)
total_iters += iters_once
workspace.RunNet(model.net.Proto().name, iters_once)
new_time = time.time()
log.info(
"Iter: {} / {}. Entries Per Second: {}k.".format(
iteration,
num_iters,
np.sum(entry_counts[iteration:iteration + iters_once]) /
(new_time - last_time) // 100 / 10,
)
)
last_time = new_time
log.info("Done. Total EPS excluding 1st iteration: {}k {}".format(
np.sum(entry_counts[1:]) / (time.time() - start_time) // 100 / 10,
" (with RNN executor)" if args.rnn_executor else "",
))
if (args.gpu):
log.info("Memory stats:")
stats = utils.GetGPUMemoryUsageStats()
log.info("GPU memory:\t{} MB".format(stats['max_total'] / 1024 / 1024))
if stats['max_total'] != stats['total']:
log.warning(
"Max usage differs from current total usage: {} > {}".
format(stats['max_total'], stats['total'])
)
log.warning("This means that costly deallocations occurred.")
return time.time() - start_time
@utils.debug
def Benchmark(args):
return Caffe2LSTM(args)
def GetArgumentParser():
parser = argparse.ArgumentParser(description="LSTM benchmark.")
parser.add_argument(
"--hidden_dim",
type=int,
default=800,
help="Hidden dimension",
)
parser.add_argument(
"--input_dim",
type=int,
default=40,
help="Input dimension",
)
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="The batch size."
)
parser.add_argument(
"--seq_length",
type=int,
default=20,
help="Max sequence length"
)
parser.add_argument(
"--data_size",
type=int,
default=1000000,
help="Number of data points to generate"
)
parser.add_argument(
"--iters_to_report",
type=int,
default=20,
help="Number of iteration to report progress"
)
parser.add_argument(
"--gpu",
action="store_true",
help="Run all on GPU",
)
parser.add_argument(
"--implementation",
type=str,
default="own",
help="'cudnn', 'own', 'static' or 'static_dag'",
)
parser.add_argument(
"--fixed_shape",
action="store_true",
help=("Whether to randomize shape of input batches. "
"Static RNN requires fixed shape"),
)
parser.add_argument(
"--memory_optimization",
action="store_true",
help="Whether to use memory optimized LSTM or not",
)
parser.add_argument(
"--forward_only",
action="store_true",
help="Whether to run only forward pass"
)
parser.add_argument(
"--num_layers",
type=int,
default=1,
help="Number of LSTM layers. All output dimensions are going to be"
"of hidden_dim size",
)
parser.add_argument(
"--rnn_executor",
action="store_true",
help="Whether to use RNN executor"
)
parser.add_argument(
"--rnn_executor_num_threads",
type=int,
default=None,
help="Number of threads used by CPU RNN Executor"
)
parser.add_argument(
"--rnn_executor_max_cuda_streams",
type=int,
default=None,
help="Maximum number of CUDA streams used by RNN executor on GPU"
)
return parser
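# Example invocation (illustrative; flags as defined by the parser above):
#   python lstm_benchmark.py --gpu --implementation cudnn \
#       --seq_length 20 --batch_size 128 --hidden_dim 800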
if __name__ == '__main__':
args, extra_args = GetArgumentParser().parse_known_args()
rnn_executor_opt = 1 if args.rnn_executor else 0
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_print_blob_sizes_at_exit=0',
'--caffe2_rnn_executor={}'.format(rnn_executor_opt),
'--caffe2_gpu_memory_tracking=1'] + extra_args)
device = core.DeviceOption(
workspace.GpuDeviceType if args.gpu else caffe2_pb2.CPU, 4)
with core.DeviceScope(device):
Benchmark(args)
|
pytorch-master
|
caffe2/python/lstm_benchmark.py
|
## @package tt_core
# Module caffe2.python.tt_core
import numpy as np
"""
The following methods are various utility methods for using the Tensor-Train
decomposition, or TT-decomposition introduced by I. V. Oseledets (2011) in his
paper (http://epubs.siam.org/doi/abs/10.1137/090752286).
Broadly speaking, these methods are used to replace fully connected layers in
neural networks with Tensor-Train layers introduced by A. Novikov et. al. (2015)
in their paper (http://arxiv.org/abs/1509.06569). More details about each of
the methods are provided in each respective docstring.
"""
def init_tt_cores(inp_sizes, out_sizes, tt_ranks, seed=1234):
"""
Initialize randomized orthogonalized TT-cores.
This method should be used when a TT-layer is trained from scratch. The
sizes of each of the cores are specified by the inp_sizes and out_sizes, and
the respective tt_ranks will dictate the ranks of each of the cores. Note
that larger tt_ranks result in slower computation but more accurate
approximations. The size of the ith core is:
tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1].
Note that the lengths of the inputs are expected to satisfy:
len(inp_sizes) == len(out_sizes) == len(tt_ranks) - 1.
Args:
inp_sizes: list of the input dimensions of the respective cores
out_sizes: list of the output dimensions of the respective cores
tt_ranks: list of the ranks of the respective cores
seed: integer to seed the random number generator
Returns:
cores: One-dimensional list of cores concatenated along an axis
"""
np.random.seed(seed)
# Assert that the sizes of each input are correct
assert(len(inp_sizes) == len(out_sizes)), \
"The number of input dimensions (" + str(len(inp_sizes)) + \
") must be equal to the number of output dimensions (" + \
str(len(out_sizes)) + ")."
assert(len(tt_ranks) == len(inp_sizes) + 1), \
"The number of tt-ranks (" + str(len(tt_ranks)) + ") must be " + \
"one more than the number of input and output dims (" + \
str(len(out_sizes)) + ")."
# Convert to numpy arrays
inp_sizes = np.array(inp_sizes)
out_sizes = np.array(out_sizes)
tt_ranks = np.array(tt_ranks)
# Initialize the cores array
cores_len = np.sum(
inp_sizes * out_sizes * tt_ranks[1:] * tt_ranks[:-1])
cores = np.zeros(cores_len)
cores_idx = 0
rv = 1
# Compute the full list of cores by computing each individual one
for i in range(inp_sizes.shape[0]):
shape = [tt_ranks[i],
inp_sizes[i],
out_sizes[i],
tt_ranks[i + 1]]
# Precompute the shape of each core
tall_shape = (np.prod(shape[:3]), shape[3])
# Randomly initialize the current core using a normal distribution
curr_core = np.dot(rv, np.random.normal(
0, 1, size=(shape[0], np.prod(shape[1:]))))
curr_core = curr_core.reshape(tall_shape)
# Orthogonalize the initialized current core and append to cores list
if i < inp_sizes.shape[0] - 1:
curr_core, rv = np.linalg.qr(curr_core)
cores[cores_idx:cores_idx +
curr_core.size] = curr_core.flatten()
cores_idx += curr_core.size
# Normalize the list of arrays using this Glorot-style trick
glorot_style = (np.prod(inp_sizes) *
np.prod(tt_ranks))**(1.0 / inp_sizes.shape[0])
return (0.1 / glorot_style) * np.array(cores).astype(np.float32)
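# Illustrative sketch (sizes chosen here, not from the original source):
# for inp_sizes=[4, 8], out_sizes=[5, 5] and tt_ranks=[1, 3, 1], the flat
# buffer returned above has length
#   1*4*5*3 + 3*8*5*1 = 180
# entries, i.e. sum(tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1]):
#   cores = init_tt_cores([4, 8], [5, 5], [1, 3, 1])
#   assert cores.shape == (180,)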
def matrix_to_tt(W, inp_sizes, out_sizes, tt_ranks):
"""
Convert a matrix into the TT-format.
This method will consume a 2D weight matrix such as those used in fully
connected layers in a neural network and will compute the TT-decomposition
of the weight matrix and return the TT-cores of the resulting computation.
This method should be used when converting a trained fully connected layer
into a TT-layer for increased speed and decreased parameter size. The size
of the ith core is:
tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1].
Note that the lengths of the inputs are expected to satisfy:
len(inp_sizes) == len(out_sizes) == len(tt_ranks) - 1.
We also require that np.prod(inp_sizes) == W.shape[0] and that
np.prod(out_sizes) == W.shape[1].
Args:
W: two-dimensional weight matrix numpy array representing a fully
connected layer to be converted to TT-format; note that the weight
matrix is transposed before being decomposed because we want to emulate the
X * W^T operation that the FC layer performs.
inp_sizes: list of the input dimensions of the respective cores
out_sizes: list of the output dimensions of the respective cores
tt_ranks: list of the ranks of the respective cores
Returns:
new_cores: One-dimensional list of cores concatenated along an axis
"""
# Assert that the sizes of each input are correct
assert(len(inp_sizes) == len(out_sizes)), \
"The number of input dimensions (" + str(len(inp_sizes)) + \
") must be equal to the number of output dimensions (" + \
str(len(out_sizes)) + ")."
assert(len(tt_ranks) == len(inp_sizes) + 1), \
"The number of tt-ranks (" + str(len(tt_ranks)) + ") must be " + \
"one more than the number of input and output dimensions (" + \
str(len(out_sizes)) + ")."
assert(W.shape[0] == np.prod(inp_sizes)), \
"The product of the input sizes (" + str(np.prod(inp_sizes)) + \
") must be equal to first dimension of W (" + str(W.shape[0]) + ")."
assert(W.shape[1] == np.prod(out_sizes)), \
"The product of the output sizes (" + str(np.prod(out_sizes)) + \
") must be equal to second dimension of W (" + str(W.shape[1]) + ")."
# W is transposed so that the multiplication X * W^T can be computed, just
# as it is in the FC layer.
W = W.transpose()
# Convert to numpy arrays
inp_sizes = np.array(inp_sizes)
out_sizes = np.array(out_sizes)
tt_ranks = np.array(tt_ranks)
# Copy the original weight matrix in order to permute and reshape the weight
# matrix. In addition, the inp_sizes and out_sizes are combined into a single
# sizes array to use the tt_svd helper method, which only consumes a single
# sizes array.
W_copy = W.copy()
total_inp_size = inp_sizes.size
W_copy = np.reshape(W_copy, np.concatenate((inp_sizes, out_sizes)))
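# The permutation below interleaves input and output axes: with
# total_inp_size = d it evaluates to [0, d, 1, d + 1, ..., d - 1, 2d - 1],
# pairing inp_sizes[i] with out_sizes[i].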
order = np.repeat(np.arange(0, total_inp_size), 2) + \
np.tile([0, total_inp_size], total_inp_size)
W_copy = np.transpose(W_copy, axes=order)
W_copy = np.reshape(W_copy, inp_sizes * out_sizes)
# Use helper method to convert the W matrix copy into the preliminary
# cores array.
cores = tt_svd(W_copy, inp_sizes * out_sizes, tt_ranks)
# Permute the dimensions of each of the cores to be compatible with the
# TT-layer.
new_cores = np.zeros(cores.shape).astype(np.float32)
idx = 0
for i in range(len(inp_sizes)):
shape = (tt_ranks[i], inp_sizes[i], out_sizes[i], tt_ranks[i + 1])
current_core = cores[idx:idx + np.prod(shape)].reshape(shape)
current_core = current_core.transpose((1, 3, 0, 2))
new_cores[new_cores.shape[0] - idx - np.prod(shape):
new_cores.shape[0] - idx] \
= current_core.flatten()
idx += np.prod(shape)
return new_cores
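# Illustrative sketch (shapes chosen here, not from the original source):
# converting a trained 8x9 FC weight requires np.prod(inp_sizes) == 8 and
# np.prod(out_sizes) == 9:
#   W = np.random.randn(8, 9).astype(np.float32)
#   cores = matrix_to_tt(W, inp_sizes=[4, 2], out_sizes=[3, 3],
#                        tt_ranks=[1, 2, 1])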
def tt_svd(W, sizes, tt_ranks):
"""
Helper method for the matrix_to_tt() method performing the TT-SVD
decomposition.
Uses the TT-decomposition algorithm to convert a matrix to TT-format using
multiple reduced SVD operations.
Args:
W: two-dimensional weight matrix representing a fully connected layer to
be converted to TT-format, preprocessed by the matrix_to_tt() method.
sizes: list of the dimensions of each of the cores
tt_ranks: list of the ranks of the respective cores
Returns:
cores: One-dimensional list of cores concatenated along an axis
"""
assert(len(tt_ranks) == len(sizes) + 1)
C = W.copy()
total_size = sizes.size
core = np.zeros(np.sum(tt_ranks[:-1] * sizes * tt_ranks[1:]),
dtype='float32')
# Compute iterative reduced SVD operations and store each resulting U matrix
# as an individual core.
pos = 0
for i in range(0, total_size - 1):
shape = tt_ranks[i] * sizes[i]
C = np.reshape(C, [shape, -1])
U, S, V = np.linalg.svd(C, full_matrices=False)
U = U[:, 0:tt_ranks[i + 1]]
S = S[0:tt_ranks[i + 1]]
V = V[0:tt_ranks[i + 1], :]
core[pos:pos + tt_ranks[i] * sizes[i] * tt_ranks[i + 1]] = U.ravel()
pos += tt_ranks[i] * sizes[i] * tt_ranks[i + 1]
C = np.dot(np.diag(S), V)
core[pos:pos + tt_ranks[total_size - 1] *
sizes[total_size - 1] * tt_ranks[total_size]] = C.ravel()
return core
# TODO(Surya) Write a method to convert an entire network where all fully
# connected layers are replaced by a TT layer.
def fc_net_to_tt_net(net):
pass
|
pytorch-master
|
caffe2/python/tt_core.py
|
from caffe2.python import core, test_util
from caffe2.proto import caffe2_pb2
import caffe2.python.nomnigraph as ng
from hypothesis import given
import hypothesis.strategies as st
import random
class TestBindings(test_util.TestCase):
def test_simple(self):
nn = ng.NNModule()
dfg = nn.dataFlow
dfg.createNode(ng.NeuralNetData("X"))
dfg.createNode(ng.NeuralNetOperator("FC"))
assert len(nn.dataFlow.getMutableNodes()) == 2
def test_core_net_simple(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
nn = ng.NNModule(net)
for node in nn.dataFlow.getMutableNodes():
if node.isOperator():
assert node.getName() == "FC"
elif node.isTensor():
assert node.getName() in ["X", "W", "Y"]
def test_core_net_controlflow(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
net.Relu(["Y"], ["Z"])
nn = ng.NNModule(net)
assert len(nn.controlFlow) == 2
for instr in nn.controlFlow:
assert instr.getType() == "Operator"
assert nn.controlFlow[0].getName() == "FC"
assert nn.controlFlow[1].getName() == "Relu"
def test_core_net_nn_accessors(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
net.Relu(["Y"], ["Z"])
nn = ng.NNModule(net)
tensors = set()
for t in nn.tensors:
tensors.add(t.name)
assert tensors == set(["X", "W", "Y", "Z"])
ops = set()
for op in nn.operators:
ops.add(op.name)
assert ops == set(["FC", "Relu"])
nodes = set()
for node in nn.nodes:
nodes.add(node.name)
assert nodes == (ops | tensors)
def test_netdef_simple(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
nn = ng.NNModule(net.Proto())
for node in nn.dataFlow.getMutableNodes():
if node.isOperator():
assert node.getOperator().getName() == "FC"
elif node.isTensor():
assert node.getTensor().getName() in ["X", "W", "Y"]
def test_operatordef_simple(self):
nn = ng.NNModule()
dfg = nn.dataFlow
op = core.CreateOperator("Ceil", ["X"], ["Y"], engine="CUDNN")
dfg.createNode(op)
for node in dfg.getMutableNodes():
assert node.isOperator()
assert node.getOperator().getName() == "Ceil"
def test_invalid_node(self):
nn = ng.NNModule()
dfg = nn.dataFlow
with self.assertRaises(Exception):
dfg.createNode(7)
def test_edges_simple(self):
nn = ng.NNModule()
dfg = nn.dataFlow
x = dfg.createNode(ng.NeuralNetData("X"))
w = dfg.createNode(ng.NeuralNetData("W"))
op = dfg.createNode(ng.NeuralNetOperator("Op"))
with self.assertRaises(Exception):
dfg.createEdge(x, w)
dfg.createEdge(op, w)
dfg.createEdge(x, op)
# Dot generation
assert(str(dfg).startswith("digraph G"))
# subgraph
sg = ng.NNSubgraph()
sg.addNode(x)
sg.addNode(op)
sg.induceEdges()
assert len(sg) == 2
# subgraph dot generation
assert(str(sg).startswith("digraph G"))
@given(size=st.sampled_from([10, 50]))
def test_edges_complex(self, size):
random.seed(1337)
nn = ng.NNModule()
dfg = nn.dataFlow
data = []
ops = []
for _ in range(size):
data.append(dfg.createNode(ng.NeuralNetData("X")))
for i in range(size):
ops.append(dfg.createNode(ng.NeuralNetOperator("Op" + str(i))))
for i in range(size):
for j in range(size):
if bool(random.getrandbits(1)):
dfg.createEdge(data[i], ops[j])
def test_traversal(self):
net = core.Net("test")
net.FC(["X", "W"], ["Y"])
net.Relu(["Y"], ["Z"])
nn = ng.NNModule(net)
fc = nn.controlFlow[0]
relu = nn.controlFlow[1]
assert not fc.inputs[0].hasProducer()
assert fc.inputs[0].name == "X"
assert fc.inputs[1].name == "W"
assert relu.outputs[0].name == "Z"
assert relu.inputs[0].name == "Y"
assert relu.inputs[0].hasProducer()
assert relu.inputs[0].producer.name == "FC"
assert fc.outputs[0].consumers[0].name == "Relu"
def test_debug(self):
nn = ng.NNModule()
dfg = nn.dataFlow
dfg.createNode(ng.NeuralNetData("X"))
dfg.createNode(ng.NeuralNetData("W"))
dfg.createNode(ng.NeuralNetOperator("Op"))
ng.render(nn.dataFlow)
def test_match_graph_node(self):
mg = ng.NNMatchGraph()
mg.createNode(ng.NeuralNetOperator("test"))
nn = ng.NNModule()
test = nn.dataFlow.createNode(ng.NeuralNetOperator("test"))
x = nn.dataFlow.createNode(ng.NeuralNetData("X"))
nn.dataFlow.createEdge(x, test)
count = 0
for match in nn.match(mg):
assert len(match) == 1
count += 1
# Dot generation of subgraph
assert(str(match).startswith("digraph G"))
assert count == 1
def test_match_graph_node_strict(self):
mg = ng.NNMatchGraph()
mg.createNode(ng.NeuralNetOperator("test"), strict=True)
nn = ng.NNModule()
test = nn.dataFlow.createNode(ng.NeuralNetOperator("test"))
x = nn.dataFlow.createNode(ng.NeuralNetData("X"))
nn.dataFlow.createEdge(test, x)
count = 0
for match in nn.match(mg):
assert len(match) == 1
count += 1
with self.assertRaises(Exception):
assert count == 1
def test_match_graph(self):
mg = ng.NNMatchGraph()
test2m = mg.createNode(ng.NeuralNetOperator("test2"), strict=True)
xm = mg.createNode(ng.NeuralNetData("X"), strict=True)
testm = mg.createNode(ng.NeuralNetOperator("test"))
mg.createEdge(test2m, xm)
mg.createEdge(xm, testm)
nn = ng.NNModule()
test2 = nn.dataFlow.createNode(ng.NeuralNetOperator("test2"))
x = nn.dataFlow.createNode(ng.NeuralNetData("X"))
test = nn.dataFlow.createNode(ng.NeuralNetOperator("test"))
nn.dataFlow.createEdge(test2, x)
nn.dataFlow.createEdge(x, test)
count = 0
for match in nn.match(mg):
print(len(match))
assert len(match) == 3
count += 1
assert count == 1
def test_delete_subgraph(self):
mg = ng.NNMatchGraph()
test2m = mg.createNode(ng.NeuralNetOperator("test2"), strict=True)
xm = mg.createNode(ng.NeuralNetData("X"), strict=True)
testm = mg.createNode(ng.NeuralNetOperator("test"))
mg.createEdge(test2m, xm)
mg.createEdge(xm, testm)
nn = ng.NNModule()
test2 = nn.dataFlow.createNode(ng.NeuralNetOperator("test2"))
x = nn.dataFlow.createNode(ng.NeuralNetData("X"))
test = nn.dataFlow.createNode(ng.NeuralNetOperator("test"))
nn.dataFlow.createEdge(test2, x)
nn.dataFlow.createEdge(x, test)
for m in nn.match(mg):
match = m
nn.deleteSubgraph(match)
assert len(nn.controlFlow) == 0
def test_replace_subgraph(self):
mg = ng.NNMatchGraph()
test2m = mg.createNode(ng.NeuralNetOperator("test2"), strict=True)
xm = mg.createNode(ng.NeuralNetData("X"), strict=True)
testm = mg.createNode(ng.NeuralNetOperator("test"))
mg.createEdge(test2m, xm)
mg.createEdge(xm, testm)
nn = ng.NNModule()
test2 = nn.dataFlow.createNode(ng.NeuralNetOperator("test2"))
x = nn.dataFlow.createNode(ng.NeuralNetData("X"))
test = nn.dataFlow.createNode(ng.NeuralNetOperator("test"))
nn.dataFlow.createEdge(test2, x)
nn.dataFlow.createEdge(x, test)
for m in nn.match(mg):
match = m
new_op = nn.dataFlow.createNode(ng.NeuralNetOperator("new_op"))
nn.replaceSubgraph(match, new_op, [], [])
assert len(nn.controlFlow) == 1
assert nn.controlFlow[0].name == "new_op"
def test_genericGraph(self):
g = ng.Graph()
n1 = g.createNode("hello1")
n2 = g.createNode("hello2")
e = g.createEdge(n1, n2)
ng.render(g)
def test_createUniqueDataNode(self):
net = core.Net("name")
nn = ng.NNModule(net)
n1 = nn.createUniqueDataNode("a")
self.assertEqual(n1.name[0], "a")
n2 = nn.dataFlow.createNode(ng.Operator("test1"))
nn.createEdge(n1, n2)
n3 = nn.createUniqueDataNode("a")
nn.createEdge(n2, n3)
self.assertEqual(n3.name[0], "a")
self.assertNotEqual(n1.name, n3.name)
n1 = nn.createUniqueDataNode("b")
n2 = nn.createUniqueDataNode("b")
self.assertNotEqual(n1.name, n2.name)
def test_convertToProto(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
nn = ng.NNModule(net)
new_netdef = nn.convertToCaffe2Proto()
print(new_netdef)
print(net.Proto())
assert len(new_netdef.op) == len(net.Proto().op)
for i in range(len(new_netdef.op)):
op = net.Proto().op[i]
new_op = new_netdef.op[i]
assert op.type == new_op.type
assert len(op.input) == len(new_op.input)
assert len(op.output) == len(new_op.output)
for a, b in zip(op.input, new_op.input):
assert a == b
for a, b in zip(op.output, new_op.output):
assert a == b
for a, b in zip(new_netdef.external_input, net.Proto().external_input):
assert a == b
for a, b in zip(new_netdef.external_output, net.Proto().external_output):
assert a == b
def test_node_interactions(self):
nn = ng.NNModule()
dfg = nn.dataFlow
test1 = dfg.createNode(ng.Operator("test1"))
test2 = dfg.createNode(ng.Operator("test2"))
x = dfg.createNode(ng.Data("x"))
dfg.createEdge(test1, x)
dfg.createEdge(x, test2)
p = test2.getOperatorPredecessors()
assert len(p) == 1
assert p[0] == test1
# Add another node
test3 = dfg.createNode(ng.Operator("test3"))
y = dfg.createNode(ng.Data("y"))
dfg.createEdge(test3, y)
dfg.createEdge(y, test2)
p = test2.getOperatorPredecessors()
assert len(p) == 2
assert test1 in p
assert test3 in p
# Successors
assert len(test2.getOperatorSuccessors()) == 0
assert len(test1.getOperatorSuccessors()) == 1
assert test1.getOperatorSuccessors()[0] == test2
# Check all the nodes are valid (pybind ownership test)
for node in [test1, test2, test3]:
assert node.isOperator()
for node in [x, y]:
assert node.isTensor()
def test_delete_node(self):
nn = ng.NNModule()
node = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))
nn.dataFlow.deleteNode(node)
assert len(nn.dataFlow.getMutableNodes()) == 0
def test_replace_producer(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
nn = ng.NNModule(net)
fc = nn.controlFlow[0]
test_op = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))
nn.replaceProducer(fc.outputs[0], test_op)
nn.deleteNode(fc)
assert len(nn.controlFlow) == 1
assert nn.controlFlow[0].name == "TestOp"
def test_replace_all_uses_with(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
net.FC(["X", "W2"], ["Y2"])
nn = ng.NNModule(net)
fc = nn.controlFlow[0]
test_tensor = nn.dataFlow.createNode(ng.NeuralNetData("T"))
nn.replaceAllUsesWith(fc.inputs[0], test_tensor)
for op in nn.controlFlow:
assert op.inputs[0].name == "T"
def test_replace_as_consumer(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
nn = ng.NNModule(net)
fc = nn.controlFlow[0]
test_op = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))
nn.replaceAsConsumer(fc, test_op)
nn.deleteNode(fc)
assert len(nn.controlFlow) == 1
assert nn.controlFlow[0].name == "TestOp"
assert nn.controlFlow[0].inputs[0].name == "X"
assert nn.controlFlow[0].inputs[1].name == "W"
def test_annotation_basic(self):
annot = ng.Annotation()
annot.setDevice("woot")
assert annot.getDevice() == "woot"
annot.setDeviceType(7)
assert annot.getDeviceType() == 7
def test_annotation_from_graph(self):
nn = ng.NNModule()
node = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))
annot = node.getAnnotation()
annot.setDeviceType(7)
node.setAnnotation(annot)
new_annot = node.getAnnotation()
assert new_annot.getDeviceType() == 7
def test_annotation_operator_def(self):
nn = ng.NNModule()
opdef = core.CreateOperator("Conv", [], [], engine="SENTINEL")
node = nn.dataFlow.createNode(opdef)
assert node.annotation.operator_def.engine == "SENTINEL"
opdef = core.CreateOperator("Conv", [], [], engine="NEW_SENTINEL")
node.annotation.operator_def = opdef
netdef = nn.convertToCaffe2Proto()
assert len(netdef.op) == 1
assert netdef.op[0].engine == "NEW_SENTINEL"
def test_annotation_device_option(self):
nn = ng.NNModule()
node = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))
d = caffe2_pb2.DeviceOption()
d.node_name = "test"
node.annotation.device_option = d
# access in a different way
d_2 = nn.controlFlow[0].annotation.device_option
assert d == d_2
def test_has_device_option(self):
nn = ng.NNModule()
node = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))
assert not node.annotation.hasDeviceOption()
d = caffe2_pb2.DeviceOption()
node.annotation.device_option = d
assert node.annotation.hasDeviceOption()
def test_distributed_annotations(self):
nn = ng.NNModule()
key = nn.dataFlow.createNode(ng.NeuralNetData("key"))
length = nn.dataFlow.createNode(ng.NeuralNetData("length"))
node = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))
annot = ng.Annotation()
annot.setKeyNode(key)
annot.setLengthNode(length)
annot.setComponentLevels(["", "test", "woot"])
node.setAnnotation(annot)
new_annot = node.getAnnotation()
#assert new_annot.getLengthNode() == length
assert new_annot.getKeyNode() == key
assert len(new_annot.getComponentLevels()) == 3
assert new_annot.getComponentLevels()[0] == ""
assert new_annot.getComponentLevels()[2] == "woot"
def test_distributed_device_map(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
d = caffe2_pb2.DeviceOption()
nn = ng.NNModule(net, {"X": d, "W": d})
with self.assertRaises(Exception):
nn = ng.NNModule(net, {"X": d, "Fake": d})
|
pytorch-master
|
caffe2/python/nomnigraph_test.py
|
from caffe2.python import core, scope
from caffe2.python.modeling.parameter_sharing import (
ParameterSharing,
)
from caffe2.python.optimizer import AdagradOptimizer, AdamOptimizer
from caffe2.python.layer_test_util import LayersTestCase
class ParameterSharingTest(LayersTestCase):
def test_layer_parameter_name(self):
output_dims = 2
with scope.NameScope('global_scope'):
fc1_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w, 'global_scope/fc/w')
self.assertEquals(fc1_output(), 'global_scope/fc/output')
with scope.NameScope('nested_scope'):
fc2_output = self.model.FC(
fc1_output,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/nested_scope/fc/w')
self.assertEquals(fc2_output(),
'global_scope/nested_scope/fc/output')
fc3_output = self.model.FC(
fc1_output,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/nested_scope/fc_auto_0/w')
self.assertEquals(fc3_output(),
'global_scope/nested_scope/fc_auto_0/output')
def test_layer_shared_parameter_name_different_namescopes(self):
output_dims = 2
with scope.NameScope('global_scope'):
with ParameterSharing({'scope_1': 'scope_0'}):
with scope.NameScope('scope_0'):
fc1_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/scope_0/fc/w')
self.assertEquals(fc1_output(),
'global_scope/scope_0/fc/output')
with scope.NameScope('scope_1'):
fc2_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/scope_0/fc/w')
self.assertEquals(fc2_output(),
'global_scope/scope_1/fc/output')
def test_layer_shared_parameter_name_within_same_namescope(self):
output_dims = 2
with scope.NameScope('global_scope'):
with ParameterSharing({'fc_auto_0': 'fc'}):
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/fc/w')
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/fc/w')
def test_layer_shared_parameter_name_within_same_namescope_customized_name(self):
output_dims = 2
with scope.NameScope('global_scope'):
with ParameterSharing({'new_fc': 'shared_fc'}):
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
name='shared_fc'
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/shared_fc/w')
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
name='new_fc'
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/shared_fc/w')
def test_layer_shared_parameter_name_different_shapes(self):
output_dims = 2
with scope.NameScope('global_scope'):
with ParameterSharing({'fc_auto_0': 'fc'}):
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/fc/w')
with self.assertRaisesRegex(ValueError, 'Got inconsistent shapes .*'):
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims + 1
)
def test_layer_duplicated_parameter_init(self):
output_dims = 2
with scope.NameScope('global_scope'):
with ParameterSharing({'new_fc': 'shared_fc'}):
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
name='shared_fc'
)
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
name='new_fc'
)
train_init_net = core.Net('train_init_net')
train_net = core.Net('train_net')
for layer in self.model.layers:
layer.add_operators(train_net, train_init_net)
op_outputs = []
for op in train_init_net._net.op:
op_outputs.extend(op.output)
# only fill these parameter blobs once
self.assertEquals(
sorted(op_outputs),
['global_scope/shared_fc/b', 'global_scope/shared_fc/w']
)
def test_layer_shared_parameter_optim_validator(self):
"""
This test is to cover the _validate_param_optim function in
layer_model_helper class.
"""
output_dims = 2
adagrad_optim = AdagradOptimizer(
alpha=0.004,
epsilon=0.02,
)
self.model.default_optimizer = adagrad_optim
# the following covers the branch -- optim is None
with scope.NameScope('global_scope_0'):
with ParameterSharing({'scope_1': 'scope_0'}):
with scope.NameScope('scope_0'):
fc1_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
weight_optim=self.model.NoOptim,
)
with scope.NameScope('scope_1'), self.assertRaises(Exception):
fc2_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
# the following covers the branch -- optim is NoOptim
with scope.NameScope('global_scope_1'):
with ParameterSharing({'scope_1': 'scope_0'}):
with scope.NameScope('scope_0'):
fc1_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
weight_optim=None,
)
with scope.NameScope('scope_1'), self.assertRaises(Exception):
fc2_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
weight_optim=self.model.NoOptim,
)
# the following covers the branch -- optim is an instance of Optimizer
adagrad_optim_2 = AdagradOptimizer(
alpha=0.005,
epsilon=0.02,
)
adam_optim = AdamOptimizer()
self.model.default_optimizer = adagrad_optim_2
with scope.NameScope('global_scope_2'):
with ParameterSharing({'scope_1': 'scope_0', 'scope_2': 'scope_0'}):
with scope.NameScope('scope_0'):
fc1_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
weight_optim=None, # it will use adagrad_optim_2
)
with scope.NameScope('scope_1'), self.assertRaises(Exception):
fc2_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
weight_optim=adagrad_optim,
)
with scope.NameScope('scope_2'), self.assertRaises(Exception):
fc2_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
weight_optim=adam_optim,
)
|
pytorch-master
|
caffe2/python/layer_parameter_sharing_test.py
|
## @package workspace
# Module caffe2.python.workspace
import collections
import contextlib
from google.protobuf.message import Message
from multiprocessing import Process
import os
from collections import defaultdict
import logging
import numpy as np
from past.builtins import basestring
import shutil
import socket
import tempfile
from caffe2.proto import caffe2_pb2
from caffe2.python import scope, utils
from caffe2.python.lazy import TriggerLazyImport
import caffe2.python._import_c_extension as C
logger = logging.getLogger(__name__)
Blobs = C.blobs
ResetBlob = C.reset_blob
CreateBlob = C.create_blob
CurrentWorkspace = C.current_workspace
DeserializeBlob = C.deserialize_blob
GlobalInit = C.global_init
HasBlob = C.has_blob
RegisteredOperators = C.registered_operators
SerializeBlob = C.serialize_blob
SwitchWorkspace = C.switch_workspace
RootFolder = C.root_folder
Workspaces = C.workspaces
BenchmarkNet = C.benchmark_net
BenchmarkNetOnce = C.benchmark_net_once
GetStats = C.get_stats
CreateOfflineTensor = C.create_offline_tensor
operator_tracebacks = defaultdict(dict)
is_asan = C.is_asan
has_fbgemm = C.has_fbgemm
has_cuda_support = C.has_cuda_support
has_hip_support = C.has_hip_support
has_gpu_support = C.has_gpu_support
if has_cuda_support:
GpuDeviceType = caffe2_pb2.CUDA
NumCudaDevices = C.num_cuda_devices
# This is a duplicate of NumCudaDevices. Remove
# NumCudaDevices once it has been replaced everywhere in the code.
NumGpuDevices = C.num_cuda_devices
GetCUDAVersion = C.get_cuda_version
GetCuDNNVersion = C.get_cudnn_version
def GetGpuPeerAccessPattern():
return np.asarray(C.get_cuda_peer_access_pattern())
GetDeviceProperties = C.get_device_properties
GetGPUMemoryInfo = C.get_gpu_memory_info
else:
# pyre-fixme[9]: incompatible type assignment
NumCudaDevices = lambda: 0 # noqa
# pyre-fixme[9]: incompatible type assignment
GetCUDAVersion = lambda: 0 # noqa
# pyre-fixme[9]: incompatible type assignment
GetCuDNNVersion = lambda: 0 # noqa
if has_hip_support:
GpuDeviceType = caffe2_pb2.HIP
# pyre-fixme[9]: incompatible type assignment
NumGpuDevices = C.num_hip_devices
GetHIPVersion = C.get_hip_version
def GetGpuPeerAccessPattern():
return np.asarray(C.get_hip_peer_access_pattern())
GetDeviceProperties = C.get_device_properties
GetGPUMemoryInfo = C.get_gpu_memory_info
if not has_gpu_support:
# Set CUDA as the default GpuDeviceType, since some tests
# (e.g. core and scope tests) use GpuDeviceType even without GPU support.
GpuDeviceType = caffe2_pb2.CUDA
# pyre-fixme[9]: incompatible type assignment
NumGpuDevices = lambda: 0 # noqa
GetDeviceProperties = lambda x: None # noqa
GetGpuPeerAccessPattern = lambda: np.array([]) # noqa
# pyre-fixme[9]: incompatible type assignment
GetGPUMemoryInfo = lambda: None # noqa
IsNUMAEnabled = C.is_numa_enabled
GetNumNUMANodes = C.get_num_numa_nodes
GetBlobNUMANode = C.get_blob_numa_node
GetBlobSizeBytes = C.get_blob_size_bytes
def FillRandomNetworkInputs(net, input_dims, input_types):
C.fill_random_network_inputs(net.Proto().SerializeToString(), input_dims, input_types)
def _GetFreeFlaskPort():
"""Get a free flask port."""
# We will prefer to use 5000. If not, we will then pick a random port.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('127.0.0.1', 5000))
if result == 0:
return 5000
else:
s = socket.socket()
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
# Race condition: between the interval we close the socket and actually
# start a mint process, another process might have occupied the port. We
# don't do much here as this is mostly for convenience in research
# rather than 24x7 service.
return port
def StartMint(root_folder=None, port=None):
"""Start a mint instance.
TODO(Yangqing): this does not work well under ipython yet. According to
https://github.com/ipython/ipython/issues/5862
writing up some fix is a todo item.
"""
from caffe2.python.mint import app
if root_folder is None:
# Get the root folder from the current workspace
root_folder = C.root_folder()
if port is None:
port = _GetFreeFlaskPort()
process = Process(
target=app.main,
args=(
['-p', str(port), '-r', root_folder],
)
)
process.start()
print('Mint running at http://{}:{}'.format(socket.getfqdn(), port))
return process
def StringifyProto(obj):
"""Stringify a protocol buffer object.
Inputs:
obj: a protocol buffer object, or a Pycaffe2 object that has a Proto()
function.
Outputs:
string: the output protobuf string.
Raises:
AttributeError: if the passed in object does not have the right attribute.
"""
if isinstance(obj, basestring):
return obj
else:
if isinstance(obj, Message):
# First, see if this object is a protocol buffer, which we can
# simply serialize with the SerializeToString() call.
return obj.SerializeToString()
elif hasattr(obj, 'Proto'):
return obj.Proto().SerializeToString()
else:
raise ValueError("Unexpected argument to StringifyProto of type " +
type(obj).__name__)
def ResetWorkspace(root_folder=None):
if root_folder is None:
# Reset the workspace, but keep the current root folder setting.
return C.reset_workspace(C.root_folder())
else:
if not os.path.exists(root_folder):
os.makedirs(root_folder)
return C.reset_workspace(root_folder)
def CreateNet(net, overwrite=False, input_blobs=None):
TriggerLazyImport()
if input_blobs is None:
input_blobs = []
for input_blob in input_blobs:
C.create_blob(input_blob)
return CallWithExceptionIntercept(
C.create_net,
C.Workspace.current._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net), overwrite,
)
def Predictor(init_net, predict_net):
return C.Predictor(StringifyProto(init_net), StringifyProto(predict_net))
def GetOperatorCost(operator, blobs):
return C.get_operator_cost(StringifyProto(operator), blobs)
def RunOperatorOnce(operator):
return C.run_operator_once(StringifyProto(operator))
def RunOperatorMultiple(operator, num_runs):
return C.run_operator_multiple(StringifyProto(operator), num_runs)
def RunOperatorsOnce(operators):
for op in operators:
success = RunOperatorOnce(op)
if not success:
return False
return True
def ClearGlobalNetObserver():
return C.clear_global_net_observer()
def CallWithExceptionIntercept(func, op_id_fetcher, net_name, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
op_id = op_id_fetcher()
net_tracebacks = operator_tracebacks.get(net_name, None)
logger.warning(
'Original python traceback for operator `{}` in network '
'`{}` in exception above (most recent call last):'.format(
op_id, net_name))
if net_tracebacks and op_id in net_tracebacks:
tb = net_tracebacks[op_id]
for line in reversed(tb):
logger.warning(' File "{}", line {}, in {}'.format(
line[0], line[1], line[2]))
raise
def RunNetOnce(net):
return CallWithExceptionIntercept(
C.run_net_once,
C.Workspace.current._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net),
)
def RunNet(name, num_iter=1, allow_fail=False):
"""Runs a given net.
Inputs:
name: the name of the net, or a reference to the net.
num_iter: number of iterations to run
allow_fail: if True, does not assert on net exec failure but returns False
Returns:
True on success. Raises an exception on failure unless allow_fail is
True, in which case False is returned.
"""
return CallWithExceptionIntercept(
C.run_net,
C.Workspace.current._last_failed_op_net_position,
GetNetName(name),
StringifyNetName(name), num_iter, allow_fail,
)
def RunPlan(plan_or_step):
# TODO(jiayq): refactor core.py/workspace.py to avoid circular deps
import caffe2.python.core as core
if isinstance(plan_or_step, core.ExecutionStep):
plan_or_step = core.Plan(plan_or_step)
return C.run_plan(StringifyProto(plan_or_step))
def RunPlanInBackground(plan_or_step):
# TODO(jiayq): refactor core.py/workspace.py to avoid circular deps
import caffe2.python.core as core
if isinstance(plan_or_step, core.ExecutionStep):
plan_or_step = core.Plan(plan_or_step)
return C.run_plan_in_background(StringifyProto(plan_or_step))
def InferShapesAndTypes(nets, blob_dimensions=None, nets_proto=False,
blob_types=None):
"""Infers the shapes and types for the specified nets.
Inputs:
nets: the list of nets
blob_dimensions (optional): a dictionary of blobs and their dimensions.
If not specified, the workspace blobs are used.
nets_proto (optional): a boolean flag indicating whether the protobuf
representation is passed to the routine.
blob_types (optional): a dictionary of blobs and their data types; may
only be given together with blob_dimensions.
Returns:
A tuple of (shapes, types) dictionaries keyed by blob name.
"""
if nets_proto:
net_protos = [StringifyProto(n) for n in nets]
else:
net_protos = [StringifyProto(n.Proto()) for n in nets]
if blob_dimensions is None:
assert blob_types is None
blobdesc_prototxt = C.infer_shapes_and_types_from_workspace(net_protos)
elif blob_types is None:
blobdesc_prototxt = C.infer_shapes_and_types_from_map(
net_protos, blob_dimensions
)
else:
blobdesc_prototxt = C.infer_shapes_and_types_from_map(
net_protos, blob_dimensions, blob_types
)
blobdesc_proto = caffe2_pb2.TensorShapes()
blobdesc_proto.ParseFromString(blobdesc_prototxt)
shapes = {}
types = {}
for ts in blobdesc_proto.shapes:
if not ts.unknown_shape:
shapes[ts.name] = list(ts.dims)
types[ts.name] = ts.data_type
return (shapes, types)
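# Usage sketch (net and blob names hypothetical):
#   shapes, types = InferShapesAndTypes(
#       [my_net], blob_dimensions={'data': [64, 3, 32, 32]})
#   shapes['conv1']  # list of inferred dims for blob 'conv1', if known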
def _StringifyName(name, expected_type):
if isinstance(name, basestring):
return name
assert type(name).__name__ == expected_type, \
"Expected a string or %s" % expected_type
return str(name)
def StringifyBlobName(name):
return _StringifyName(name, "BlobReference")
def StringifyNetName(name):
return _StringifyName(name, "Net")
def GetNetName(net):
if isinstance(net, basestring):
return net
if type(net).__name__ == "Net" or type(net).__name__ == "NetWithShapeInference":
return net.Name()
if isinstance(net, caffe2_pb2.NetDef):
return net.name
raise Exception("Not a Net object: {}".format(str(net)))
def FeedBlob(name, arr, device_option=None):
"""Feeds a blob into the workspace.
Inputs:
name: the name of the blob.
arr: either a TensorProto object or a numpy array object to be fed into
the workspace.
device_option (optional): the device option to feed the data with.
Returns:
True or False, stating whether the feed is successful.
"""
ws = C.Workspace.current
return _Workspace_feed_blob(ws, name, arr, device_option)
def FetchBlobs(names):
"""Fetches a list of blobs from the workspace.
Inputs:
names: list of names of blobs - strings or BlobReferences
Returns:
list of fetched blobs
"""
return [FetchBlob(name) for name in names]
def FetchBlob(name):
"""Fetches a blob from the workspace.
Inputs:
name: the name of the blob - a string or a BlobReference
Returns:
Fetched blob (numpy array or string) if successful
"""
result = C.fetch_blob(StringifyBlobName(name))
if isinstance(result, tuple):
raise TypeError(
"Use FetchInt8Blob to fetch Int8 Blob {}".format(
StringifyBlobName(name)
)
)
return result
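# Round-trip sketch: feed a numpy array and fetch it back by name.
#   FeedBlob('x', np.array([1.0, 2.0], dtype=np.float32))
#   assert (FetchBlob('x') == np.array([1.0, 2.0], dtype=np.float32)).all()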
def FetchTorch(name):
ws = C.Workspace.current
return ws.blobs[name].to_torch()
Int8Tensor = collections.namedtuple(
'Int8Tensor', ['data', 'scale', 'zero_point']
)
def FetchInt8Blob(name):
"""Fetches an Int8 blob from the workspace. It shared backend implementation
with FetchBlob but it is recommended when fetching Int8 Blobs
Inputs:
name: the name of the Int8 blob - a string or a BlobReference
Returns:
data: int8 numpy array, data
scale: float, fake quantization scale
zero_point: int, fake quantization offset
"""
result = C.fetch_blob(StringifyBlobName(name))
assert isinstance(result, tuple), \
'You are not fetching an Int8Blob {}. Please use FetchBlob'.format(
StringifyBlobName(name))
return Int8Tensor(*result)
def FetchInt8BlobRealVal(name):
"""Fetches an Int8 blob from the workspace and return its real value representation.
Inputs:
name: the name of the Int8 blob - a string or a BlobReference
Returns:
real value representation of int8 numpy array
"""
result = C.fetch_blob(StringifyBlobName(name))
assert isinstance(result, tuple), \
'You are not fetching an Int8Blob {}. Please use FetchBlob'.format(
StringifyBlobName(name))
int8_blob = Int8Tensor(*result)
return (int8_blob.data.astype(np.int32) - int(int8_blob.zero_point)).astype(
np.float32) * int8_blob.scale
def _Workspace_fetch_int8_blob(ws, name):
"""Fetches an Int8 blob from the workspace. It shared backend implementation
with FetchBlob but it is recommended when fetching Int8 Blobs
Inputs:
name: the name of the Int8 blob - a string or a BlobReference
Returns:
data: int8 numpy array, data
scale: float, fake quantization scale
zero_point: int, fake quantization offset
"""
result = ws.fetch_blob(name)
assert isinstance(result, tuple), \
'You are not fetching an Int8Blob {}. Please use fetch_blob'.format(
StringifyBlobName(name))
return Int8Tensor(*result)
C.Workspace.fetch_int8_blob = _Workspace_fetch_int8_blob
def ApplyTransform(transform_key, net):
"""Apply a Transform to a NetDef protobuf object, and returns the new
transformed NetDef.
Inputs:
transform_key: the name of the transform, as it is stored in the registry
net: a NetDef protobuf object
Returns:
Transformed NetDef protobuf object.
"""
transformed_net = caffe2_pb2.NetDef()
transformed_str = C.apply_transform(
str(transform_key).encode('utf-8'),
net.SerializeToString(),
)
transformed_net.ParseFromString(transformed_str)
return transformed_net
def ApplyTransformIfFaster(transform_key, net, init_net, **kwargs):
"""Apply a Transform to a NetDef protobuf object, and returns the new
transformed NetDef, only if it runs faster than the original.
The runs are performed on the current active workspace (gWorkspace).
You should initialize that workspace before making a call to this function.
Inputs:
transform_key: the name of the transform, as it is stored in the registry
net: a NetDef protobuf object
init_net: The net to initialize the workspace.
warmup_runs (optional):
Determines how many times the net is run before testing.
Will be 5 by default.
main_runs (optional):
Determines how many times the net is run during testing.
Will be 10 by default.
improvement_threshold (optional):
Determines the factor which the new net needs to be faster
in order to replace the old. Will be 1.01 by default.
Returns:
Either a Transformed NetDef protobuf object, or the original netdef.
"""
warmup_runs = kwargs.get('warmup_runs', 5)
main_runs = kwargs.get('main_runs', 10)
improvement_threshold = kwargs.get('improvement_threshold', 1.01)
transformed_net = caffe2_pb2.NetDef()
transformed_str = C.apply_transform_if_faster(
str(transform_key).encode('utf-8'),
net.SerializeToString(),
init_net.SerializeToString(),
warmup_runs,
main_runs,
float(improvement_threshold),
)
transformed_net.ParseFromString(transformed_str)
return transformed_net
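# Sketch (transform key hypothetical): adopt the transformed net only if it
# is at least 1% faster than the original on the current workspace.
#   maybe_new_net = ApplyTransformIfFaster(
#       'some_registered_transform', net.Proto(), init_net.Proto(),
#       warmup_runs=5, main_runs=10, improvement_threshold=1.01)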
def GetNameScope():
"""Return the current namescope string. To be used to fetch blobs"""
return scope.CurrentNameScope()
class _BlobDict(object):
"""Provides python dict compatible way to do fetching and feeding"""
def __getitem__(self, key):
return FetchBlob(key)
def __setitem__(self, key, value):
return FeedBlob(key, value)
def __len__(self):
return len(C.blobs())
def __iter__(self):
return C.blobs().__iter__()
def __contains__(self, item):
return C.has_blob(item)
blobs = _BlobDict()
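# Dict-style access sketch, equivalent to FeedBlob/FetchBlob above:
#   blobs['x'] = np.ones(3, dtype=np.float32)
#   y = blobs['x']
#   assert 'x' in blobs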
################################################################################
# Utilities for immediate mode
#
# Caffe2's immediate mode implements the following behavior: between the two
# function calls StartImmediate() and StopImmediate(), for any operator that is
# called through CreateOperator(), we will also run that operator in a workspace
# that is specific to the immediate mode. The user is explicitly expected to
# make sure that these ops have proper inputs and outputs, i.e. one should not
# run an op where an external input is not created or fed.
#
# Users can use FeedImmediate() and FetchImmediate() to interact with blobs
# in the immediate workspace.
#
# Once StopImmediate() is called, all contents in the immediate workspace are
# freed up so one can continue using normal runs.
#
# The immediate mode is solely for debugging purposes and support will be very
# sparse.
################################################################################
_immediate_mode = False
_immediate_workspace_name = "_CAFFE2_IMMEDIATE"
_immediate_root_folder = ''
def IsImmediate():
return _immediate_mode
@contextlib.contextmanager
def WorkspaceGuard(workspace_name):
current = CurrentWorkspace()
SwitchWorkspace(workspace_name, True)
yield
SwitchWorkspace(current)
def StartImmediate(i_know=False):
global _immediate_mode
global _immediate_root_folder
if IsImmediate():
# already in immediate mode. We will kill the previous one
# and start from fresh.
StopImmediate()
_immediate_mode = True
with WorkspaceGuard(_immediate_workspace_name):
_immediate_root_folder = tempfile.mkdtemp()
ResetWorkspace(_immediate_root_folder)
if i_know:
# if the user doesn't want to see the warning message, sure...
return
print("""
Enabling immediate mode in caffe2 python is an EXTREMELY EXPERIMENTAL
feature and may very easily go wrong. This is because Caffe2 uses a
declarative way of defining operators and models, which is essentially
not meant to run things in an interactive way. Read the following carefully
to make sure that you understand the caveats.
(1) You need to make sure that the sequences of operators you create are
actually runnable sequentially. For example, if you create an op that takes
an input X, somewhere earlier you should have already created X.
(2) Caffe2 immediate uses one single workspace, so if the set of operators
you run are intended to be under different workspaces, they will not run.
To create boundaries between such use cases, you can call FinishImmediate()
and StartImmediate() manually to flush out everything no longer needed.
(3) Underlying objects held by the immediate mode may interfere with your
normal run. For example, if there is a leveldb that you opened in immediate
mode and did not close, your main run will fail because leveldb does not
support double opening. Immediate mode may also occupy a lot of memory esp.
on GPUs. Call FinishImmediate() as soon as possible when you no longer
need it.
(4) Immediate is designed to be slow. Every immediate call implicitly
creates a temp operator object, runs it, and destroys the operator. This
slow-speed run is by design to discourage abuse. For most use cases other
than debugging, do NOT turn on immediate mode.
(5) If there is anything FATAL happening in the underlying C++ code, the
immediate mode will immediately (pun intended) cause the runtime to crash.
Thus you should use immediate mode with extra care. If you still would
like to, have fun [https://xkcd.com/149/].
""")
def StopImmediate():
"""Stops an immediate mode run."""
# Phew, that was a dangerous ride.
global _immediate_mode
global _immediate_root_folder
if not IsImmediate():
return
with WorkspaceGuard(_immediate_workspace_name):
ResetWorkspace()
shutil.rmtree(_immediate_root_folder)
_immediate_root_folder = ''
_immediate_mode = False
def ImmediateBlobs():
with WorkspaceGuard(_immediate_workspace_name):
return Blobs()
def RunOperatorImmediate(op):
with WorkspaceGuard(_immediate_workspace_name):
RunOperatorOnce(op)
def FetchImmediate(*args, **kwargs):
with WorkspaceGuard(_immediate_workspace_name):
return FetchBlob(*args, **kwargs)
def FeedImmediate(*args, **kwargs):
with WorkspaceGuard(_immediate_workspace_name):
return FeedBlob(*args, **kwargs)
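# Usage sketch (blob and op names hypothetical); mind the caveats printed
# by StartImmediate():
#   from caffe2.python import core
#   StartImmediate(i_know=True)
#   FeedImmediate('X', np.random.randn(2, 3).astype(np.float32))
#   RunOperatorImmediate(core.CreateOperator('Relu', ['X'], ['Y']))
#   y = FetchImmediate('Y')
#   StopImmediate()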
# C.Workspace methods.
def _Workspace_create_net_with_exception_intercept(ws, net, overwrite=False):
return CallWithExceptionIntercept(
ws._create_net,
ws._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net), overwrite,
)
def _Workspace_run(ws, obj):
if hasattr(obj, 'Proto'):
obj = obj.Proto()
if isinstance(obj, caffe2_pb2.PlanDef):
return ws._run_plan(obj.SerializeToString())
if isinstance(obj, caffe2_pb2.NetDef):
return CallWithExceptionIntercept(
ws._run_net,
ws._last_failed_op_net_position,
GetNetName(obj),
obj.SerializeToString(),
)
# return ws._run_net(obj.SerializeToString())
if isinstance(obj, caffe2_pb2.OperatorDef):
return ws._run_operator(obj.SerializeToString())
raise ValueError(
"Don't know how to do Workspace.run() on {}".format(type(obj)))
def _Workspace_feed_blob(ws, name, arr, device_option=None):
if type(arr) is caffe2_pb2.TensorProto:
arr = utils.Caffe2TensorToNumpyArray(arr)
if type(arr) is np.ndarray and arr.dtype.kind in 'SU':
# Plain NumPy strings are weird, let's use objects instead
arr = arr.astype(np.object)
if device_option is None:
device_option = scope.CurrentDeviceScope()
if device_option and device_option.device_type == caffe2_pb2.CUDA:
if arr.dtype == np.dtype('float64'):
logger.warning(
"CUDA operators do not support 64-bit doubles, " +
"please use arr.astype(np.float32) or np.int32 for ints." +
" Blob: {}".format(name) +
" type: {}".format(str(arr.dtype))
)
name = StringifyBlobName(name)
if device_option is not None:
return ws.create_blob(name).feed(arr, device_option)
else:
return ws.create_blob(name).feed(arr)
def _Workspace_remove_blob(ws, blob):
ws._remove_blob(str(blob))
Workspace = C.Workspace
Workspace.create_net = _Workspace_create_net_with_exception_intercept
Workspace.run = _Workspace_run
Workspace.feed_blob = _Workspace_feed_blob
Workspace.remove_blob = _Workspace_remove_blob
# C.Blob methods.
def _Blob_feed(blob, arg, device_option=None):
# conservative type check to avoid unnecessary import
if type(arg).__name__ == 'Tensor' and type(arg).__module__ == 'torch':
import torch
if isinstance(arg, torch.Tensor):
assert device_option is None, \
"device_option doesn't make sense with PyTorch tensors"
handle = torch._C._tensor_impl_raw_handle(arg)
blob._wrap_tensor_impl(handle)
return True # _feed() returns True for some reason
if device_option is not None:
device_option = StringifyProto(device_option)
return blob._feed(arg, device_option)
C.Blob.feed = _Blob_feed
def _Tensor_to_torch(tensor):
"""
PyTorch tensor interop (TensorCPU methods)
Can be accessed as:
workspace.Workspace.current.blobs['foo'].tensor().to_torch()
"""
# avoiding circular dependency
import torch
handle = tensor._tensor_impl_raw_handle()
return torch._C._wrap_tensor_impl(handle)
C.TensorCPU.to_torch = _Tensor_to_torch
def _Blob_to_torch(blob):
if not blob.is_tensor():
raise RuntimeError("Blob has to be a tensor")
return blob.as_tensor().to_torch()
C.Blob.to_torch = _Blob_to_torch
|
pytorch-master
|
caffe2/python/workspace.py
|
## @package net_drawer
# Module caffe2.python.net_drawer
import argparse
import json
import logging
from collections import defaultdict
from caffe2.python import utils
from future.utils import viewitems
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
try:
import pydot
except ImportError:
logger.info(
'Cannot import pydot, which is required for drawing a network. This '
'can usually be installed in python with "pip install pydot". Also, '
'pydot requires graphviz to convert dot files to pdf: in ubuntu, this '
'can usually be installed with "sudo apt-get install graphviz".'
)
print(
'net_drawer will not run correctly. Please install the correct '
'dependencies.'
)
pydot = None
from caffe2.proto import caffe2_pb2
OP_STYLE = {
'shape': 'box',
'color': '#0F9D58',
'style': 'filled',
'fontcolor': '#FFFFFF'
}
BLOB_STYLE = {'shape': 'octagon'}
def _rectify_operator_and_name(operators_or_net, name):
"""Gets the operators and name for the pydot graph."""
if isinstance(operators_or_net, caffe2_pb2.NetDef):
operators = operators_or_net.op
if name is None:
name = operators_or_net.name
elif hasattr(operators_or_net, 'Proto'):
net = operators_or_net.Proto()
if not isinstance(net, caffe2_pb2.NetDef):
raise RuntimeError(
"Expecting NetDef, but got {}".format(type(net)))
operators = net.op
if name is None:
name = net.name
else:
operators = operators_or_net
if name is None:
name = "unnamed"
return operators, name
def _escape_label(name):
# json.dumps is a poor man's escaping
return json.dumps(name)
def GetOpNodeProducer(append_output, **kwargs):
def ReallyGetOpNode(op, op_id):
if op.name:
node_name = '%s/%s (op#%d)' % (op.name, op.type, op_id)
else:
node_name = '%s (op#%d)' % (op.type, op_id)
if append_output:
for output_name in op.output:
node_name += '\n' + output_name
return pydot.Node(node_name, **kwargs)
return ReallyGetOpNode
def GetBlobNodeProducer(**kwargs):
def ReallyGetBlobNode(node_name, label):
return pydot.Node(node_name, label=label, **kwargs)
return ReallyGetBlobNode
def GetPydotGraph(
operators_or_net,
name=None,
rankdir='LR',
op_node_producer=None,
blob_node_producer=None
):
if op_node_producer is None:
op_node_producer = GetOpNodeProducer(False, **OP_STYLE)
if blob_node_producer is None:
blob_node_producer = GetBlobNodeProducer(**BLOB_STYLE)
operators, name = _rectify_operator_and_name(operators_or_net, name)
graph = pydot.Dot(name, rankdir=rankdir)
pydot_nodes = {}
pydot_node_counts = defaultdict(int)
for op_id, op in enumerate(operators):
op_node = op_node_producer(op, op_id)
graph.add_node(op_node)
# print 'Op: %s' % op.name
# print 'inputs: %s' % str(op.input)
# print 'outputs: %s' % str(op.output)
for input_name in op.input:
if input_name not in pydot_nodes:
input_node = blob_node_producer(
_escape_label(
input_name + str(pydot_node_counts[input_name])),
label=_escape_label(input_name),
)
pydot_nodes[input_name] = input_node
else:
input_node = pydot_nodes[input_name]
graph.add_node(input_node)
graph.add_edge(pydot.Edge(input_node, op_node))
for output_name in op.output:
if output_name in pydot_nodes:
# we are overwriting an existing blob. need to update the count.
pydot_node_counts[output_name] += 1
output_node = blob_node_producer(
_escape_label(
output_name + str(pydot_node_counts[output_name])),
label=_escape_label(output_name),
)
pydot_nodes[output_name] = output_node
graph.add_node(output_node)
graph.add_edge(pydot.Edge(op_node, output_node))
return graph
def GetPydotGraphMinimal(
operators_or_net,
name=None,
rankdir='LR',
minimal_dependency=False,
op_node_producer=None,
):
"""Different from GetPydotGraph, hide all blob nodes and only show op nodes.
If minimal_dependency is set as well, for each op, we will only draw the
edges to the minimal necessary ancestors. For example, if op c depends on
op a and b, and op b depends on a, then only the edge b->c will be drawn
because a->c will be implied.
"""
if op_node_producer is None:
op_node_producer = GetOpNodeProducer(False, **OP_STYLE)
operators, name = _rectify_operator_and_name(operators_or_net, name)
graph = pydot.Dot(name, rankdir=rankdir)
# blob_parents maps each blob name to its generating op.
blob_parents = {}
# op_ancestry records the ancestors of each op.
op_ancestry = defaultdict(set)
for op_id, op in enumerate(operators):
op_node = op_node_producer(op, op_id)
graph.add_node(op_node)
# Get parents, and set up op ancestry.
parents = [
blob_parents[input_name] for input_name in op.input
if input_name in blob_parents
]
op_ancestry[op_node].update(parents)
for node in parents:
op_ancestry[op_node].update(op_ancestry[node])
if minimal_dependency:
# only add nodes that do not have transitive ancestry
for node in parents:
if all(
[node not in op_ancestry[other_node]
for other_node in parents]
):
graph.add_edge(pydot.Edge(node, op_node))
else:
# Add all parents to the graph.
for node in parents:
graph.add_edge(pydot.Edge(node, op_node))
# Update blob_parents to reflect that this op created the blobs.
for output_name in op.output:
blob_parents[output_name] = op_node
return graph
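# Usage sketch (net hypothetical): render an op-only graph; pydot.Dot
# exposes write_png() when graphviz is installed.
#   graph = GetPydotGraphMinimal(my_net, minimal_dependency=True)
#   graph.write_png('net_minimal.png')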
def GetOperatorMapForPlan(plan_def):
operator_map = {}
for net_id, net in enumerate(plan_def.network):
if net.HasField('name'):
operator_map[plan_def.name + "_" + net.name] = net.op
else:
operator_map[plan_def.name + "_network_%d" % net_id] = net.op
return operator_map
def _draw_nets(nets, g):
nodes = []
for i, net in enumerate(nets):
nodes.append(pydot.Node(_escape_label(net)))
g.add_node(nodes[-1])
if i > 0:
g.add_edge(pydot.Edge(nodes[-2], nodes[-1]))
return nodes
def _draw_steps(steps, g, skip_step_edges=False): # noqa
kMaxParallelSteps = 3
def get_label():
label = [step.name + '\n']
if step.report_net:
label.append('Reporter: {}'.format(step.report_net))
if step.should_stop_blob:
label.append('Stopper: {}'.format(step.should_stop_blob))
if step.concurrent_substeps:
label.append('Concurrent')
if step.only_once:
label.append('Once')
return '\n'.join(label)
def substep_edge(start, end):
return pydot.Edge(start, end, arrowhead='dot', style='dashed')
nodes = []
for i, step in enumerate(steps):
parallel = step.concurrent_substeps
nodes.append(pydot.Node(_escape_label(get_label()), **OP_STYLE))
g.add_node(nodes[-1])
if i > 0 and not skip_step_edges:
g.add_edge(pydot.Edge(nodes[-2], nodes[-1]))
if step.network:
sub_nodes = _draw_nets(step.network, g)
elif step.substep:
if parallel:
sub_nodes = _draw_steps(
step.substep[:kMaxParallelSteps], g, skip_step_edges=True)
else:
sub_nodes = _draw_steps(step.substep, g)
else:
raise ValueError('invalid step')
if parallel:
for sn in sub_nodes:
g.add_edge(substep_edge(nodes[-1], sn))
if len(step.substep) > kMaxParallelSteps:
ellipsis = pydot.Node('{} more steps'.format(
len(step.substep) - kMaxParallelSteps), **OP_STYLE)
g.add_node(ellipsis)
g.add_edge(substep_edge(nodes[-1], ellipsis))
else:
g.add_edge(substep_edge(nodes[-1], sub_nodes[0]))
return nodes
def GetPlanGraph(plan_def, name=None, rankdir='TB'):
graph = pydot.Dot(name, rankdir=rankdir)
_draw_steps(plan_def.execution_step, graph)
return graph
def GetGraphInJson(operators_or_net, output_filepath):
operators, _ = _rectify_operator_and_name(operators_or_net, None)
blob_strid_to_node_id = {}
node_name_counts = defaultdict(int)
nodes = []
edges = []
for op_id, op in enumerate(operators):
op_label = op.name + '/' + op.type if op.name else op.type
op_node_id = len(nodes)
nodes.append({
'id': op_node_id,
'label': op_label,
'op_id': op_id,
'type': 'op'
})
for input_name in op.input:
strid = _escape_label(
input_name + str(node_name_counts[input_name]))
if strid not in blob_strid_to_node_id:
input_node = {
'id': len(nodes),
'label': input_name,
'type': 'blob'
}
blob_strid_to_node_id[strid] = len(nodes)
nodes.append(input_node)
else:
input_node = nodes[blob_strid_to_node_id[strid]]
edges.append({
'source': blob_strid_to_node_id[strid],
'target': op_node_id
})
for output_name in op.output:
strid = _escape_label(
output_name + str(node_name_counts[output_name]))
if strid in blob_strid_to_node_id:
# we are overwriting an existing blob. need to update the count.
node_name_counts[output_name] += 1
strid = _escape_label(
output_name + str(node_name_counts[output_name]))
if strid not in blob_strid_to_node_id:
output_node = {
'id': len(nodes),
'label': output_name,
'type': 'blob'
}
blob_strid_to_node_id[strid] = len(nodes)
nodes.append(output_node)
edges.append({
'source': op_node_id,
'target': blob_strid_to_node_id[strid]
})
with open(output_filepath, 'w') as f:
json.dump({'nodes': nodes, 'edges': edges}, f)
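# Hedged usage sketch: GetGraphInJson(my_net, '/tmp/net.json'), where my_net
# is a hypothetical NetDef, writes {"nodes": [...], "edges": [...]} suitable
# for external visualization tools.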
# A dummy minimal PNG image used by GetGraphPngSafe as a
# placeholder when rendering fails.
_DummyPngImage = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00'
b'\x01\x01\x00\x00\x00\x007n\xf9$\x00\x00\x00\nIDATx\x9cc`\x00\x00'
b'\x00\x02\x00\x01H\xaf\xa4q\x00\x00\x00\x00IEND\xaeB`\x82')
def GetGraphPngSafe(func, *args, **kwargs):
"""
Invokes `func` (e.g. GetPydotGraph) with args. If anything fails - returns
and empty image instead of throwing Exception
"""
try:
graph = func(*args, **kwargs)
if not isinstance(graph, pydot.Dot):
raise ValueError("func is expected to return pydot.Dot")
return graph.create_png()
except Exception as e:
logger.error("Failed to draw graph: {}".format(e))
return _DummyPngImage
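# Hedged usage sketch: render a graph without ever raising, e.g. inside a
# monitoring endpoint ('my_net' is hypothetical):
#
#   png_bytes = GetGraphPngSafe(GetPydotGraph, my_net, rankdir='TB')
#   with open('net.png', 'wb') as f:
#       f.write(png_bytes)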
def main():
parser = argparse.ArgumentParser(description="Caffe2 net drawer.")
parser.add_argument(
"--input",
type=str, required=True,
help="The input protobuf file."
)
parser.add_argument(
"--output_prefix",
type=str, default="",
help="The prefix to be added to the output filename."
)
parser.add_argument(
"--minimal", action="store_true",
help="If set, produce a minimal visualization."
)
parser.add_argument(
"--minimal_dependency", action="store_true",
help="If set, only draw minimal dependency."
)
parser.add_argument(
"--append_output", action="store_true",
help="If set, append the output blobs to the operator names.")
parser.add_argument(
"--rankdir", type=str, default="LR",
help="The rank direction of the pydot graph."
)
args = parser.parse_args()
with open(args.input, 'r') as fid:
content = fid.read()
graphs = utils.GetContentFromProtoString(
content, {
caffe2_pb2.PlanDef: lambda x: GetOperatorMapForPlan(x),
caffe2_pb2.NetDef: lambda x: {x.name: x.op},
}
)
for key, operators in viewitems(graphs):
if args.minimal:
graph = GetPydotGraphMinimal(
operators,
name=key,
rankdir=args.rankdir,
                op_node_producer=GetOpNodeProducer(
                    args.append_output, **OP_STYLE),
minimal_dependency=args.minimal_dependency)
else:
graph = GetPydotGraph(
operators,
name=key,
rankdir=args.rankdir,
                op_node_producer=GetOpNodeProducer(
                    args.append_output, **OP_STYLE))
filename = args.output_prefix + graph.get_name() + '.dot'
graph.write(filename, format='raw')
pdf_filename = filename[:-3] + 'pdf'
try:
graph.write_pdf(pdf_filename)
except Exception:
print(
'Error when writing out the pdf file. Pydot requires graphviz '
'to convert dot files to pdf, and you may not have installed '
'graphviz. On ubuntu this can usually be installed with "sudo '
'apt-get install graphviz". We have generated the .dot file '
                'but will not be able to generate the pdf file for now.'
)
if __name__ == '__main__':
main()
|
pytorch-master
|
caffe2/python/net_drawer.py
|
## @package normalizer
# Module caffe2.python.normalizer
class Normalizer(object):
    """
    Adds normalization to train_net for a given parameter. The factor applied
    ahead of regularization is given at initialization.
    The param should be a BlobReference.
    """
    def __init__(self):
        pass
    def __call__(self, net, param):
        return self._run(net, param)
    def _run(self, net, param):
        raise NotImplementedError("Normalizer subclasses must implement _run")
class BatchNormalizer(Normalizer):
def __init__(self, momentum, scale_init_value=1.0):
super(BatchNormalizer, self).__init__()
self._momentum = float(momentum)
self._scale_init_value = float(scale_init_value)
def _run(self, layer_model, param):
return layer_model.BatchNormalization(
param, momentum=self._momentum, scale_init_value=self._scale_init_value
)
class LayerNormalizer(Normalizer):
def __init__(self, epsilon, use_layer_norm_op=True, scale_init_value=1.0):
super(LayerNormalizer, self).__init__()
self._epsilon = float(epsilon)
self._use_layer_norm_op = use_layer_norm_op
self._scale_init_value = float(scale_init_value)
def _run(self, layer_model, param):
return layer_model.LayerNormalization(
param, epsilon=self._epsilon, use_layer_norm_op=self._use_layer_norm_op, scale_init_value=self._scale_init_value
)
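# Hedged usage sketch: normalizers are callables applied by layer-model code,
# roughly as follows ('layer_model' and 'param' are hypothetical):
#
#   norm = LayerNormalizer(epsilon=1e-4)
#   norm(layer_model, param)  # dispatches to layer_model.LayerNormalization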
|
pytorch-master
|
caffe2/python/normalizer.py
|
## @package control_ops_grad
# Module caffe2.python.control_ops_grad
from caffe2.proto import caffe2_pb2
def gen_do_gradient(op, g_output):
"""
Generates gradient Do operator, given forward Do op and a list
of gradient blobs corresponding to forward op's outputs
Returns a gradient op and a list of blobs corresponding to input gradients
"""
from caffe2.python.core import BlobReference
subnet, outer_to_inner_map, inner_to_outer_map, workspace_blob_name = \
_do_op_sanity_check_and_process(op)
assert len(g_output) == len(op.output), \
"Different number of gradient blobs and Do op outputs"
grad_ops, deduped_g_output = dedupe_g_output(op, g_output)
g_output = deduped_g_output
# From the outer net point of view:
# Do is an operator that has some number of inputs and outputs;
# we have to generate a gradient operator that writes into
# corresponding input gradient blobs and has access to inputs, outputs
# and gradient output blobs
# From the inner net point of view:
# Do is an operator with a subnet and blob bindings,
# we need to forward Do's output blob gradients into inner workspace,
# use them to run backward pass generation and forward Do's input blob
# gradients back into outer workspace
op_output = [str(o) for o in op.output]
op_output = op_output[:-1] # remove workspace pointer blob
op_input = [str(i) for i in op.input]
op_input = op_input[:-1] # remove workspace pointer blob
ordered_inner_output_blob_names = [outer_to_inner_map[o] for o in op_output]
backward_pass_initial_grad_map = {}
initial_grad_map = {}
for inner_output_name, outer_grad_output_name in \
zip(ordered_inner_output_blob_names, g_output):
# link inner_output_name to corresponding inner_grad_output_name for
# backward pass generation;
if outer_grad_output_name:
inner_grad_output_name = inner_output_name + "/_DO_OPERATOR_INNER_GRAD_"
backward_pass_initial_grad_map[BlobReference(inner_output_name)] = \
BlobReference(inner_grad_output_name)
initial_grad_map[inner_grad_output_name] = str(outer_grad_output_name)
assert len(initial_grad_map) > 0, "Empty initial gradient map for Do op"
inner_grad_ops, inner_grad_names_map = _gen_subgradient_pass(
subnet, backward_pass_initial_grad_map)
if len(inner_grad_ops) == 0:
return [], []
grad_copy_ops = []
g_input = []
new_op_outputs = []
new_blob_bindings = {}
for outer_input_name in op_input:
inner_input_name = outer_to_inner_map[outer_input_name]
if inner_input_name in inner_grad_names_map:
inner_grad_input_name = inner_grad_names_map[inner_input_name]
outer_grad_input_name = outer_input_name + "_grad"
# It is possible that inner_grad_input_name will need to be
# linked to another outer blob. For example:
#
# // y - param initialized in init_net
# x = ...
# z = ...
# with ops.IfNet(...):
# ops.Add([z, x], y) # inner Do block
# loss = f(..., y, ...)
#
# In this case x, y and z are external for the inner Do block,
# the inputs of the Do block are z and x and the output is y.
# When computing the gradient of input x given the gradient
# of output y it's easy to see that they are equal.
# During the generation of gradient Do operator, we link
# external gradient y (y_grad) to the internal name
# (y/_DO_OPERATOR_INNER_GRAD_) and generate the backward pass
# for the internal Do net. As a result we get gradient operators
# for the gradient Do and gradient map that maps internal Do
# blobs to their computed gradients.
# In this example, gradient map may have blob x linked to
# gradient blob y/_DO_OPERATOR_INNER_GRAD_.
# We should export gradient for x outside of Do, so
# we add a blob mapping from inner gradient blob
# (y/_DO_OPERATOR_INNER_GRAD_) to a new outer name (x_grad).
#
# (Note: since we use transparent blob mapping between outer and
# inner (Do's) workspace, these operations do not involve copying
# but are merely using blobs in outer workspace in the Do's operator
# workspace under (possibly) different names)
#
# At the same time, we need to add a blob mapping from inner name
# y/_DO_OPERATOR_INNER_GRAD_ to the outer blob y_grad
# Hence in this case, we cannot use existing blob mapping scheme
# that requires a bijection between subset of inner blob names and
# a set of all (Do's input and output) outer blob names
# TODO(iliacher): Remove unnecessary blob copying
new_inner_grad_input_name = \
inner_input_name + "/_DO_OPERATOR_INNER_GRAD_COPY_"
grad_copy_ops.append(_prepare_blob_copy_op(
inner_grad_input_name, new_inner_grad_input_name))
new_blob_bindings[new_inner_grad_input_name] = outer_grad_input_name
new_op_outputs.append(outer_grad_input_name)
g_input.append(outer_grad_input_name)
else:
g_input.append(None)
new_op_inputs = []
overwritten_names = set()
saved_local_blob_names = set()
for grad_op in inner_grad_ops:
grad_op_input = [str(i) for i in grad_op.input]
grad_op_output = [str(o) for o in grad_op.output]
for grad_op_input_name in grad_op_input:
if grad_op_input_name in overwritten_names:
continue
# check if this is an external blob
outer_name = inner_to_outer_map.get(grad_op_input_name, None)
if not outer_name:
# check if this is an external gradient blob
outer_name = initial_grad_map.get(grad_op_input_name, None)
if outer_name:
outer_name = str(outer_name)
if outer_name not in new_op_inputs:
new_op_inputs.append(outer_name)
new_blob_bindings[grad_op_input_name] = outer_name
else:
                # this is a local blob, we'll get its value from
                # a saved forward op workspace
saved_local_blob_names.add(grad_op_input_name)
overwritten_names.update(grad_op_output)
# add inner gradient copy ops
inner_grad_ops += grad_copy_ops
gradient_do_def = _prepare_gradient_do_op(
fwd_op=op,
fwd_net=subnet,
grad_ops=inner_grad_ops,
inputs=new_op_inputs,
outputs=new_op_outputs,
blob_bindings=new_blob_bindings,
saved_fwd_blobs=saved_local_blob_names,
workspace_blob_name=workspace_blob_name)
grad_ops.append(gradient_do_def)
_do_op_sanity_check_and_process(gradient_do_def)
return grad_ops, g_input
def dedupe_g_output(op, g_output):
    # When generating a gradient op it's possible to receive the same gradient
    # blob corresponding to different forward op output blobs. The Do operator
    # requires a bijection between inner and outer names, so make sure we
    # deduplicate.
grad_ops = []
deduped_g_output = []
init_grad_map = {}
for output_name, grad_name in zip(op.output, g_output):
if not grad_name:
deduped_g_output.append(grad_name)
continue
if output_name in init_grad_map:
deduped_g_output.append(init_grad_map[output_name])
else:
if grad_name not in init_grad_map.values():
init_grad_map[output_name] = grad_name
deduped_g_output.append(grad_name)
else:
deduped_grad_name = output_name + "_" + grad_name + "_DEDUP"
assert deduped_grad_name not in init_grad_map.values()
grad_copy_op = caffe2_pb2.OperatorDef()
grad_copy_op.type = "Copy"
grad_copy_op.input.extend([grad_name])
grad_copy_op.output.extend([deduped_grad_name])
grad_ops.append(grad_copy_op)
deduped_g_output.append(deduped_grad_name)
init_grad_map[output_name] = deduped_grad_name
return grad_ops, deduped_g_output
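# Hedged example of the dedup above: if op.output == ['o1', 'o2'] and
# g_output == ['g', 'g'], the second occurrence is rerouted through a Copy op
# so the name mapping stays a bijection: deduped_g_output becomes
# ['g', 'o2_g_DEDUP'] plus one Copy('g' -> 'o2_g_DEDUP') in grad_ops.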
def gen_while_gradient(op, g_output):
"""
Generates gradient While operator
"""
from caffe2.python.core import BlobReference
assert op.type == "While", "Expected While op"
assert len(op.input) > 0, "Expected at least one input in While op"
assert len(op.output) == len(g_output), \
"Different number of gradient blobs and While op outputs"
grad_ops, deduped_g_output = dedupe_g_output(op, g_output)
g_output = deduped_g_output
init_grad_map = {}
op_output = [str(o) for o in op.output]
for output_name, grad_output_name in zip(op_output, g_output):
if grad_output_name:
init_grad_map[BlobReference(output_name)] = \
BlobReference(grad_output_name)
assert len(init_grad_map) > 0, "Empty initial gradient map for While op"
loop_net = _get_net_argument(op, "loop_net")
assert loop_net, "Expected loop subnet in While op"
assert len(loop_net.op) == 1 and loop_net.op[0].type == "Do", \
"Gradient While op requires single Do op as a loop body"
do_op = loop_net.op[0]
do_args = _get_do_arguments(do_op)
assert "reuse_workspace" not in do_args or not do_args["reuse_workspace"], \
"Gradient While op requires Do loop body op without reuse_workspace set"
assert len(do_op.output) > 0, "Expected Do op with at least one output"
workspace_blob = do_op.output[-1]
loop_grad_net, loop_grad_map, loop_input_names, loop_output_names = \
_gen_subnet_gradient(loop_net, init_grad_map)
assert loop_grad_net, "Failed to get gradient net for loop body in While op"
grad_ops += _prepare_gradient_while_ops(
fwd_op=op,
input_names=loop_input_names,
output_names=loop_output_names,
loop_grad_net=loop_grad_net,
workspace_blob=workspace_blob,
init_grad_map=init_grad_map,
loop_grad_map=loop_grad_map)
op_input = [str(i) for i in op.input]
g_input = [loop_grad_map.get(i, None) for i in op_input]
return grad_ops, g_input
# Constructs gradient While op, arguments:
# fwd_op - forward While op
# input_names - input blob names for a gradient op
# output_names - output blob names for a gradient op
# loop_grad_net - gradient loop body net
# workspace_blob - blob that holds forward workspaces stack
# init_grad_map - initial gradient to forward blob map
# loop_grad_map - gradient blob map for loop's body
def _prepare_gradient_while_ops(
fwd_op, input_names, output_names, loop_grad_net, workspace_blob,
init_grad_map, loop_grad_map):
gradient_while_def = caffe2_pb2.OperatorDef()
gradient_while_def.CopyFrom(fwd_op)
if gradient_while_def.name:
gradient_while_def.name += "_grad"
loop_net_arg = caffe2_pb2.Argument()
loop_net_arg.name = "loop_net"
loop_net_arg.n.CopyFrom(loop_grad_net)
cond_net_arg = caffe2_pb2.Argument()
cond_net_arg.name = "cond_net"
from caffe2.python.core import Net, BlobReference
    # Construct condition net - check that there are still forward workspaces
    # left, using the HasScope op
cond_net = Net('gradient_loop_cond_net')
cond_init_net = Net('gradient_loop_cond_net_init')
cond_blob = cond_net.NextScopedBlob(cond_net.Name() + '/cond')
cond_init_net.HasScope(workspace_blob, cond_blob)
cond_net.HasScope(workspace_blob, cond_blob)
for blob, init_grad_blob in init_grad_map.items():
blob_name = str(blob)
init_grad_blob_name = str(init_grad_blob)
if blob_name in loop_grad_map and \
loop_grad_map[blob_name] != init_grad_blob_name:
cond_net.Copy(
BlobReference(loop_grad_map[blob_name]), init_grad_blob)
cond_init_net.Copy(
init_grad_blob, BlobReference(loop_grad_map[blob_name]))
cond_net_arg.n.CopyFrom(cond_net.Proto())
del gradient_while_def.arg[:]
gradient_while_def.arg.extend([loop_net_arg, cond_net_arg])
del gradient_while_def.control_input[:]
del gradient_while_def.input[:]
gradient_while_def.input.extend(
[str(cond_blob).encode('utf-8')] + list(input_names))
del gradient_while_def.output[:]
gradient_while_def.output.extend(output_names)
gradient_while_def.is_gradient_op = True
return [o for o in cond_init_net.Proto().op] + [gradient_while_def]
def _get_do_arguments(do_op):
assert do_op.type == "Do", "Expected Do op"
args = {}
for arg in do_op.arg:
if not arg.name:
continue
if arg.name == "net":
assert arg.n, "Expected non empty net argument"
args["net"] = arg.n
elif arg.name == "reuse_workspace":
assert arg.i, "Expected non empty reuse_workspace argument"
args["reuse_workspace"] = bool(arg.i)
elif arg.name == "inner_blobs":
assert arg.strings, "Expected non empty inner_blobs argument"
args["inner_blobs"] = arg.strings
elif arg.name == "outer_blobs_idx":
assert arg.ints, "Expected non empty outer_blobs_idx argument"
args["outer_blobs_idx"] = arg.ints
return args
def gen_if_gradient(op, g_output):
"""
Generates gradient If operator, given forward If op and a list
of gradient blobs corresponding to forward op's outputs
Returns a gradient op and a list of blobs corresponding to input gradients
"""
from caffe2.python.core import BlobReference
assert op.type == "If", "Expected If op"
# first input is the condition blob
assert len(op.input) > 0, "Expected at least one input in If op"
assert len(op.output) == len(g_output), \
"Different number of gradient blobs and If op outputs"
grad_ops, deduped_g_output = dedupe_g_output(op, g_output)
g_output = deduped_g_output
init_grad_map = {} # map from if's output blob to output gradient blob
op_input = [str(i) for i in op.input]
op_output = [str(o) for o in op.output]
for output_name, grad_output_name in zip(op_output, g_output):
if grad_output_name:
init_grad_map[BlobReference(output_name)] = \
BlobReference(grad_output_name)
# shouldn't call without at least one output gradient available
assert len(init_grad_map) > 0, "Empty initial gradient map for If op"
grad_map = {} # map from blob to gradient blob
then_net = _get_net_argument(op, "then_net")
assert then_net, "Expected then subnet in If op"
then_grad_net, then_grad_map, then_input_names, then_output_names = \
_gen_subnet_gradient(then_net, init_grad_map)
assert then_grad_net, "Failed to get gradient net for then in If op"
grad_map.update(then_grad_map)
else_input_names = set()
else_output_names = set()
else_grad_map = {}
else_grad_net = None
else_net = _get_net_argument(op, "else_net")
if else_net:
else_grad_net, else_grad_map, else_input_names, else_output_names = \
_gen_subnet_gradient(else_net, init_grad_map)
assert else_grad_net, "Failed to get gradient net for else in If op"
        # consider the case where the else branch doesn't update a blob's
        # gradient and keeps the original from init_grad_map, while the then
        # branch updates the gradient
for else_blob, else_grad_blob in else_grad_map.items():
if else_blob in then_grad_map:
then_grad_blob = then_grad_map[else_blob]
                # if both then and else branches have a grad blob name for the
                # same blob and the grad names are different, then one of the
                # branches doesn't use the blob and has the original grad blob
                # name in its grad map, and the other branch uses the blob and
                # has a <blob_name>_grad name in its grad map (which might
                # differ from the original grad blob)
if then_grad_blob != else_grad_blob:
init_grad_name = init_grad_map[else_blob] \
if else_blob in init_grad_map else None
if then_grad_blob == init_grad_name:
grad_map[else_blob] = else_grad_blob
elif else_grad_blob == init_grad_name:
grad_map[else_blob] = then_grad_blob
else:
raise "Unexpected grad blob name " + else_blob + ", " + \
else_grad_blob + ", " + then_grad_blob
else:
grad_map[else_blob] = else_grad_blob
# make sure gradients of blobs that were not computed
# by the selected if's branch are initialized with zeros
then_other_output_names = \
then_output_names - (then_output_names & else_output_names)
then_other_grad_output_names = set(
[o for o in then_other_output_names if o in then_grad_map.values()])
zero_then = _gen_grad_zero_init_ops(
init_grad_map, then_grad_map, then_other_grad_output_names)
if else_grad_net:
else_grad_net.op.extend(zero_then)
elif len(zero_then) > 0:
else_grad_net = caffe2_pb2.NetDef()
else_grad_net.CopyFrom(then_grad_net)
if else_grad_net.name:
else_grad_net.name += "_auto_else_zero_blobs_"
del else_grad_net.op[:]
else_grad_net.op.extend(zero_then)
del else_grad_net.external_input[:]
del else_grad_net.external_output[:]
else_other_output_names = \
else_output_names - (then_output_names & else_output_names)
else_other_grad_output_names = set(
[o for o in else_other_output_names if o in else_grad_map.values()])
zero_else = _gen_grad_zero_init_ops(
init_grad_map, else_grad_map, else_other_grad_output_names)
then_grad_net.op.extend(zero_else)
output_names = list(then_output_names | else_output_names)
input_names = then_input_names | else_input_names
# make sure condition blob is the first in the list
    input_names = [op_input[0]] + list(input_names - {op_input[0]})
gradient_if_def = _prepare_gradient_if_op(
fwd_op=op,
input_names=input_names,
output_names=output_names,
then_grad_net=then_grad_net,
else_grad_net=else_grad_net)
g_input = [grad_map.get(i, None) for i in op_input]
return grad_ops + [gradient_if_def], g_input
def _gen_subnet_gradient(subnet, init_grad):
grad_ops, grad_names_map = _gen_subgradient_pass(
subnet, init_grad)
output_names = set()
input_names = set()
for grad_op in grad_ops:
for grad_op_input in grad_op.input:
if str(grad_op_input) not in output_names:
input_names.add(str(grad_op_input))
for grad_op_output in grad_op.output:
output_names.add(str(grad_op_output))
gradient_net_def = caffe2_pb2.NetDef()
gradient_net_def.CopyFrom(subnet)
if gradient_net_def.name:
gradient_net_def.name += "_grad"
del gradient_net_def.op[:]
gradient_net_def.op.extend(grad_ops)
del gradient_net_def.external_input[:]
del gradient_net_def.external_output[:]
return gradient_net_def, grad_names_map, input_names, output_names
def _get_net_argument(op, net_name):
for arg in op.arg:
if arg.name and arg.name == net_name:
assert arg.n, "Expected non empty net argument " + net_name
return arg.n
return None
def getNetArgument(op, net_name):
"""A wrapper for external call"""
return _get_net_argument(op, net_name)
def _gen_subgradient_pass(subnet, init_grad):
from caffe2.python.core import IR
subnet_ir = IR(subnet.op)
grad_ops, grad_blob_map = \
subnet_ir.GetBackwardPass(init_grad)
grad_names_map = {}
for b, g in grad_blob_map.items():
grad_names_map[str(b)] = str(g)
return grad_ops, grad_names_map
def _do_op_sanity_check_and_process(op):
assert op.type == "Do", "Expected Do op"
subnet = _get_net_argument(op, "net")
assert subnet, "No net argument found in Do op"
inner_blobs = None
outer_blobs_idx = None
for arg in op.arg:
if arg.name and arg.name == "inner_blobs":
assert not inner_blobs, "inner_blobs redefinition"
assert arg.strings and len(arg.strings) > 0, \
"Empty inner_blobs argument in Do op"
inner_blobs = [s.decode('utf-8') for s in arg.strings]
if arg.name and arg.name == "outer_blobs_idx":
assert not outer_blobs_idx, "outer_blobs_idx redefinition"
assert arg.ints and len(arg.ints) > 0, \
"Empty outer_blobs_idx argument in Do op"
outer_blobs_idx = arg.ints
if inner_blobs and outer_blobs_idx:
break
assert inner_blobs, "No inner_blobs argument found in Do op"
assert outer_blobs_idx, "No outer_blobs_idx argument found in Do op"
assert len(inner_blobs) == len(outer_blobs_idx), \
"Arguments inner_blobs and outer_blobs_idx of different length in Do op"
all_inner_blobs = set(inner_blobs)
assert len(all_inner_blobs) == len(inner_blobs), \
"Found duplicates in inner_blobs in Do op"
op_input = [str(i) for i in op.input]
assert len(op_input) > 0, "Expected at least one input blob"
# remove last input blob that holds pointer to workspace
input_workspace_blob_name = op_input[-1]
op_input = op_input[:-1]
op_output = [str(o) for o in op.output]
assert len(op_output) > 0, "Expected at least one output blob"
# remove last output blob that holds pointer to workspace
workspace_blob_name = op_output[-1]
assert input_workspace_blob_name == workspace_blob_name, \
"Expected same input/output workspace blob"
op_output = op_output[:-1]
all_op_input_blob_names = set(op_input)
assert len(all_op_input_blob_names) == len(op_input), \
"Found duplicates in Do op inputs"
all_op_output_blob_names = set(op_output)
assert len(all_op_output_blob_names) == len(op_output), \
"Found duplicates in Do op outputs"
ordered_outer_blob_names = op_input + op_output
all_outer_blob_names = set(ordered_outer_blob_names)
used_outer_blob_names = set()
outer_to_inner_map = {}
inner_to_outer_map = {}
for inner_name, outer_blob_idx in zip(inner_blobs, outer_blobs_idx):
assert outer_blob_idx >= 0 and \
outer_blob_idx < len(ordered_outer_blob_names), \
"Outer blob index is out of bounds in Do op"
outer_name = ordered_outer_blob_names[outer_blob_idx]
        assert outer_name not in used_outer_blob_names, \
            "Reuse of outer blob name " + outer_name + " in Do op"
used_outer_blob_names.add(outer_name)
outer_to_inner_map[outer_name] = inner_name
inner_to_outer_map[inner_name] = outer_name
assert len(used_outer_blob_names) == len(all_outer_blob_names), \
"Not all outer blob names are used in blob bindings in Do op"
return subnet, outer_to_inner_map, inner_to_outer_map, workspace_blob_name
def _prepare_blob_copy_op(from_name, to_name):
copy_op_def = caffe2_pb2.OperatorDef()
copy_op_def.type = "Copy"
copy_op_def.input.extend([from_name])
copy_op_def.output.extend([to_name])
return copy_op_def
def _prepare_gradient_do_op(
fwd_op, fwd_net, grad_ops, inputs, outputs, blob_bindings, saved_fwd_blobs,
workspace_blob_name):
gradient_net_def = caffe2_pb2.NetDef()
gradient_net_def.CopyFrom(fwd_net)
if gradient_net_def.name:
gradient_net_def.name += "_grad"
del gradient_net_def.op[:]
gradient_net_def.op.extend(grad_ops)
del gradient_net_def.external_input[:]
del gradient_net_def.external_output[:]
gradient_do_def = caffe2_pb2.OperatorDef()
gradient_do_def.CopyFrom(fwd_op)
    if gradient_do_def.name:
gradient_do_def.name += "_grad"
del gradient_do_def.input[:]
gradient_do_def.input.extend(inputs)
# workspace pointer blob
gradient_do_def.input.append(workspace_blob_name)
del gradient_do_def.output[:]
gradient_do_def.output.extend(outputs)
# workspace pointer blob
gradient_do_def.output.append(workspace_blob_name)
net_arg = caffe2_pb2.Argument()
net_arg.name = "net"
net_arg.n.CopyFrom(gradient_net_def)
ordered_new_outer_names = inputs + outputs
inner_blobs = blob_bindings.keys()
new_outer_blobs_idx = [ordered_new_outer_names.index(blob_bindings[b])
for b in inner_blobs]
inner_blobs_arg = caffe2_pb2.Argument()
inner_blobs_arg.name = "inner_blobs"
inner_blobs_arg.strings.extend([b.encode('utf-8') for b in inner_blobs])
outer_blobs_idx_arg = caffe2_pb2.Argument()
outer_blobs_idx_arg.name = "outer_blobs_idx"
outer_blobs_idx_arg.ints.extend(new_outer_blobs_idx)
saved_blobs_arg = caffe2_pb2.Argument()
saved_blobs_arg.name = "saved_fwd_blobs"
saved_blobs_arg.strings.extend(
[b.encode('utf-8') for b in saved_fwd_blobs])
del gradient_do_def.arg[:]
gradient_do_def.arg.extend([
net_arg, inner_blobs_arg, outer_blobs_idx_arg, saved_blobs_arg])
del gradient_do_def.control_input[:]
gradient_do_def.is_gradient_op = True
return gradient_do_def
def _gen_grad_zero_init_ops(init_grad_map, grad_map, grad_output_names):
grad_init_ops = []
for grad_output in grad_output_names:
# get the corresponding output name blob and use it in ConstantFill
# so that grad_output has the same shape
output_name = None
for o, g in grad_map.items():
if g == grad_output:
output_name = o
break
assert output_name, "Unknown gradient output " + grad_output
grad_init_op = None
# make sure that we do not overwrite existing gradients with zeros
if output_name in init_grad_map:
init_grad_name = init_grad_map[output_name]
# in case we use a different gradient blob name, copy gradient
if init_grad_name != grad_output:
grad_init_op = caffe2_pb2.OperatorDef()
grad_init_op.type = "Copy"
grad_init_op.input.extend([str(init_grad_name)])
grad_init_op.output.extend([str(grad_output)])
else:
grad_init_op = caffe2_pb2.OperatorDef()
grad_init_op.type = "ConstantFill"
grad_init_op.input.extend([output_name])
grad_init_op.output.extend([grad_output])
value_arg = caffe2_pb2.Argument()
value_arg.name = "value"
value_arg.f = 0.0
grad_init_op.arg.extend([value_arg])
if grad_init_op:
grad_init_ops.append(grad_init_op)
return grad_init_ops
def _prepare_gradient_if_op(
fwd_op, input_names, output_names, then_grad_net, else_grad_net):
gradient_if_def = caffe2_pb2.OperatorDef()
gradient_if_def.CopyFrom(fwd_op)
del gradient_if_def.input[:]
gradient_if_def.input.extend(input_names)
del gradient_if_def.output[:]
gradient_if_def.output.extend(output_names)
then_net_arg = caffe2_pb2.Argument()
then_net_arg.name = "then_net"
then_net_arg.n.CopyFrom(then_grad_net)
gradient_args = [then_net_arg]
if else_grad_net:
else_net_arg = caffe2_pb2.Argument()
else_net_arg.name = "else_net"
else_net_arg.n.CopyFrom(else_grad_net)
gradient_args.append(else_net_arg)
del gradient_if_def.arg[:]
gradient_if_def.arg.extend(gradient_args)
if gradient_if_def.name:
gradient_if_def.name += "_grad"
del gradient_if_def.control_input[:]
gradient_if_def.is_gradient_op = True
return gradient_if_def
def disambiguate_grad_if_op_output(grad_op, idx, new_grad_output):
then_net = _get_net_argument(grad_op, "then_net")
old_grad_out_match = grad_op.output[idx]
for op in then_net.op:
for i, out in enumerate(op.output):
if out == old_grad_out_match:
op.output[i] = new_grad_output
else_net = _get_net_argument(grad_op, "else_net")
if else_net:
for op in else_net.op:
for i, out in enumerate(op.output):
if out == old_grad_out_match:
op.output[i] = new_grad_output
grad_op.output[idx] = new_grad_output
|
pytorch-master
|
caffe2/python/control_ops_grad.py
|
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import numpy as np
def FakeQuantization8BitsRowwise(data):
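    # Reference fake-quantization: per row, values are mapped to [0, 255] with
    # scale = (max - min) / 255 and bias = min, rounded, then mapped back.
    # Note (an observation from the tests below, not original documentation):
    # a constant row makes scale == 0, so inv_scale is inf and the result is
    # NaN; the const-row test patches that row in its ground truth instead of
    # handling it here.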
min_el = np.min(data, axis=1)
max_el = np.max(data, axis=1)
scale = (max_el - min_el) / 255.
bias = min_el
inv_scale = 1. / scale
data = data.T
data = np.round((data - bias) * inv_scale) * scale + bias
return data.T
class TestQuantize8bits(hu.HypothesisTestCase):
def test_quantize_op(self):
op = core.CreateOperator(
'FloatToRowwiseQuantized8Bits',
['input_data'],
['quantized_input', 'scale_bias'])
input_data = np.float32(np.asarray([[801., 786, 235.2, 2353.3434],
[5., 11., 9., -2.]]))
workspace.FeedBlob('input_data', input_data)
workspace.RunOperatorOnce(op)
op1 = core.CreateOperator(
'Rowwise8BitQuantizedToFloat',
['quantized_input', 'scale_bias'],
['dequantized_input'])
workspace.RunOperatorOnce(op1)
result = workspace.FetchBlob('dequantized_input')
ground_truth = FakeQuantization8BitsRowwise(input_data)
np.testing.assert_array_almost_equal(
result, ground_truth)
def test_quantize_tensor_with_const_row_op(self):
op = core.CreateOperator(
'FloatToRowwiseQuantized8Bits',
['input_data'],
['quantized_input', 'scale_bias'])
input_data = np.float32(np.asarray([[801., 786, 235.2, 2353.3434],
[9., 9., 9., 9.]]))
workspace.FeedBlob('input_data', input_data)
workspace.RunOperatorOnce(op)
op1 = core.CreateOperator(
'Rowwise8BitQuantizedToFloat',
['quantized_input', 'scale_bias'],
['dequantized_input'])
workspace.RunOperatorOnce(op1)
result = workspace.FetchBlob('dequantized_input')
ground_truth = FakeQuantization8BitsRowwise(input_data)
ground_truth[1, :] = 9.
np.testing.assert_array_almost_equal(
result, ground_truth)
def test_SparseSegmentUint8(self):
init_net = core.Net("init")
net = core.Net("bench")
size = 10**3
isize = 10**2
# input preparation
d = init_net.UniformFill([], shape=[size, 32])
w = init_net.UniformFill([], shape=[isize, ])
i = init_net.UniformIntFill([], shape=[isize], max=size - 1)
i = init_net.Cast([i], to=core.DataType.INT64)
l = init_net.ConstantFill(
[],
['l'],
shape=[isize // 10],
value=10,
dtype=core.DataType.INT32,
)
net.FloatToRowwiseQuantized8Bits([d],
['quantized_data', 'scale_bias'])
net.Rowwise8BitQuantizedToFloat(['quantized_data', 'scale_bias'],
['dequantized_data'])
# SparseLengthsWeightedSum
net.SparseLengthsWeightedSum(['dequantized_data', w, i, l],
['PositionWeighted_0'], engine='fp16')
net.SparseLengthsWeightedSum8BitsRowwise(
['quantized_data', w, i, l, 'scale_bias'],
['PositionWeighted_1'])
# SparseLengthsSum
net.SparseLengthsSum(['dequantized_data', i, l],
['Sum_0'], engine='fp16')
net.SparseLengthsSum8BitsRowwise(
['quantized_data', i, l, 'scale_bias'],
['Sum_1'])
# SparseLengthsWeightedMean
# net.SparseLengthsWeightedMean(['dequantized_data', w, i, l],
# ['WeightedMean_0'])
# net.SparseLengthsWeightedMean8BitsRowwise(
# ['quantized_data', w, i, l, 'scale_bias'],
# ['WeightedMean_1'])
# SparseLengthsMean
net.SparseLengthsMean(['dequantized_data', i, l],
['Mean_0'], engine='fp16')
net.SparseLengthsMean8BitsRowwise(
['quantized_data', i, l, 'scale_bias'],
['Mean_1'])
gathered_w = net.Gather(['quantized_data', i],
engine='fp16')
gathered_scale_bias = net.Gather(['scale_bias', i],
engine='fp16')
net.Rowwise8BitQuantizedToFloat(
[gathered_w, gathered_scale_bias],
'Gathered_1')
net.Gather(['dequantized_data', i], 'Gathered_0')
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
workspace.RunNetOnce(init_net)
workspace.CreateNet(net)
workspace.RunNetOnce(net)
PositionWeighted_1 = workspace.FetchBlob('PositionWeighted_1')
ground_truth_posw = workspace.FetchBlob('PositionWeighted_0')
np.testing.assert_array_almost_equal(PositionWeighted_1,
ground_truth_posw, decimal=5)
Sum_1 = workspace.FetchBlob('Sum_1')
ground_truth_sum = workspace.FetchBlob('Sum_0')
np.testing.assert_array_almost_equal(Sum_1,
ground_truth_sum, decimal=5)
Mean_1 = workspace.FetchBlob('Mean_1')
ground_truth_mean = workspace.FetchBlob('Mean_0')
np.testing.assert_array_almost_equal(Mean_1,
ground_truth_mean, decimal=5)
Gathered_1 = workspace.FetchBlob('Gathered_1')
ground_truth_gathered = workspace.FetchBlob('Gathered_0')
np.testing.assert_array_almost_equal(Gathered_1,
ground_truth_gathered, decimal=5)
|
pytorch-master
|
caffe2/python/lengths_reducer_rowwise_8bit_ops_test.py
|
## @package extension_loader
# Module caffe2.python.extension_loader
import contextlib
import ctypes
import sys
_set_global_flags = (
hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'))
@contextlib.contextmanager
def DlopenGuard(extra_flags=ctypes.RTLD_GLOBAL):
if _set_global_flags:
old_flags = sys.getdlopenflags()
sys.setdlopenflags(old_flags | extra_flags)
    # in case we dlopen something that doesn't exist, yield will fail and throw;
    # we need to remember to reset the old flags to clean up, otherwise the
    # RTLD_GLOBAL flag will stick around and create symbol conflict problems
try:
yield
finally:
if _set_global_flags:
sys.setdlopenflags(old_flags)
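# Hedged usage sketch: load a shared library with RTLD_GLOBAL so the symbols
# it registers are visible process-wide ('libcustom_ops.so' is hypothetical):
#
#   with DlopenGuard():
#       ctypes.CDLL('libcustom_ops.so')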
|
pytorch-master
|
caffe2/python/extension_loader.py
|
## @package workspace
# Module caffe2.python.lazy
_import_lazy_calls = []
def RegisterLazyImport(lazy):
global _import_lazy_calls
_import_lazy_calls += [lazy]
def TriggerLazyImport():
global _import_lazy_calls
for lazy in _import_lazy_calls:
lazy()
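# Hedged usage sketch: a module can defer a heavy import until ops are first
# needed ('_load_extra_ops' is a hypothetical zero-argument callable):
#
#   RegisterLazyImport(_load_extra_ops)
#   ...
#   TriggerLazyImport()  # runs every registered callable, in order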
|
pytorch-master
|
caffe2/python/lazy.py
|
from caffe2.python.dataio import (
CompositeReader,
CompositeReaderBuilder,
ReaderBuilder,
ReaderWithDelay,
ReaderWithLimit,
ReaderWithTimeLimit,
)
from caffe2.python.dataset import Dataset
from caffe2.python.db_file_reader import DBFileReader
from caffe2.python.pipeline import pipe
from caffe2.python.schema import Struct, NewRecord, FeedRecord
from caffe2.python.session import LocalSession
from caffe2.python.task import TaskGroup, final_output, WorkspaceType
from caffe2.python.test_util import TestCase
from caffe2.python.cached_reader import CachedReader
from caffe2.python import core, workspace, schema
from caffe2.python.net_builder import ops
import numpy as np
import numpy.testing as npt
import os
import shutil
import unittest
import tempfile
def make_source_dataset(ws, size=100, offset=0, name=None):
name = name or "src"
src_init = core.Net("{}_init".format(name))
with core.NameScope(name):
src_values = Struct(('label', np.array(range(offset, offset + size))))
src_blobs = NewRecord(src_init, src_values)
src_ds = Dataset(src_blobs, name=name)
FeedRecord(src_blobs, src_values, ws)
ws.run(src_init)
return src_ds
def make_destination_dataset(ws, schema, name=None):
name = name or 'dst'
dst_init = core.Net('{}_init'.format(name))
with core.NameScope(name):
dst_ds = Dataset(schema, name=name)
dst_ds.init_empty(dst_init)
ws.run(dst_init)
return dst_ds
class TestReaderBuilder(ReaderBuilder):
def __init__(self, name, size, offset):
self._schema = schema.Struct(
('label', schema.Scalar()),
)
self._name = name
self._size = size
self._offset = offset
self._src_ds = None
def schema(self):
return self._schema
def setup(self, ws):
self._src_ds = make_source_dataset(ws, offset=self._offset, size=self._size,
name=self._name)
return {}
def new_reader(self, **kwargs):
return self._src_ds
class TestCompositeReader(TestCase):
@unittest.skipIf(os.environ.get('JENKINS_URL'), 'Flaky test on Jenkins')
def test_composite_reader(self):
ws = workspace.C.Workspace()
session = LocalSession(ws)
num_srcs = 3
names = ["src_{}".format(i) for i in range(num_srcs)]
size = 100
offsets = [i * size for i in range(num_srcs)]
src_dses = [make_source_dataset(ws, offset=offset, size=size, name=name)
for (name, offset) in zip(names, offsets)]
data = [ws.fetch_blob(str(src.field_blobs[0])) for src in src_dses]
# Sanity check we didn't overwrite anything
for d, offset in zip(data, offsets):
npt.assert_array_equal(d, range(offset, offset + size))
# Make an identically-sized empty destination dataset
dst_ds_schema = schema.Struct(
*[
(name, src_ds.content().clone_schema())
for name, src_ds in zip(names, src_dses)
]
)
dst_ds = make_destination_dataset(ws, dst_ds_schema)
with TaskGroup() as tg:
reader = CompositeReader(names,
[src_ds.reader() for src_ds in src_dses])
pipe(reader, dst_ds.writer(), num_runtime_threads=3)
session.run(tg)
for i in range(num_srcs):
written_data = sorted(
ws.fetch_blob(str(dst_ds.content()[names[i]].label())))
npt.assert_array_equal(data[i], written_data, "i: {}".format(i))
@unittest.skipIf(os.environ.get('JENKINS_URL'), 'Flaky test on Jenkins')
def test_composite_reader_builder(self):
ws = workspace.C.Workspace()
session = LocalSession(ws)
num_srcs = 3
names = ["src_{}".format(i) for i in range(num_srcs)]
size = 100
offsets = [i * size for i in range(num_srcs)]
src_ds_builders = [
TestReaderBuilder(offset=offset, size=size, name=name)
for (name, offset) in zip(names, offsets)
]
# Make an identically-sized empty destination dataset
dst_ds_schema = schema.Struct(
*[
(name, src_ds_builder.schema())
for name, src_ds_builder in zip(names, src_ds_builders)
]
)
dst_ds = make_destination_dataset(ws, dst_ds_schema)
with TaskGroup() as tg:
reader_builder = CompositeReaderBuilder(
names, src_ds_builders)
reader_builder.setup(ws=ws)
pipe(reader_builder.new_reader(), dst_ds.writer(),
num_runtime_threads=3)
session.run(tg)
for name, offset in zip(names, offsets):
written_data = sorted(
ws.fetch_blob(str(dst_ds.content()[name].label())))
npt.assert_array_equal(range(offset, offset + size), written_data,
"name: {}".format(name))
class TestReaderWithLimit(TestCase):
def test_runtime_threads(self):
ws = workspace.C.Workspace()
session = LocalSession(ws)
src_ds = make_source_dataset(ws)
totals = [None] * 3
def proc(rec):
# executed once
with ops.task_init():
counter1 = ops.CreateCounter([], ['global_counter'])
counter2 = ops.CreateCounter([], ['global_counter2'])
counter3 = ops.CreateCounter([], ['global_counter3'])
# executed once per thread
with ops.task_instance_init():
task_counter = ops.CreateCounter([], ['task_counter'])
# executed on each iteration
ops.CountUp(counter1)
ops.CountUp(task_counter)
# executed once per thread
with ops.task_instance_exit():
with ops.loop(ops.RetrieveCount(task_counter)):
ops.CountUp(counter2)
ops.CountUp(counter3)
# executed once
with ops.task_exit():
totals[0] = final_output(ops.RetrieveCount(counter1))
totals[1] = final_output(ops.RetrieveCount(counter2))
totals[2] = final_output(ops.RetrieveCount(counter3))
return rec
# Read full data set from original reader
with TaskGroup() as tg:
pipe(src_ds.reader(), num_runtime_threads=8, processor=proc)
session.run(tg)
self.assertEqual(totals[0].fetch(), 100)
self.assertEqual(totals[1].fetch(), 100)
self.assertEqual(totals[2].fetch(), 8)
# Read with a count-limited reader
with TaskGroup() as tg:
q1 = pipe(src_ds.reader(), num_runtime_threads=2)
q2 = pipe(
ReaderWithLimit(q1.reader(), num_iter=25),
num_runtime_threads=3)
pipe(q2, processor=proc, num_runtime_threads=6)
session.run(tg)
self.assertEqual(totals[0].fetch(), 25)
self.assertEqual(totals[1].fetch(), 25)
self.assertEqual(totals[2].fetch(), 6)
def _test_limit_reader_init_shared(self, size):
ws = workspace.C.Workspace()
session = LocalSession(ws)
# Make source dataset
src_ds = make_source_dataset(ws, size=size)
# Make an identically-sized empty destination Dataset
dst_ds = make_destination_dataset(ws, src_ds.content().clone_schema())
return ws, session, src_ds, dst_ds
def _test_limit_reader_shared(self, reader_class, size, expected_read_len,
expected_read_len_threshold,
expected_finish, num_threads, read_delay,
**limiter_args):
ws, session, src_ds, dst_ds = \
self._test_limit_reader_init_shared(size)
# Read without limiter
# WorkspaceType.GLOBAL is required because we are fetching
# reader.data_finished() after the TaskGroup finishes.
with TaskGroup(workspace_type=WorkspaceType.GLOBAL) as tg:
if read_delay > 0:
reader = reader_class(ReaderWithDelay(src_ds.reader(),
read_delay),
**limiter_args)
else:
reader = reader_class(src_ds.reader(), **limiter_args)
pipe(reader, dst_ds.writer(), num_runtime_threads=num_threads)
session.run(tg)
read_len = len(sorted(ws.blobs[str(dst_ds.content().label())].fetch()))
# Do a fuzzy match (expected_read_len +/- expected_read_len_threshold)
# to eliminate flakiness for time-limited tests
self.assertGreaterEqual(
read_len,
expected_read_len - expected_read_len_threshold)
self.assertLessEqual(
read_len,
expected_read_len + expected_read_len_threshold)
self.assertEqual(
sorted(ws.blobs[str(dst_ds.content().label())].fetch()),
list(range(read_len))
)
self.assertEqual(ws.blobs[str(reader.data_finished())].fetch(),
expected_finish)
def test_count_limit_reader_without_limit(self):
# No iter count specified, should read all records.
self._test_limit_reader_shared(ReaderWithLimit,
size=100,
expected_read_len=100,
expected_read_len_threshold=0,
expected_finish=True,
num_threads=8,
read_delay=0,
num_iter=None)
def test_count_limit_reader_with_zero_limit(self):
# Zero iter count specified, should read 0 records.
self._test_limit_reader_shared(ReaderWithLimit,
size=100,
expected_read_len=0,
expected_read_len_threshold=0,
expected_finish=False,
num_threads=8,
read_delay=0,
num_iter=0)
def test_count_limit_reader_with_low_limit(self):
# Read with limit smaller than size of dataset
self._test_limit_reader_shared(ReaderWithLimit,
size=100,
expected_read_len=10,
expected_read_len_threshold=0,
expected_finish=False,
num_threads=8,
read_delay=0,
num_iter=10)
def test_count_limit_reader_with_high_limit(self):
# Read with limit larger than size of dataset
self._test_limit_reader_shared(ReaderWithLimit,
size=100,
expected_read_len=100,
expected_read_len_threshold=0,
expected_finish=True,
num_threads=8,
read_delay=0,
num_iter=110)
def test_time_limit_reader_without_limit(self):
# No duration specified, should read all records.
self._test_limit_reader_shared(ReaderWithTimeLimit,
size=100,
expected_read_len=100,
expected_read_len_threshold=0,
expected_finish=True,
num_threads=8,
read_delay=0.1,
duration=0)
def test_time_limit_reader_with_short_limit(self):
# Read with insufficient time limit
size = 50
num_threads = 4
sleep_duration = 0.25
duration = 1
expected_read_len = int(round(num_threads * duration / sleep_duration))
# Because the time limit check happens before the delay + read op,
        # subtract a little bit of time to ensure we don't squeeze in an
        # extra read
duration = duration - 0.25 * sleep_duration
# NOTE: `expected_read_len_threshold` was added because this test case
# has significant execution variation under stress. Under stress, we may
# read strictly less than the expected # of samples; anywhere from
# [0,N] where N = expected_read_len.
# Hence we set expected_read_len to N/2, plus or minus N/2.
self._test_limit_reader_shared(ReaderWithTimeLimit,
size=size,
expected_read_len=expected_read_len / 2,
expected_read_len_threshold=expected_read_len / 2,
expected_finish=False,
num_threads=num_threads,
read_delay=sleep_duration,
duration=duration)
def test_time_limit_reader_with_long_limit(self):
# Read with ample time limit
# NOTE: we don't use `expected_read_len_threshold` because the duration,
# read_delay, and # threads should be more than sufficient
self._test_limit_reader_shared(ReaderWithTimeLimit,
size=50,
expected_read_len=50,
expected_read_len_threshold=0,
expected_finish=True,
num_threads=4,
read_delay=0.2,
duration=10)
class TestDBFileReader(TestCase):
def setUp(self):
self.temp_paths = []
def tearDown(self):
# In case any test method fails, clean up temp paths.
for path in self.temp_paths:
self._delete_path(path)
@staticmethod
def _delete_path(path):
if os.path.isfile(path):
os.remove(path) # Remove file.
elif os.path.isdir(path):
shutil.rmtree(path) # Remove dir recursively.
def _make_temp_path(self):
# Make a temp path as db_path.
with tempfile.NamedTemporaryFile() as f:
temp_path = f.name
self.temp_paths.append(temp_path)
return temp_path
@staticmethod
def _build_source_reader(ws, size):
src_ds = make_source_dataset(ws, size)
return src_ds.reader()
@staticmethod
def _read_all_data(ws, reader, session):
dst_ds = make_destination_dataset(ws, reader.schema().clone_schema())
with TaskGroup() as tg:
pipe(reader, dst_ds.writer(), num_runtime_threads=8)
session.run(tg)
return ws.blobs[str(dst_ds.content().label())].fetch()
@unittest.skipIf("LevelDB" not in core.C.registered_dbs(), "Need LevelDB")
def test_cached_reader(self):
ws = workspace.C.Workspace()
session = LocalSession(ws)
db_path = self._make_temp_path()
# Read data for the first time.
cached_reader1 = CachedReader(
self._build_source_reader(ws, 100), db_path, loop_over=False,
)
build_cache_step = cached_reader1.build_cache_step()
session.run(build_cache_step)
data = self._read_all_data(ws, cached_reader1, session)
self.assertEqual(sorted(data), list(range(100)))
# Read data from cache.
cached_reader2 = CachedReader(
self._build_source_reader(ws, 200), db_path,
)
build_cache_step = cached_reader2.build_cache_step()
session.run(build_cache_step)
data = self._read_all_data(ws, cached_reader2, session)
self.assertEqual(sorted(data), list(range(100)))
self._delete_path(db_path)
# We removed cache so we expect to receive data from original reader.
cached_reader3 = CachedReader(
self._build_source_reader(ws, 300), db_path,
)
build_cache_step = cached_reader3.build_cache_step()
session.run(build_cache_step)
data = self._read_all_data(ws, cached_reader3, session)
self.assertEqual(sorted(data), list(range(300)))
self._delete_path(db_path)
@unittest.skipIf("LevelDB" not in core.C.registered_dbs(), "Need LevelDB")
def test_db_file_reader(self):
ws = workspace.C.Workspace()
session = LocalSession(ws)
db_path = self._make_temp_path()
# Build a cache DB file.
cached_reader = CachedReader(
self._build_source_reader(ws, 100),
db_path=db_path,
db_type='LevelDB',
)
build_cache_step = cached_reader.build_cache_step()
session.run(build_cache_step)
# Read data from cache DB file.
db_file_reader = DBFileReader(
db_path=db_path,
db_type='LevelDB',
)
data = self._read_all_data(ws, db_file_reader, session)
self.assertEqual(sorted(data), list(range(100)))
self._delete_path(db_path)
|
pytorch-master
|
caffe2/python/dataio_test.py
|
## @package dyndep
# Module caffe2.python.dyndep
import ctypes
import os
from threading import Lock
from caffe2.python import core, extension_loader
def InitOpsLibrary(name, trigger_lazy=True):
"""Loads a dynamic library that contains custom operators into Caffe2.
Since Caffe2 uses static variable registration, you can optionally load a
separate .so file that contains custom operators and registers that into
the caffe2 core binary. In C++, this is usually done by either declaring
dependency during compilation time, or via dynload. This allows us to do
registration similarly on the Python side.
Args:
name: a name that ends in .so, such as "my_custom_op.so". Otherwise,
the command will simply be ignored.
Returns:
None
"""
if not os.path.exists(name):
# Note(jiayq): if the name does not exist, instead of immediately
# failing we will simply print a warning, deferring failure to the
# time when an actual call is made.
print('Ignoring {} as it is not a valid file.'.format(name))
return
_init_impl(name, trigger_lazy=trigger_lazy)
_IMPORTED_DYNDEPS = set()
dll_lock = Lock()
def GetImportedOpsLibraries():
return _IMPORTED_DYNDEPS
def _init_impl(path, trigger_lazy=True):
with dll_lock:
_IMPORTED_DYNDEPS.add(path)
with extension_loader.DlopenGuard():
ctypes.CDLL(path)
# reinitialize available ops
core.RefreshRegisteredOperators(trigger_lazy)
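# Hedged usage sketch ('my_custom_op.so' is hypothetical):
#
#   InitOpsLibrary('/path/to/my_custom_op.so')
#   # operators registered by the library are now visible to
#   # core.CreateOperator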
|
pytorch-master
|
caffe2/python/dyndep.py
|
from caffe2.python import workspace
import unittest
class TestOperator(unittest.TestCase):
def setUp(self):
workspace.ResetWorkspace()
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
caffe2/python/convert_test.py
|
import numpy as np
import unittest
from caffe2.python import core, workspace, test_util
class TestToyRegression(test_util.TestCase):
def testToyRegression(self):
"""Tests a toy regression end to end.
The test code carries a simple toy regression in the form
y = 2.0 x1 + 1.5 x2 + 0.5
by randomly generating gaussian inputs and calculating the ground
truth outputs in the net as well. It uses a standard SGD to then
train the parameters.
"""
workspace.ResetWorkspace()
init_net = core.Net("init")
W = init_net.UniformFill([], "W", shape=[1, 2], min=-1., max=1.)
B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
W_gt = init_net.GivenTensorFill(
[], "W_gt", shape=[1, 2], values=[2.0, 1.5])
B_gt = init_net.GivenTensorFill([], "B_gt", shape=[1], values=[0.5])
LR = init_net.ConstantFill([], "LR", shape=[1], value=-0.1)
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
ITER = init_net.ConstantFill([], "ITER", shape=[1], value=0,
dtype=core.DataType.INT64)
train_net = core.Net("train")
X = train_net.GaussianFill([], "X", shape=[64, 2], mean=0.0, std=1.0)
Y_gt = X.FC([W_gt, B_gt], "Y_gt")
Y_pred = X.FC([W, B], "Y_pred")
dist = train_net.SquaredL2Distance([Y_gt, Y_pred], "dist")
loss = dist.AveragedLoss([], ["loss"])
        # Get gradients for all the computations above. Note that in fact we
        # don't need the gradient of the Y_gt computation, but we'll just
        # leave it there. In many cases, one would load X and Y from disk,
        # so there is really no operator that will calculate the Y_gt input.
input_to_grad = train_net.AddGradientOperators([loss], skip=2)
# updates
train_net.Iter(ITER, ITER)
train_net.LearningRate(ITER, "LR", base_lr=-0.1,
policy="step", stepsize=20, gamma=0.9)
train_net.WeightedSum([W, ONE, input_to_grad[str(W)], LR], W)
train_net.WeightedSum([B, ONE, input_to_grad[str(B)], LR], B)
for blob in [loss, W, B]:
train_net.Print(blob, [])
# the CPU part.
plan = core.Plan("toy_regression")
plan.AddStep(core.ExecutionStep("init", init_net))
plan.AddStep(core.ExecutionStep("train", train_net, 200))
workspace.RunPlan(plan)
W_result = workspace.FetchBlob("W")
B_result = workspace.FetchBlob("B")
np.testing.assert_array_almost_equal(W_result, [[2.0, 1.5]], decimal=2)
np.testing.assert_array_almost_equal(B_result, [0.5], decimal=2)
workspace.ResetWorkspace()
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
caffe2/python/toy_regression_test.py
|
## @package data_workers
# Module caffe2.python.data_workers
'''
This module provides a python-land multithreaded data input mechanism
for Caffe2 nets.
Basic usage is as follows:
coordinator = data_workers.init_data_input_workers(
net,
["data", "label"],
my_fetch_fun,
batch_size=32,
input_source_name="train",
dont_rebatch=False
)
...
coordinator.start()
First argument is the Caffe2 net (or model helper), and second argument
is list of input blobs that are to be fed.
Argument 'input_source_name' is used to distinguish different sources of data,
such as train or test data. This is to ensure the data does not get mixed up,
even if two nets share blobs.
To do the actual data loading, one defines a "fetcher function"
that has call signature
my_fetch_fun(worker_id, batch_size)
Optionally, one can define an "init function" that is called once before
threads start, and has call signature:
my_init_fun(data_coordinator, global_coordinator)
If dont_rebatch is set to True, the data input is not batched into equal-sized
chunks; instead, the data provided directly by the fetchers is used.
'batch_columns' can be used to specify which dimension is the batch dimension,
for each of the inputs. Default is 0 for all inputs.
'timeout' is the timeout in seconds after which if no data is available, the
net will fail (default 600s = 10 mins).
The fetcher function returns a list of numpy arrays corresponding to the
different input blobs. In the example above, it would return two arrays, one
for the data blob and another for the labels. These arrays can have an
arbitrary number of elements (i.e. they do not need to match the batch size).
The batch size is provided to the function as a hint only.
For example, fetcher function could download images from a remote service or
load random images from a directory on a file system.
For a dummy example, see the data_workers_test unit test.
Note that for data_parallel_models, init_data_input_workers will be called
for each GPU. Note that the 'coordinator' returned by the function is the
same each time.
'''
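# Hedged example of a fetcher function matching the contract described above
# (shapes and dtypes are made up for illustration):
#
#   def my_fetch_fun(worker_id, batch_size):
#       data = np.random.rand(batch_size, 3, 32, 32).astype(np.float32)
#       labels = np.random.randint(0, 10, size=batch_size).astype(np.int32)
#       return [data, labels]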
import queue as Queue
from itertools import chain
import logging
import threading
import numpy as np
import time
from caffe2.python import workspace, core, scope, utils
from caffe2.proto import caffe2_pb2
from caffe2.python.parallel_workers import Metrics, State, \
WorkerCoordinator, GlobalWorkerCoordinator, Worker, run_worker
log = logging.getLogger("data_workers")
log.setLevel(logging.INFO)
LOG_INT_SECS = 60
def get_worker_ids(num_workers):
return list(range(0, num_workers))
def init_data_input_workers(
net,
input_blob_names,
fetch_fun,
batch_size,
num_worker_threads=2,
input_source_name="train",
max_buffered_batches=800,
init_fun=None,
external_loggers=None,
dont_rebatch=False,
batch_columns=None,
timeout=600
):
global global_coordinator
device_option = scope.CurrentDeviceScope()
if (device_option is None):
device_option = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU)
metrics = Metrics(external_loggers)
batch_feeder = BatchFeeder(
net,
input_blob_names,
batch_size,
device_option,
scope.CurrentNameScope(),
input_source_name,
global_coordinator.get_queue(input_source_name, max_buffered_batches),
metrics,
dont_rebatch,
batch_columns,
timeout=timeout
)
# Launch fetch worker threads
worker_ids = [
global_coordinator.get_new_worker_id()
for i in range(num_worker_threads)
]
# Create coordinator object
coordinator = WorkerCoordinator(
input_source_name, worker_ids, init_fun, batch_feeder)
workers = [
threading.Thread(
target=run_worker,
name="data_workers fetcher id {}".format(worker_id),
args=[coordinator,
DataWorker(coordinator, worker_id, fetch_fun, metrics,
batch_size, batch_feeder)],
) for worker_id in worker_ids
]
workers.append(threading.Thread(
target=enqueuer,
name="Enqueuer {} {}".format(input_source_name, scope.CurrentNameScope()),
args=[coordinator, batch_feeder]))
coordinator._workers = workers
global_coordinator.add(coordinator)
return global_coordinator
class BatchFeeder(State):
def __init__(self, net, input_blob_names, batch_size,
device_option, namescope, input_source_name, queue,
metrics, dont_rebatch, batch_columns, timeout=600):
self._counter = 0
self._input_blob_names = input_blob_names
self._batch_size = batch_size
self._internal_queue = queue
self._queues = []
self._device_option = device_option
self._namescope = namescope
self._timeout = timeout
self._input_source_name = input_source_name
self._c2_queue_capacity = 4
self._create_caffe2_queues(net)
self._create_caffe2_ops(net)
self._inputs = 0
self._prev_seconds = 0
self._last_warning = time.time()
self._dont_rebatch = dont_rebatch
self._init_scratch()
self._metrics = metrics
if batch_columns is None:
batch_columns = [0 for _ in input_blob_names]
self._batch_columns = batch_columns
def start(self):
self._inputs = 0
self._prev_seconds = time.time()
def stop(self):
try:
for q in self._queues:
workspace.RunOperatorOnce(
core.CreateOperator("CloseBlobsQueue", [q], [])
)
finally:
self._log_inputs_per_interval(0, force=True)
def cleanup(self):
utils.ResetBlobs(self._scratch_blob.values())
utils.ResetBlobs(self._scratch_status.values())
def _get(self, data_input_coordinator):
start_time = time.time()
last_warning = time.time()
while data_input_coordinator.is_active():
try:
return self._internal_queue.get(block=True, timeout=0.5)
except Queue.Empty:
if time.time() - last_warning > 10.0:
log.warning("** Data input is slow: (still) no data in {} secs.".format(
time.time() - start_time))
last_warning = time.time()
continue
return None
def _validate_chunk(self, chunk):
if chunk is None:
log.warning("Fetcher function returned None")
return False
if len(chunk) == 0:
log.warning("Worker provided zero length input")
return False
assert len(chunk) == len(self._input_blob_names), \
"Expecting data blob for each input"
for d in chunk:
assert isinstance(d, np.ndarray), \
"Fetcher function must return a numpy array"
if not self._dont_rebatch:
# Each returned input must have the same number of samples along its
# batch dimension as the first input.
for j, d in enumerate(chunk[1:], start=1):
assert d.shape[self._batch_columns[j]] == \
chunk[0].shape[self._batch_columns[0]], \
"Each returned input must have equal number of samples"
return True
def put(self, chunk, data_input_coordinator):
if not self._validate_chunk(chunk):
return
while data_input_coordinator.is_active():
try:
qsize = self._internal_queue.qsize()
if qsize < 2 and (time.time() - self._last_warning) > LOG_INT_SECS:
log.warning("Warning, data loading lagging behind: " +
"queue size={}, name={}".format(qsize, self._input_source_name))
self._last_warning = time.time()
self._counter += 1
self._internal_queue.put(chunk, block=True, timeout=0.5)
self._log_inputs_per_interval(chunk[0].shape[0])
return
except Queue.Full:
log.debug("Queue full: stalling fetchers...")
continue
def _enqueue_batch_direct(self, data_input_coordinator):
data = self._get(data_input_coordinator)
if data is None:
return
if data_input_coordinator.is_active():
for b, q, c in zip(self._input_blob_names, self._queues, data):
self._enqueue(b, q, c)
def _enqueue_batch(self, data_input_coordinator):
'''
This pulls data from the python-side queue and collects them
into batch-sized pieces, unless dont_rebatch is set to true.
'''
if self._dont_rebatch:
self._enqueue_batch_direct(data_input_coordinator)
return
cur_batch = [np.array([]) for d in self._input_blob_names]
first_batch_col = self._batch_columns[0]
# Collect data until we have a full batch size
while (
cur_batch[0].shape[0] == 0 or
cur_batch[0].shape[first_batch_col] < self._batch_size
) and data_input_coordinator.is_active():
chunk = self._get(data_input_coordinator)
if chunk is None:
continue
for j, chunk_elem in enumerate(chunk):
if cur_batch[j].shape[0] == 0:
cur_batch[j] = chunk_elem.copy()
else:
cur_batch[j] = np.append(
cur_batch[j], chunk_elem, axis=self._batch_columns[j]
)
start_time = time.time()
try:
# Return data over the batch size back to queue
if cur_batch[0].shape[0] > 0 and cur_batch[0].shape[
first_batch_col
] > self._batch_size:
leftover = []
trimmed_batch = []
for j, b in enumerate(cur_batch):
[c, l] = np.split(
b, [self._batch_size], axis=self._batch_columns[j]
)
leftover.append(l)
trimmed_batch.append(c)
cur_batch = trimmed_batch
try:
self._internal_queue.put(leftover, block=False)
except Queue.Full:
pass
assert cur_batch[0].shape[first_batch_col] == self._batch_size
if data_input_coordinator.is_active():
for b, q, c in zip(
self._input_blob_names, self._queues, cur_batch
):
self._enqueue(b, q, c)
finally:
self._metrics.put_metric('enqueue_time', time.time() - start_time)
def _init_scratch(self):
self._scratch_blob = {}
self._scratch_status = {}
for blob_name in self._input_blob_names:
scratch_name = self._namescope + blob_name + \
"_scratch_" + self._input_source_name
self._scratch_blob[blob_name] = core.BlobReference(scratch_name)
self._scratch_status[blob_name] = core.BlobReference(
scratch_name + "_status"
)
# Feed empty arrays to the scratch blobs here, so that there won't be
# race conditions when calling FeedBlob (which calls workspace
# CreateBlob()) from enqueue threads
for b in chain(
self._scratch_blob.values(), self._scratch_status.values()
):
workspace.FeedBlob(
b,
np.array([]).astype(np.float32),
device_option=self._device_option,
)
def _enqueue(self, blob_name, queue, data_arr):
'''
Enqueue the correctly sized batch arrays to Caffe2's queue.
'''
workspace.FeedBlob(
self._scratch_blob[blob_name],
data_arr,
device_option=self._device_option
)
op = core.CreateOperator(
"SafeEnqueueBlobs",
[queue, self._scratch_blob[blob_name]],
[self._scratch_blob[blob_name], self._scratch_status[blob_name]],
device_option=self._device_option
)
workspace.RunOperatorOnce(op)
def _create_caffe2_queues(self, net):
'''
Creates queues on caffe2 side
'''
def create_queue(queue_name, num_blobs, capacity):
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateBlobsQueue",
[], [queue_name],
num_blobs=num_blobs,
capacity=capacity))
return core.ScopedBlobReference(queue_name)
for blob_name in self._input_blob_names:
qname = blob_name + "_c2queue" + "_" + self._input_source_name
q = create_queue(
qname, num_blobs=1, capacity=self._c2_queue_capacity
)
self._queues.append(q)
def _create_caffe2_ops(self, net):
'''
Creates dequeue-ops on caffe2 side
'''
for q, blob_name in zip(self._queues, self._input_blob_names):
# Add operator to the Caffe2 network to dequeue
net.DequeueBlobs(q, blob_name, timeout_secs=float(self._timeout))
def _log_inputs_per_interval(self, inputs, force=False):
self._inputs += inputs
current_seconds = time.time()
delta_seconds = current_seconds - self._prev_seconds
if delta_seconds >= LOG_INT_SECS or force:
inputs_per_sec = int(self._inputs / delta_seconds)
qsize = self._internal_queue.qsize()
log.info("{}/{}: {} inputs/sec".format(
self._input_source_name,
self._namescope,
inputs_per_sec,
))
log.info("-- queue: {} batches".format(qsize))
# log and reset perf metrics
self._metrics.put_metric(
'inputs_per_sec', inputs_per_sec, False)
self._metrics.put_metric('queue_size', qsize, False)
self._metrics.put_metric(
'time_elapsed', delta_seconds, False)
self._metrics.log_metrics()
self._metrics.reset_metrics()
self._inputs = 0
self._prev_seconds = current_seconds
class GlobalCoordinator(GlobalWorkerCoordinator):
def __init__(self):
GlobalWorkerCoordinator.__init__(self)
self._queues = {}
def get_queue(self, queue_name, max_buffered_batches):
assert isinstance(max_buffered_batches, int)
if queue_name not in self._queues:
self._queues[queue_name] = Queue.Queue(maxsize=max_buffered_batches)
return self._queues[queue_name]
def reset_data_input(self, namescope, name, net, batch_size):
log.info("Reset data input {}, batch size {}: ".format(name, batch_size))
for c in self._coordinators:
if c._worker_name == name and c._state._namescope == namescope:
c._state._batch_size = batch_size
c._state._create_caffe2_ops(net)
class DataWorker(Worker):
def __init__(
self,
coordinator,
worker_id,
worker_fun,
metrics,
batch_size,
batch_feeder
):
Worker.__init__(self, coordinator, worker_id, worker_fun=worker_fun,
metrics=metrics)
self._batch_size = batch_size
self._batch_feeder = batch_feeder
def run(self):
input_data = self._worker_fun(self._worker_id, self._batch_size)
self._batch_feeder.put(input_data, self._coordinator)
def finish(self):
self._metrics.put_metric(
'fetcher_time', time.time() - self._start_time)
global_coordinator = GlobalCoordinator()
def enqueuer(coordinator, batch_feeder):
while coordinator.is_active():
batch_feeder._enqueue_batch(coordinator)
|
pytorch-master
|
caffe2/python/data_workers.py
|
## @package data_parallel_model
# Module caffe2.python.data_parallel_model
from collections import OrderedDict
from future.utils import viewitems, viewkeys, viewvalues
import logging
import copy
from multiprocessing import cpu_count
from caffe2.python import \
model_helper, dyndep, scope, workspace, core, memonger, utils
from caffe2.proto import caffe2_pb2
import numpy as np
import warnings
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops")
# We only import nccl operators when the machine has GPUs
# Otherwise the binary can be compiled with CPU-only mode, and
# will not be able to find those modules
if workspace.NumGpuDevices() > 0:
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/nccl:nccl_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops_gpu")
log = logging.getLogger("data_parallel_model")
log.setLevel(logging.INFO)
_DEFAULT_TIMEOUT_SEC = 30
_DEFAULT_BARRIER_NET_TIMEOUT_SEC = 300
def Parallelize_GPU(*args, **kwargs):
kwargs['cpu_device'] = False
Parallelize(*args, **kwargs)
def Parallelize_CPU(*args, **kwargs):
kwargs['cpu_device'] = True
Parallelize(*args, **kwargs)
def Parallelize_iDeep(*args, **kwargs):
kwargs['ideep'] = True
Parallelize(*args, **kwargs)
def Parallelize(
model_helper_obj,
input_builder_fun,
forward_pass_builder_fun,
param_update_builder_fun=None,
optimizer_builder_fun=None,
post_sync_builder_fun=None,
pre_grad_net_transformer_fun=None,
net_transformer_fun=None,
devices=None,
rendezvous=None,
net_type='dag',
broadcast_computed_params=True,
optimize_gradient_memory=False,
dynamic_memory_management=False,
blobs_to_keep=None,
use_nccl=False,
max_concurrent_distributed_ops=16,
cpu_device=False,
ideep=False,
num_threads_per_device=4,
shared_model=False,
combine_spatial_bn=False,
barrier_net_timeout_sec=_DEFAULT_BARRIER_NET_TIMEOUT_SEC,
):
'''
Function to create a model that can run on many GPUs or CPUs.
model_helper_obj: an object of ModelHelper
input_builder_fun:
Function that adds the input operators
Note: Remember to instantiate the reader outside of this
function so that all devices share the same reader object.
Signature: input_builder_fun(model)
forward_pass_builder_fun:
Function to add the operators to the model.
Must return a list of loss-blob references that
are used to build the gradient. A loss scale parameter
is passed in, as you should scale the loss of your model
by 1.0 / the total number of devices.
Signature: forward_pass_builder_fun(model, loss_scale)
param_update_builder_fun:
Function that adds operators that are run after
the gradient update, such as updating the weights and
applying weight decay. This is called for each GPU separately.
Signature: param_update_builder_fun(model)
optimizer_builder_fun:
Alternative to param_update_builder_fun: allows one
to add an optimizer for the whole model. Called only
once, without name or device scope.
net_transformer_fun:
Optional function to transform the network after the
network is built. It will be called once (NOT once per
GPU).
Signature:
net_transformer_fun(
model, num_devices, device_prefix, device_type)
pre_grad_net_transformer_fun:
Optional function to transform the network, similar to
net_transformer_fun, but it runs before the gradient ops
have been added.
Signature: pre_grad_net_transformer_fun(model)
post_sync_builder_fun:
Function applied after initial parameter sync has been
completed, such as keeping multi-precision parameters
in sync.
Signature: post_sync_builder_fun(model)
devices: List of GPU ids, such as [0, 1, 2, 3].
rendezvous: used for rendezvous in distributed computation; if None,
then only one node is used. To create a rendezvous,
use <TBD>.
net_type: Network type
optimize_gradient_memory: whether to apply 'memonger' to share blobs
shared_model (only for CPU): use the same parameters on each device
in gradient computation to reduce memory footprint.
dynamic_memory_management: Whether to apply dynamic memory optimization
by freeing unused blobs. The underlying (de)allocation
uses a cached allocator. For GPU training, PLEASE MAKE SURE
caffe2_cuda_memory_pool is set.
blobs_to_keep: A list of blob names to keep and not free during
dynamic memory optimization (for example, the loss blob).
cpu_device: Use CPU instead of GPU.
ideep: Use ideep.
combine_spatial_bn:
When set to True, applies batch normalization across
all devices within the node. If False, batch
normalization will be done separately for each device.
This option is currently only supported on the CPU.
barrier_net_timeout_sec:
The timeout in seconds of the barrier net, which is run
to synchronize shards before a training epoch starts.
Defaults to 300 seconds.
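A minimal usage sketch (builder bodies and device ids below are
assumptions, not part of this API; '...' marks elided model code):
def input_fun(model):
    ...
def forward_fun(model, loss_scale):
    ...
    return [loss]
def optim_fun(model):
    return optimizer.build_sgd(model, base_learning_rate=0.1)
Parallelize(
    model_helper_obj,
    input_builder_fun=input_fun,
    forward_pass_builder_fun=forward_fun,
    optimizer_builder_fun=optim_fun,
    devices=[0, 1],
)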
'''
assert scope.CurrentDeviceScope() is None \
or scope.CurrentDeviceScope().device_type == caffe2_pb2.CPU, \
"Parallelize must be called without device-scope, \
device scope was: {}".format(scope.CurrentDeviceScope())
if devices is None:
if not (cpu_device or ideep):
devices = list(range(0, workspace.NumCudaDevices()))
else:
devices = list(range(0, cpu_count()))
if not (cpu_device or ideep):
for gpu in devices:
if gpu >= workspace.NumGpuDevices():
log.warning("** Only {} GPUs available, GPUs {} requested".format(
workspace.NumGpuDevices(), devices))
break
model_helper_obj._device_type = workspace.GpuDeviceType
model_helper_obj._device_prefix = "gpu"
model_helper_obj._shared_model = False
device_name = "GPU"
assert shared_model is False, "Shared model only supported on CPU"
elif ideep:
model_helper_obj._device_type = caffe2_pb2.IDEEP
model_helper_obj._device_prefix = "ideep"
device_name = "IDEEP"
model_helper_obj._shared_model = shared_model
if shared_model and rendezvous is not None:
assert False, "Shared model only supported on single-node currently"
else:
model_helper_obj._device_type = caffe2_pb2.CPU
model_helper_obj._device_prefix = "cpu"
device_name = "CPU"
model_helper_obj._shared_model = shared_model
if shared_model and rendezvous is not None:
assert False, "Shared model only supported on single-node currently"
log.info("Parallelizing model for devices: {}".format(devices))
extra_workers = 8 if rendezvous is not None else 0 # best-guess
num_workers = len(devices) * num_threads_per_device + extra_workers
max_concurrent_distributed_ops =\
min(max_concurrent_distributed_ops, num_workers - 1)
model_helper_obj.net.Proto().num_workers = num_workers
model_helper_obj.net.Proto().type = net_type
# Store some information in the model -- a bit ugly
model_helper_obj._devices = devices
model_helper_obj._rendezvous = rendezvous
model_helper_obj._sync_barrier_net = None
model_helper_obj._broadcast_context = None
model_helper_obj._grad_names = []
assert isinstance(model_helper_obj, model_helper.ModelHelper)
# Keep track of params that were in the model before: they are not
# data parallel, so we need to handle them separately
non_datapar_params = copy.copy(model_helper_obj.params)
# Add input and model
log.info("Create input and model training operators")
losses_by_gpu = {}
num_shards = 1 if rendezvous is None else rendezvous['num_shards']
loss_scale = 1.0 / (len(devices) * num_shards)
has_parameter_updates = param_update_builder_fun is not None or \
optimizer_builder_fun is not None
assert not (
param_update_builder_fun is not None and
optimizer_builder_fun is not None
), 'Can only specify one of param_update_builder_fun, optimizer_builder_fun'
# Check that a model that is used for validation/testing has
# init_params False, otherwise running the param init net will overwrite
# values synchronized by the training net
if not has_parameter_updates and model_helper_obj.init_params:
log.warning('')
log.warning("############# WARNING #############")
log.warning("Model {}/{} is used for testing/validation but".format(
model_helper_obj.name, model_helper_obj))
log.warning("has init_params=True!")
log.warning("This can conflict with model training.")
log.warning("Please ensure model = ModelHelper(init_params=False)")
log.warning('####################################')
log.warning('')
# TODO: make into assert
for device in devices:
device_opt = core.DeviceOption(model_helper_obj._device_type, device)
with core.DeviceScope(device_opt):
with core.NameScope("{}_{}".format(model_helper_obj._device_prefix,
device)):
log.info("Model for {} : {}".format(device_name, device))
input_builder_fun(model_helper_obj)
losses = forward_pass_builder_fun(model_helper_obj, loss_scale)
# Losses are not needed for test net
if has_parameter_updates:
assert isinstance(losses, list), \
'Model builder function must return list of loss blobs'
for loss in losses:
assert isinstance(loss, core.BlobReference), \
'Model builder func must return list of loss blobs'
losses_by_gpu[device] = losses
_ValidateParams(model_helper_obj.params)
# Create parameter map
model_helper_obj._device_grouped_blobs =\
_GroupByDevice(model_helper_obj, devices,
model_helper_obj.params, non_datapar_params)
# computed params
computed_params_grouped =\
_GroupByDevice(model_helper_obj, devices,
model_helper_obj.GetComputedParams(''), [])
model_helper_obj._device_grouped_blobs.update(computed_params_grouped)
model_helper_obj._param_names =\
list(viewkeys(model_helper_obj._device_grouped_blobs))
model_helper_obj._computed_param_names =\
list(viewkeys(computed_params_grouped))
if pre_grad_net_transformer_fun:
pre_grad_net_transformer_fun(model_helper_obj)
if has_parameter_updates:
log.info("Adding gradient operators")
_AddGradientOperators(devices, model_helper_obj, losses_by_gpu)
if net_transformer_fun:
net_transformer_fun(
model_helper_obj,
len(devices),
model_helper_obj._device_prefix,
model_helper_obj._device_type)
if not has_parameter_updates:
log.info("Parameter update function not defined --> only forward")
_InferBlobDevice(model_helper_obj)
return
if combine_spatial_bn:
assert has_parameter_updates, \
'combine_spatial_bn should only be used for train model'
_InterleaveOps(model_helper_obj)
if cpu_device:
_CPUInterDeviceBatchNormalization(model_helper_obj)
else:
_GPUInterDeviceBatchNormalization(model_helper_obj)
_ValidateParams(model_helper_obj.params)
# Group gradients by device and register to blob lookup
param_to_grad = model_helper_obj.param_to_grad
grads_ordered = [param_to_grad[p] for p in
model_helper_obj.params if p in param_to_grad]
non_datapar_grads = [param_to_grad[p] for p in non_datapar_params]
gradients_grouped = _GroupByDevice(
model_helper_obj,
devices,
grads_ordered,
non_datapar_grads
)
model_helper_obj._device_grouped_blobs.update(gradients_grouped)
model_helper_obj._grad_names = list(viewkeys(gradients_grouped))
model_helper_obj._losses_by_gpu = losses_by_gpu
_InferBlobDevice(model_helper_obj)
log.info("Add gradient all-reduces for SyncSGD")
if broadcast_computed_params:
_BroadcastComputedParams(devices, model_helper_obj, rendezvous, use_nccl)
if len(model_helper_obj._grad_names) > 0:
# Gradients in reverse order
reverse_ordered_grads = _GetReverseOrderedGrads(model_helper_obj)
assert len(reverse_ordered_grads) > 0
_AllReduceBlobs(
reverse_ordered_grads,
devices,
model_helper_obj,
model_helper_obj.net,
rendezvous,
use_nccl,
max_concurrent_distributed_ops,
)
else:
log.info("NOTE: Param builder function did not create any parameters.")
log.info("Post-iteration operators for updating params")
num_shards = 1 if rendezvous is None else rendezvous['num_shards']
all_params = set(model_helper_obj.GetParams(''))
if shared_model:
_PruneParametersForSharing(model_helper_obj)
if param_update_builder_fun is not None:
for device in devices:
device_opt = core.DeviceOption(model_helper_obj._device_type, device)
with core.DeviceScope(device_opt):
with core.NameScope(
"{}_{}".format(model_helper_obj._device_prefix, device)
):
param_update_builder_fun(model_helper_obj)
else:
log.info("Calling optimizer builder function")
optimizer = optimizer_builder_fun(model_helper_obj)
model_helper_obj._optimizer = optimizer
(sync_blobs, sync_names) = _ComputeBlobsToSync(model_helper_obj)
sync_blobs_grouped = _GroupByDevice(
model_helper_obj,
devices,
sync_blobs,
[],
)
model_helper_obj._device_grouped_blobs.update(sync_blobs_grouped)
_InferBlobDevice(model_helper_obj)
_AnalyzeOperators(model_helper_obj)
# Configure dagnet to run with only one worker on the first iteration,
# to prevent concurrency problems with allocs and nccl.
arg = model_helper_obj.Proto().arg.add()
arg.name = "first_iter_only_one_worker"
arg.i = 1
# Add initial parameter syncs
log.info("Add initial parameter sync")
_SyncAllParams(
devices,
model_helper_obj,
model_helper_obj.param_init_net,
model_helper_obj.param_init_net,
rendezvous,
sync_names,
max_concurrent_distributed_ops=1
)
# Handle any operations that need to be done after parameter sync,
# e.g. making sure multi-precision copies of parameters are up-to-date
if post_sync_builder_fun is not None:
for device in devices:
device_opt = core.DeviceOption(model_helper_obj._device_type, device)
with core.DeviceScope(device_opt):
with core.NameScope(
"{}_{}".format(model_helper_obj._device_prefix, device)
):
post_sync_builder_fun(model_helper_obj)
assert not (optimize_gradient_memory and dynamic_memory_management), \
"""It is not advised to use gradient optimization ('memonger')
with dynamic memory management."""
if optimize_gradient_memory:
_OptimizeGradientMemorySimple(model_helper_obj, losses_by_gpu, devices)
if dynamic_memory_management:
_AddDynamicMemoryOptimization(model_helper_obj, blobs_to_keep, devices)
model_helper_obj._data_parallel_model_init_nets = [
model_helper_obj.param_init_net,
]
model_helper_obj._data_parallel_model_nets = [
model_helper_obj.net
]
_AddBarrierToModelNets(model_helper_obj, barrier_net_timeout_sec)
if shared_model:
_RemapParameterBlobsForSharedModel(model_helper_obj, all_params)
def Parallelize_GPU_BMUF(*args, **kwargs):
kwargs['cpu_device'] = False
Parallelize_BMUF(*args, **kwargs)
def Parallelize_CPU_BMUF(*args, **kwargs):
kwargs['cpu_device'] = True
Parallelize_BMUF(*args, **kwargs)
def Parallelize_BMUF(
model_helper_obj,
input_builder_fun,
forward_pass_builder_fun,
param_update_builder_fun,
block_learning_rate=1.0,
block_momentum=None,
devices=None,
rendezvous=None,
net_type='dag',
master_device=None,
use_nccl=False,
nesterov=False,
optimize_gradient_memory=False,
reset_momentum_sgd=False,
warmup_iterations=None,
max_concurrent_distributed_ops=4,
add_blobs_to_sync=None,
num_threads_per_device=4,
cpu_device=False,
barrier_net_timeout_sec=_DEFAULT_BARRIER_NET_TIMEOUT_SEC,
):
'''
Function to create a model that runs on many GPUs and creates a net for
parameter updates that can be run independently for a number of iterations,
followed by another net that runs once to compute the final parameter
updates according to the block-wise model-update filtering rule described
in: Scalable Training of Deep Learning Machines by Incremental Block
Training with Intra-block Parallel Optimization and Blockwise Model-Update
Filtering (ICASSP 2016).
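A typical driving sequence after building the model (a sketch; the
block and iteration counts are assumptions):
RunInitNet(model)
RunWarmup(model)  # only if warmup_iterations was given
for _ in range(num_blocks):
    RunNet(model, num_local_iterations)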
'''
assert scope.CurrentDeviceScope() is None \
or scope.CurrentDeviceScope().device_type == caffe2_pb2.CPU, \
"Parallelize must be called without device-scope, \
device scope was: {}".format(scope.CurrentDeviceScope())
assert isinstance(model_helper_obj, model_helper.ModelHelper)
if devices is None:
devices = list(range(0, workspace.NumGpuDevices()))
if master_device is None:
master_device = devices[0]
if not cpu_device:
for gpu in devices:
if gpu >= workspace.NumGpuDevices():
log.warning("** Only {} GPUs available, GPUs {} requested".format(
workspace.NumGpuDevices(), devices))
break
model_helper_obj._device_type = workspace.GpuDeviceType
model_helper_obj._device_prefix = "gpu"
else:
model_helper_obj._device_type = caffe2_pb2.CPU
model_helper_obj._device_prefix = "cpu"
model_helper_obj._devices = devices
model_helper_obj._rendezvous = rendezvous
model_helper_obj._sync_barrier_net = None
model_helper_obj._broadcast_context = None
model_helper_obj._shared_model = False
master_dev_opt = core.DeviceOption(model_helper_obj._device_type, master_device)
# question: rendezvous structure
num_shards = rendezvous['num_shards'] if rendezvous else 1
# num_devices is #devices across all machines
num_devices = len(devices) * num_shards
# num_workers is #threads to execute the DAG per shard
num_workers = num_threads_per_device * len(devices)
if rendezvous:
num_workers += 8
loss_scale = 1.0 / num_devices
if block_momentum is None:
block_momentum = 1.0 - 1.0 / num_devices
max_concurrent_distributed_ops = min(
max_concurrent_distributed_ops,
num_workers - 1
)
model_helper_obj.net.Proto().num_workers = num_workers
model_helper_obj.net.Proto().type = net_type
# A net for initializing global model parameters. It is called once in the
# same step as net parameter initialization.
model_helper_obj._global_model_init_net = core.Net('global_model_init')
model_helper_obj._global_model_init_net.Proto().type = net_type
model_helper_obj._global_model_init_net.Proto().num_workers = \
num_workers
# A net for computing final parameter updates. It will run once after
# running the net (local model updates) for `num_local_iterations` times.
model_helper_obj._global_model_param_updates_net = core.Net('global_model')
model_helper_obj._global_model_param_updates_net.Proto().type = net_type
model_helper_obj._global_model_param_updates_net.Proto().num_workers = \
num_workers
def _v(param):
return "{}_v".format(param)
def _g(param):
return "{}_g".format(param)
def _v_prev(param):
return "{}_prev".format(param)
# Keep track of params that were in the model before: they are not
# data parallel, so we need to handle them separately
non_datapar_params = copy.copy(model_helper_obj.params)
model_helper_obj._losses_by_gpu = {}
def _InitializeModels(gpu_id):
input_builder_fun(model_helper_obj)
loss = forward_pass_builder_fun(model_helper_obj, loss_scale)
model_helper_obj._losses_by_gpu[gpu_id] = loss
_ForEachDevice(
devices,
_InitializeModels,
device_type=model_helper_obj._device_type,
device_prefix=model_helper_obj._device_prefix,
scoped=True
)
_ValidateParams(model_helper_obj.params)
model_helper_obj._device_grouped_blobs =\
_GroupByDevice(model_helper_obj, devices,
model_helper_obj.params, non_datapar_params)
model_helper_obj._param_names =\
list(viewkeys(model_helper_obj._device_grouped_blobs))
_AddGradientOperators(
devices, model_helper_obj, model_helper_obj._losses_by_gpu
)
_ValidateParams(model_helper_obj.params)
_InferBlobDevice(model_helper_obj)
def _InitializeParamUpdate(gpu_id):
param_update_builder_fun(model_helper_obj)
_ForEachDevice(
devices,
_InitializeParamUpdate,
device_type=model_helper_obj._device_type,
device_prefix=model_helper_obj._device_prefix,
scoped=True
)
model_parameter_names = list(
viewkeys(model_helper_obj._device_grouped_blobs)
)
if warmup_iterations is not None:
model_helper_obj._warmup_iterations = warmup_iterations
# A net for broadcasting gpu-0 (master shard) parameters after
# running the net for `warmup_iterations`.
model_helper_obj._warmup_broadcast = core.Net('warmup-broadcast')
model_helper_obj._warmup_broadcast.Proto().type = net_type
model_helper_obj._warmup_broadcast.Proto().num_workers = \
num_workers
_SyncAllParams(
devices,
model_helper_obj,
model_helper_obj.param_init_net,
model_helper_obj._warmup_broadcast,
rendezvous,
model_parameter_names,
max_concurrent_distributed_ops
)
for param_name in viewkeys(model_helper_obj._device_grouped_blobs):
param = model_helper_obj._device_grouped_blobs[param_name][master_device]
with core.DeviceScope(master_dev_opt):
model_helper_obj._warmup_broadcast.Copy(param, _g(param))
# (Step-0) Initialize momentum parameters on master device.
for param_name in viewkeys(model_helper_obj._device_grouped_blobs):
param = model_helper_obj._device_grouped_blobs[param_name][master_device]
with core.DeviceScope(master_dev_opt):
model_helper_obj._global_model_init_net.ConstantFill(
param, _v(param), value=0.0
)
model_helper_obj._global_model_init_net.Copy(param, _g(param))
if nesterov:
model_helper_obj._global_model_init_net.ConstantFill(
param, _v_prev(param), value=0.0
)
# (Step-1) Update models for num_local_iterations.
# (Step-2) Compute post-local-updates average of the params.
# Sum model params across GPUs and store results in the param_avg blob.
_AllReduceBlobs(
model_parameter_names,
devices,
model_helper_obj,
model_helper_obj._global_model_param_updates_net,
rendezvous,
use_nccl,
max_concurrent_distributed_ops
)
# (Step-3) Update momentum params :
# param_v = block_momentum * param_v
# + block_learning_rate * (param_avg - param)
# if nesterov momentum:
# param = param + param_v
# - block_momentum * (param_v - param_v_prev)
# param_v_prev = param_v
# else:
# param = param + param_v
for param_name in model_parameter_names:
param = model_helper_obj._device_grouped_blobs[param_name][master_device]
with core.DeviceScope(master_dev_opt):
# TODO(ataei) : Stop building the graph here to get model average ?
model_helper_obj._global_model_param_updates_net.Scale(
param, param, scale=1.0 / num_devices
)
model_helper_obj._global_model_param_updates_net.Sub(
[param, _g(param)], param
)
model_helper_obj._global_model_param_updates_net.Scale(
param, param, scale=block_learning_rate
)
model_helper_obj._global_model_param_updates_net.Scale(
_v(param), _v(param), scale=block_momentum
)
model_helper_obj._global_model_param_updates_net.Add(
[_v(param), param], _v(param)
)
model_helper_obj._global_model_param_updates_net.Add(
[_g(param), _v(param)], _g(param)
)
if nesterov:
model_helper_obj._global_model_param_updates_net.Sub(
[_v(param), _v_prev(param)], _v_prev(param)
)
model_helper_obj._global_model_param_updates_net.Scale(
_v_prev(param), _v_prev(param), scale=block_momentum
)
model_helper_obj._global_model_param_updates_net.Sub(
[_g(param), _v_prev(param)], _g(param)
)
model_helper_obj._global_model_param_updates_net.Copy(
_v(param), _v_prev(param)
)
model_helper_obj._global_model_param_updates_net.Copy(
_g(param), param
)
_SyncAllParams(
devices,
model_helper_obj,
model_helper_obj.param_init_net,
model_helper_obj._global_model_param_updates_net,
rendezvous,
model_parameter_names,
max_concurrent_distributed_ops
)
# Add additional syncs
if add_blobs_to_sync is not None:
AddBlobSync(
model_helper_obj,
add_blobs_to_sync,
net=model_helper_obj._global_model_param_updates_net)
# Reset momentum-SGD parameters
if reset_momentum_sgd:
momentum_ops = [op for op in model_helper_obj.net.Proto().op
if op.type == 'MomentumSGDUpdate']
for op in momentum_ops:
momentum_blob = op.input[1]
with core.DeviceScope(op.device_option):
model_helper_obj._global_model_param_updates_net.ConstantFill(
[momentum_blob], momentum_blob, value=0.0
)
if optimize_gradient_memory:
_OptimizeGradientMemorySimple(
model_helper_obj, model_helper_obj._losses_by_gpu, devices
)
model_helper_obj._data_parallel_model_init_nets = [
model_helper_obj.param_init_net,
model_helper_obj._global_model_init_net
]
model_helper_obj._data_parallel_model_nets = [
model_helper_obj.net,
(model_helper_obj._global_model_param_updates_net, 1)
]
_AddBarrierToModelNets(model_helper_obj, barrier_net_timeout_sec)
def CreateNet(model, overwrite=False):
for net_iters in model._data_parallel_model_nets:
if isinstance(net_iters, tuple):
workspace.CreateNet(net_iters[0], overwrite=overwrite)
else:
workspace.CreateNet(net_iters, overwrite=overwrite)
def RunInitNet(model):
for init_net in model._data_parallel_model_init_nets:
workspace.RunNetOnce(init_net)
CreateNet(model)
def RunWarmup(model):
workspace.RunNet(model.net, model._warmup_iterations)
workspace.RunNetOnce(model._warmup_broadcast)
def RunNet(model, num_iterations):
for net_iter in model._data_parallel_model_nets:
if isinstance(net_iter, tuple):
workspace.RunNet(net_iter[0].Proto().name, net_iter[1])
else:
workspace.RunNet(net_iter, num_iterations)
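# Typical driver sequence for a parallelized model (a sketch; 'model' and
# the iteration count are assumptions):
#   RunInitNet(model)    # run the init nets once, then instantiate the nets
#   RunWarmup(model)     # only for BMUF models built with warmup_iterations
#   RunNet(model, 100)   # run the training net(s) for 100 iterations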
def _AddBarrierToModelNets(model, barrier_net_timeout_sec):
if model._rendezvous is not None and model._rendezvous['engine'] == 'GLOO':
# Synchronize DPM at the start of each epoch. This allows shards that
# start an epoch sooner to wait for the slower shards. Without this,
# shards that are faster than others will begin training the next epoch
# while stragglers are blocked on IO, and may time out after 30 seconds
# (_DEFAULT_TIMEOUT_SEC).
# We pass in model.param_init_net so that the barrier net can be run as
# part of the param_init_net.
model._barrier_init_net = core.Net("barrier_init_net")
model._barrier_net = _CreateBarrierNet(model, model._barrier_init_net,
"pre_training", barrier_net_timeout_sec)
model._data_parallel_model_init_nets.insert(0, model._barrier_init_net)
model._data_parallel_model_nets.insert(0, model._barrier_net)
def _CreateBarrierNet(model, init_net, name_prefix, timeout_sec):
log.info("Creating barrier net")
assert model._rendezvous['engine'] == 'GLOO', "Engine does not support barrier"
comm_world = _CreateOrCloneCommonWorld(
init_net,
name_prefix + "_barrier_cw",
rendezvous=model._rendezvous,
timeout_sec=timeout_sec,
)
barrier_net = core.Net(name_prefix + "_barrier_net")
barrier_net.Barrier(
inputs=[comm_world],
outputs=[],
engine=model._rendezvous['engine'],
)
return barrier_net
# DEPRECATED: See warnings below.
def Synchronize(model, timeout_sec=_DEFAULT_BARRIER_NET_TIMEOUT_SEC):
warnings.warn("The Synchronize API has been deprecated. We now have a "
"barrier net which runs before training to ensure all hosts wait "
"before training starts. The default timeout for the barrier is "
"300s and it can be overridden using the barrier_net_timeout_sec "
"parameter when calling Parallelize.",
category=DeprecationWarning, stacklevel=2)
if model._rendezvous is None or model._rendezvous['num_shards'] <= 1:
# Single host case
return
if model._sync_barrier_net is None:
barrier_init_net = core.Net("sync_barrier_init_net")
model._sync_barrier_net = _CreateBarrierNet(
model, barrier_init_net, "sync", timeout_sec)
workspace.RunNetOnce(barrier_init_net)
workspace.CreateNet(model._sync_barrier_net)
model._sync_barrier_net_timeout = timeout_sec
assert model._sync_barrier_net_timeout == timeout_sec, \
"Must use fixed timeout, {} != {}".format(
model._sync_barrier_net_timeout, timeout_sec
)
log.info("Synchronize run barrier net.")
workspace.RunNet(model._sync_barrier_net)
def ConvertNetForDevice(net, device=None):
'''
Converts all blobs in the net to have namescope gpu_X, and correct
device scope. You can use this to enable AppendNet with a
forward_pass_builder_fun:
def builder_fun(model):
...
model.net.AppendNet(
data_parallel_model.ConvertNetForDevice(othermodel.net))
model.param_init_net.AppendNet(
data_parallel_model.ConvertNetForDevice(othermodel.param_init_net))
'''
mnet = copy.deepcopy(net)
if device is None:
device = scope.CurrentDeviceScope()
if core.IsGPUDeviceType(device.device_type):
device_prefix = "gpu"
elif device.device_type == caffe2_pb2.IDEEP:
device_prefix = "ideep"
else:
device_prefix = "cpu"
namescope = "{}_{}/".format(device_prefix, device.device_id)
for op in mnet.Proto().op:
if "RecurrentNetwork" in op.type:
raise NotImplementedError("RecurrentNetwork conversion not yet supported")
for i, inputb in enumerate(op.input):
op.input[i] = namescope + inputb
for i, outputb in enumerate(op.output):
op.output[i] = namescope + outputb
for i, blob in enumerate(op.control_input):
op.control_input[i] = namescope + blob
op.device_option.CopyFrom(device)
for i, einp in enumerate(mnet.Proto().external_input):
mnet.Proto().external_input[i] = namescope + einp
for i, eoutp in enumerate(mnet.Proto().external_output):
mnet.Proto().external_output[i] = namescope + eoutp
return mnet
def _ForEachDevice(devices, f, device_type, device_prefix, scoped=False,
*args, **kwargs):
for device in devices:
device_opt = core.DeviceOption(device_type, device)
with core.DeviceScope(device_opt):
if scoped:
with core.NameScope("{}_{}".format(device_prefix, device)):
f(device, *args, **kwargs)
else:
f(device, *args, **kwargs)
def _AddGradientOperators(devices, model, losses_by_gpu):
def create_grad(lossp):
return model.ConstantFill(lossp, str(lossp) + "_grad", value=1.0)
loss_grad = {}
# Explicitly need to create gradients on each GPU
for gpu_id in devices:
device = core.DeviceOption(model._device_type, gpu_id)
with core.DeviceScope(device):
for l in losses_by_gpu[gpu_id]:
lg = create_grad(l)
loss_grad[str(l)] = str(lg)
model.AddGradientOperators(loss_grad)
def ExtractPredictorNet(model, inputs, outputs, device):
'''
Returns (net, params) that can be exported to be used as a prediction
net.
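Example (a sketch; the blob names are assumptions):
(predictor_net, export_blobs) = ExtractPredictorNet(
    model, inputs=["data"], outputs=["softmax"],
    device=core.DeviceOption(caffe2_pb2.CPU))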
'''
master_device = model._devices[0]
prefix = "{}_{}/".format(model._device_prefix, master_device)
prefix_inputs = [prefix + str(b) for b in inputs]
prefix_outputs = [prefix + str(b) for b in outputs]
(predictor_net, export_blobs) = model_helper.ExtractPredictorNet(
net_proto=model.net.Proto(),
input_blobs=prefix_inputs,
output_blobs=prefix_outputs,
device=device,
renames={
a: b
for (a, b) in zip(prefix_inputs + prefix_outputs, inputs + outputs)
},
)
return (predictor_net, export_blobs)
def GetCheckpointParams(model):
'''
Returns a set of blobs that are needed for a complete checkpoint.
They are the blobs for the first gpu and the iteration blobs.
'''
(all_blobs, _) = _ComputeBlobsToSync(model)
first_gpu_blobs = {
b
for b in all_blobs
if str(b)
.startswith("{}_{}/".format(model._device_prefix, model._devices[0]))
}
# Add iteration blobs that do not have a namescope separately, since
# it is important to checkpoint the iteration counter
iteration_blobs = set()
for op in model.net.Proto().op:
if op.type == 'Iter' or op.type == 'AtomicIter':
if not op.output[0].startswith("{}_".format(model._device_prefix)):
iteration_blobs.add(op.output[0])
return first_gpu_blobs.union(iteration_blobs)
def FinalizeAfterCheckpoint(model, blobs=None, cpu_mode=False):
'''
This function should be called after loading parameters from a
checkpoint / initial parameters file.
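Typical use (a sketch):
# load parameter blobs (e.g. for the master device), then:
FinalizeAfterCheckpoint(model)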
'''
if not hasattr(model, "_checkpoint_net"):
if blobs is None:
(_, uniq_blob_names) = _ComputeBlobsToSync(model)
else:
uniq_blob_names = [stripBlobName(p) for p in blobs]
# Synchronize to the blob lookup map, as the provided
# blobs might have non-parameters, such as momentum blobs.
log.info("Creating checkpoint synchronization net")
devices = model.GetDevices()
for name in uniq_blob_names:
if name not in model._device_grouped_blobs:
grouped = {
d:
core.BlobReference("{}_{}{}{}".format(
model._device_prefix,
d,
scope._NAMESCOPE_SEPARATOR,
name)
) for d in devices}
model._device_grouped_blobs[name] = grouped
model._checkpoint_net = core.Net("checkpoint_sync_net")
if not cpu_mode:
model._checkpoint_net.RunAllOnGPU()
checkpoint_init_net = None
if (model._rendezvous is not None and model._rendezvous['num_shards'] > 1):
checkpoint_init_net = core.Net("checkpoint_init_net")
if not cpu_mode:
checkpoint_init_net.RunAllOnGPU()
_SyncAllParams(
devices,
model,
checkpoint_init_net,
model._checkpoint_net,
model._rendezvous,
uniq_blob_names,
max_concurrent_distributed_ops=1
)
if checkpoint_init_net:
workspace.RunNetOnce(checkpoint_init_net)
workspace.CreateNet(model._checkpoint_net)
# Run the sync
log.info("Run checkpoint net")
workspace.RunNet(model._checkpoint_net.Proto().name)
def GetLearningRateBlobNames(model):
'''
Returns a list of learning rate blob names used in the optimizer.
'''
if model._optimizer is not None:
if model._device_type == caffe2_pb2.CPU or model._device_type == caffe2_pb2.IDEEP:
return [model._optimizer.get_cpu_blob_name('lr')]
elif core.IsGPUDeviceType(model._device_type):
return [model._optimizer.get_gpu_blob_name('lr', gpu, '')
for gpu in model._devices]
else:
raise Exception(
"Unsupported device type : {}".format(model._device_type)
)
else:
lr_blob_names = []
for op in model.net.Proto().op:
if op.type == "LearningRate":
lr_blob_names.append(op.output[0])
return lr_blob_names
def _Broadcast(devices, model, net, param, use_nccl=False):
# Copy params from gpu_0 to the other devices
master_dev = devices[0]
if use_nccl:
if _IsGPUBlob(model, param):
master_device_opt = core.DeviceOption(model._device_type, master_dev)
with core.DeviceScope(master_device_opt):
# Note that the root is the root _rank_ and not the root
# _device_. Thus we always use root=0, regardless of the
# devices used.
net.NCCLBroadcast(
list(viewvalues(model._device_grouped_blobs[param])),
list(viewvalues(model._device_grouped_blobs[param])),
root=0,
)
return
for dev_idx in devices[1:]:
if _IsGPUBlob(model, param):
device_opt = core.DeviceOption(workspace.GpuDeviceType, dev_idx)
else:
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0) if _IsIDEEPBlob(model, param) else \
core.DeviceOption(caffe2_pb2.CPU, 0)
with core.DeviceScope(device_opt):
net.Copy(
model._device_grouped_blobs[param][master_dev],
model._device_grouped_blobs[param][dev_idx]
)
def _AllReduce(devices, model, net, param, use_nccl=False, control_input=None):
blobs_group = list(viewvalues(model._device_grouped_blobs[param]))
if model._device_type == caffe2_pb2.CUDA and use_nccl:
# TODO: for _shared_model, do only NCCLReduce
model.NCCLAllreduce(
blobs_group, blobs_group, control_input=control_input
)
return
if model._device_type == workspace.GpuDeviceType:
p2p_access_pattern = workspace.GetGpuPeerAccessPattern()
else:
p2p_access_pattern = None
def sumN(*dev_indices):
"""Create a Sum op for 2 or more blobs on different devices.
Saves the result on the first device.
Args:
dev_indices -- a list of device indices, which can be translated into
CUDA identifiers with model._devices
"""
devices = [model._devices[idx] for idx in dev_indices]
blobs = [blobs_group[idx] for idx in dev_indices]
device_opt = core.DeviceOption(model._device_type, devices[0])
with core.DeviceScope(device_opt):
for i, peer in enumerate(devices):
if i == 0:
continue # Skip the first device
if p2p_access_pattern is not None and p2p_access_pattern.size and not p2p_access_pattern[
devices[0], peer
]:
# Copy from peer to d0
blobs[i] = model.Copy(
blobs[i],
'gpu_{}/{}_gpu{}_copy'.format(devices[0], param, peer)
)
net.Sum(blobs, [blobs[0]], name='dpm')
if len(devices) == 16:
# Special tree reduction for 16 gpus, TODO generalize like in muji.py
for j in range(8):
sumN(j * 2, j * 2 + 1)
for j in range(4):
sumN(j * 4, j * 4 + 2)
for j in range(2):
sumN(j * 8, j * 8 + 4)
sumN(0, 8)
elif len(devices) == 8:
for j in range(4):
sumN(j * 2, j * 2 + 1)
for j in range(2):
sumN(j * 4, j * 4 + 2)
sumN(0, 4)
elif len(devices) == 4:
sumN(0, 1)
sumN(2, 3)
sumN(0, 2)
else:
sumN(*range(len(devices)))
# TODO: for _shared_model, no need to broadcast
_Broadcast(devices, model, net, param)
def _SyncAllParams(
devices,
model,
init_net,
net,
rendezvous,
unique_param_names,
max_concurrent_distributed_ops=4
):
if rendezvous is None or rendezvous['num_shards'] <= 1:
_SyncAllParamsSingleHost(devices, model, net, unique_param_names)
else:
_SyncAllParamsDistributed(
devices,
model,
init_net,
net,
rendezvous,
unique_param_names,
max_concurrent_distributed_ops
)
def AddBlobSync(model, blobs, net=None):
'''
Sync blobs across devices and hosts
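Example (a sketch; the blob name is an assumption and must be given
without a device prefix):
AddBlobSync(model, ["optimizer_iteration"])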
'''
if len(blobs) == 0:
return
net = model.net if net is None else net
for b in blobs:
assert not b.startswith(model._device_prefix), \
"Provide unprefixed blob name: {}".format(b)
model._device_grouped_blobs[b] = {
d: core.BlobReference("{}_{}/{}".format(model._device_prefix, d, b))
for d in model._devices
}
_SyncAllParams(
model._devices,
model,
model.param_init_net,
net,
model._rendezvous,
set(blobs))
def AddDistributedBlobSync(model, blobs):
'''
Sync blobs across machines (but not across devices)
'''
if model._rendezvous is None:
return
synth_name = "_".join([str(b) for b in blobs])
comm_world = _CreateOrCloneCommonWorld(
model.param_init_net,
"blob_sync_cw_" + synth_name,
rendezvous=model._rendezvous,
)
model.net.Allreduce(
inputs=[comm_world] + blobs,
outputs=blobs,
engine=model._rendezvous['engine'],
)
def _SyncAllParamsDistributed(
devices,
model,
init_net,
net,
rendezvous,
unique_param_names,
max_concurrent_distributed_ops
):
assert rendezvous['num_shards'] > 1
gpu_device_opt = core.DeviceOption(model._device_type, devices[0])
cpu_device_opt = core.DeviceOption(caffe2_pb2.CPU)
ideep_device_opt = core.DeviceOption(caffe2_pb2.IDEEP)
if model._broadcast_context is None:
model._broadcast_context = CollectivesConcurrencyControl(
"broadcast",
max_concurrent_distributed_ops,
init_net,
rendezvous
)
context = model._broadcast_context
for param_name in sorted(unique_param_names):
master_param = model._device_grouped_blobs[param_name][devices[0]]
params_group = list(viewvalues(model._device_grouped_blobs[param_name]))
def broadcast(params):
comm_world, control_input = context.get_control_and_context(params)
net.Broadcast(
inputs=[comm_world] + params,
outputs=params,
name=param_name,
engine=rendezvous['engine'],
control_input=control_input
)
device_opt = gpu_device_opt if _IsGPUBlob(
model, param_name
) else ideep_device_opt if _IsIDEEPBlob(model, param_name) else cpu_device_opt
if rendezvous['engine'] == 'GLOO':
with core.DeviceScope(device_opt):
broadcast(params_group)
else:
# Copy between GPU and CPU
with core.DeviceScope(device_opt):
param_cpu = net.CopyGPUToCPU(
master_param,
str(master_param) + "cpu"
)
with core.DeviceScope(cpu_device_opt):
broadcast([param_cpu])
with core.DeviceScope(device_opt):
net.CopyCPUToGPU(param_cpu, master_param)
# Broadcast locally
_Broadcast(devices, model, net, param_name)
def _SyncAllParamsSingleHost(devices, model, net, unique_param_names):
for param in unique_param_names:
_Broadcast(devices, model, net, param)
def _AllReduceBlobs(blob_names, devices, model, net, rendezvous, use_nccl,
max_concurrent_distributed_ops):
if rendezvous is None or rendezvous['num_shards'] <= 1:
_AllReduceBlobsSingleHost(
blob_names,
devices,
model,
net,
use_nccl
)
else:
_AllReduceBlobsDistributed(
blob_names,
devices,
model,
net,
rendezvous,
max_concurrent_distributed_ops,
)
def _PruneParametersForSharing(model):
assert model._shared_model
master_prefix = "{}_{}/".format(model._device_prefix, model._devices[0])
# Remove non-master parameters so that they will not receive parameter
# update operators.
model.params = model.GetParams(master_prefix)
paramset = set(model.params)
model.param_to_grad = {
p: model.param_to_grad[p]
for p in model.param_to_grad if p in paramset
}
model.weights = [w for w in model.weights if w in paramset]
model.biases = [w for w in model.biases if w in paramset]
def _RemapParameterBlobsForSharedModel(model, all_params):
assert model._shared_model
master_prefix = "{}_{}/".format(
model._device_prefix, model._devices[0])
log.info("Remapping param blobs to master -> {}".format(master_prefix))
master_params = set(model.GetParams())
# Remove all but master params
def modify_ops(net):
ops = []
for op in net.Proto().op:
delete_op = False
# Delete ops that output non-master version of parameter
for outp in op.output:
if outp in all_params and outp not in master_params:
delete_op = True
log.debug("Delete b/c {}: {}".format(outp, str(op)))
break
if delete_op:
continue
# Remap inputs to point to the master param
for j, inp in enumerate(op.input):
if inp in all_params and inp not in master_params:
op.input[j] = master_prefix + stripBlobName(inp)
ops.append(op)
del net.Proto().op[:]
net.Proto().op.extend(ops)
modify_ops(model.param_init_net)
modify_ops(model.net)
class CollectivesConcurrencyControl(object):
"""
Creates common worlds (up to max_concurrent_context) and manages the
sequential execution of collectives that share the same context via
cyclic control inputs.
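A sketch of typical use (the surrounding names are assumptions):
context = CollectivesConcurrencyControl(
    "allreduce", 4, model.param_init_net, rendezvous)
comm_world, control_input = context.get_control_and_context(blobs[0])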
"""
def __init__(
self,
name,
max_concurrent_context,
param_init_net,
rendezvous
):
self.name = name
self.param_init_net = param_init_net
self.max_concurrent_context = max_concurrent_context
self.counter = 0
self.common_worlds = []
self.control_inputs = []
self.rendezvous = rendezvous
def get_control_and_context(self, control_output_blob):
common_world, control_input = [None, None]
current_slot = self.counter % self.max_concurrent_context
if len(self.common_worlds) < self.max_concurrent_context:
common_world = _CreateOrCloneCommonWorld(
self.param_init_net,
"{}_{}_cw".format(self.name, current_slot),
rendezvous=self.rendezvous,
)
self.common_worlds.append(common_world)
self.control_inputs.append(control_output_blob)
else:
common_world = self.common_worlds[current_slot]
control_input = self.control_inputs[current_slot]
self.control_inputs[current_slot] = control_output_blob
self.counter += 1
return common_world, control_input
def _AllReduceBlobsDistributed(
blob_names,
devices,
model,
net,
rendezvous,
max_concurrent_distributed_ops,
):
num_workers = model.net.Proto().num_workers
assert num_workers > 1, "Please specify more than 1 worker"
all_reduce_engine = rendezvous['engine']
master_device_opt = core.DeviceOption(model._device_type, devices[0])
reducing_device_opt = master_device_opt
context = CollectivesConcurrencyControl(
"allreduce",
max_concurrent_distributed_ops,
model.param_init_net,
rendezvous
)
nccl_control_blob = None
for blob_name in blob_names:
master_blob = model._device_grouped_blobs[blob_name][devices[0]]
blobs_group = list(viewvalues(model._device_grouped_blobs[blob_name]))
assert master_blob in blobs_group
# Remark: NCCLReduce does not support in-place modifications
# so we need a temporary blob
reduced_blob = str(master_blob) + "_red"
def allreduce(blobs, **kwargs):
with core.DeviceScope(reducing_device_opt):
comm_world, control_input = \
context.get_control_and_context(blobs[0])
net.Allreduce(
inputs=[comm_world] + blobs,
outputs=blobs,
name=blob_name,
engine=all_reduce_engine,
control_input=control_input,
**kwargs
)
if rendezvous['engine'] == 'GLOO':
# With Gloo, cross-GPU and cross-machine allreduce
# can be executed in a single operation.
# Try to use GPUDirect if transport == ibverbs.
allreduce(
blobs_group,
gpu_direct=(rendezvous.get("transport", None) == "ibverbs"),
)
else:
# Step 1: sum blobs from local GPUs to master GPU
with core.DeviceScope(master_device_opt):
model.ConstantFill(master_blob, reduced_blob, value=0.0)
# Temp fix since NCCLReduce does not work
net.NCCLAllreduce(
blobs_group,
blobs_group,
control_input=nccl_control_blob,
)
nccl_control_blob = blobs_group[0]
net.Copy(master_blob, reduced_blob)
# Step 2: allreduce between all hosts, between master GPUs
allreduce([reduced_blob])
with core.DeviceScope(master_device_opt):
net.Copy(reduced_blob, master_blob)
# Step 3: broadcast locally
_Broadcast(devices, model, net, blob_name)
def _AllReduceBlobsSingleHost(blob_names, devices, model, net, use_nccl):
"""Performs NCCL AllReduce to distribute blobs to all the GPUs."""
if len(devices) == 1:
return
# Now we need to Allreduce blobs on all the GPUs.
# Pick GPU #0 as a master GPU.
master_device_opt = core.DeviceOption(model._device_type, devices[0])
last_out = None
concatenated_idx = set()
for blob_name in blob_names:
# Group by blob_name for reduce.
blobs_group = list(viewvalues(model._device_grouped_blobs[blob_name]))
if len(blobs_group) == 1:
# Non-reducible
continue
assert len(blobs_group) == len(devices), \
"Each GPU from {}, should have a copy of {}.".format(
devices, blob_name)
if _IsGPUBlob(model, blob_name):
with core.DeviceScope(master_device_opt):
if not isinstance(blobs_group[0], core.GradientSlice):
_AllReduce(
devices, model, net, blob_name, use_nccl, last_out
)
# last_out is used to serialize the execution of nccls
last_out = blobs_group[0]
else:
# Sparse gradients: all-gather for indices and values
master_ns = "{}_{}".format(model._device_prefix, devices[0])
# Skip if we have already copied concatenated indices
# to the indices of GradientSlice. This happens when two
# or more grad blobs are gathered with the same indices
# blob.
skip_idx_concat = False
for g in blobs_group:
if g.indices in concatenated_idx:
skip_idx_concat = True
if not skip_idx_concat:
grad_idx_concat, _ = net.Concat(
[g.indices for g in blobs_group],
["{}/{}_index_concat".format(master_ns, blob_name),
"{}/{}_index_splitinfo".format(master_ns, blob_name)],
axis=0,
name="note:data_parallel_model")
for gpu, g in viewitems(model._device_grouped_blobs[blob_name]):
device_opt = core.DeviceOption(model._device_type, gpu)
with core.DeviceScope(device_opt):
model.Copy(grad_idx_concat, g.indices)
concatenated_idx.add(g.indices)
grad_val_concat, _ = net.Concat(
[g.values for g in blobs_group],
["{}/{}_val_concat".format(master_ns, blob_name),
"{}/{}_val_splitinfo".format(master_ns, blob_name)],
axis=0, name="note:data_parallel_model")
for gpu, g in viewitems(model._device_grouped_blobs[blob_name]):
device_opt = core.DeviceOption(model._device_type, gpu)
with core.DeviceScope(device_opt):
model.Copy(grad_val_concat, g.values)
elif _IsIDEEPBlob(model, blob_name):
assert not isinstance(blobs_group[0], core.GradientSlice), \
"Synchronizing gradient slices not supported"
with core.DeviceScope(core.DeviceOption(caffe2_pb2.IDEEP)):
net.Sum(blobs_group, [blobs_group[0]])
if not model._shared_model:
_Broadcast(devices, model, net, blob_name)
else:
assert not isinstance(blobs_group[0], core.GradientSlice), \
"Synchronizing gradient slices not supported"
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
# Poor man's allreduce
net.Sum(blobs_group, [blobs_group[0]])
if not model._shared_model:
_Broadcast(devices, model, net, blob_name)
def _BroadcastComputedParams(devices, model, rendezvous, use_nccl=False):
if rendezvous is None:
_BroadcastComputedParamsSingleHost(devices, model, use_nccl)
else:
_BroadcastComputedParamsDistributed(devices, model, rendezvous, use_nccl)
def _BroadcastComputedParamsDistributed(
devices,
model,
rendezvous,
use_nccl=False
):
_BroadcastComputedParamsSingleHost(devices, model, use_nccl)
log.warning("Distributed broadcast of computed params is not implemented yet")
def _BroadcastComputedParamsSingleHost(devices, model, use_nccl=False):
'''
Broadcast computed params from the master device to all devices
'''
if len(devices) == 1:
return
for param_name in model._computed_param_names:
# Copy from master to others -- averaging would perhaps be better,
# but currently NCCLAllReduce is too prone to deadlock
_Broadcast(devices, model, model.net, param_name, use_nccl)
def _GetReverseOrderedGrads(model):
'''
Returns the gradients in reverse order (namespace stripped),
for the optimal synchronization order.
'''
return list(reversed(model._grad_names))
# A helper function to extract a parameter's name
def stripBlobName(param):
# Format is "a/b/c/d" -> "b/c/d"
if isinstance(param, core.GradientSlice):
return stripBlobName(param.indices) + ":" + stripBlobName(param.values)
else:
name = str(param)
return name[name.index(scope._NAMESCOPE_SEPARATOR) + 1:]
def _AnalyzeOperators(model):
'''
Look at all the operators and check that they do not cross device scopes
'''
for op in model.Proto().op:
if "NCCL" in op.type or "Copy" in op.type or "Concat" in op.type:
continue
if "Sum" == op.type and op.name == "dpm":
continue
if "Allreduce" in op.type and "GLOO" in op.engine:
continue
op_dev = op.device_option
op_gpu = op_dev.device_id
# This avoids failing on operators that are only for CPU
if not core.IsGPUDeviceType(op_dev.device_type):
continue
namescope = "{}_{}/".format(model._device_prefix, op_gpu)
for inp in list(op.input) + list(op.output):
if inp.startswith("{}_".format(model._device_prefix)
) and not inp.startswith(namescope):
raise Exception(
"Blob {} of op {}, should have namescope {}. Op: {}".format(
inp,
op.type,
"{}_{}/".format(model._device_prefix, op_gpu),
str(op),
)
)
def _InferBlobDevice(model):
'''
Assign each blob to a device option based on the operator outputting it
'''
mapping = {}
def map_ops(proto):
for op in proto.op:
device_option = op.device_option
if op.type == "Iter":
# Hack for Iters which have blob in CPU context
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CPU
for b in list(op.input) + list(op.output):
if b not in mapping:
mapping[b] = device_option
if op.type.startswith('RecurrentNetwork'):
step_args = [a for a in op.arg if a.name.endswith("step_net")]
for step_arg in step_args:
map_ops(step_arg.n)
map_ops(model.param_init_net.Proto())
map_ops(model.net.Proto())
model._blob_to_device = mapping
def _IsIDEEPBlob(model, blob_name):
if blob_name in model._blob_to_device:
return model._blob_to_device[blob_name].device_type == caffe2_pb2.IDEEP
else:
blob_name = "{}_{}/{}".format(
model._device_prefix, model._devices[0], blob_name
)
if blob_name not in model._blob_to_device:
return model._device_type == caffe2_pb2.IDEEP
return model._blob_to_device[blob_name].device_type == caffe2_pb2.IDEEP
def _IsGPUBlob(model, blob_name):
if blob_name in model._blob_to_device:
return core.IsGPUDeviceType(model._blob_to_device[blob_name].device_type)
else:
blob_name = "{}_{}/{}".format(
model._device_prefix, model._devices[0], blob_name
)
if blob_name not in model._blob_to_device:
return core.IsGPUDeviceType(model._device_type)
return core.IsGPUDeviceType(model._blob_to_device[blob_name].device_type)
def _GroupByDevice(model, devices, params, non_data_params):
'''
Groups blobs by device, returning a map of [blobname] = {0: BlobRef, 1: ..}.
Returns ordered dictionary, ensuring the original order.
'''
grouped = OrderedDict()
# Only consider params that were created to be "data parallel"
params = params[len(non_data_params):]
for _i, p in enumerate(params):
assert isinstance(p, core.BlobReference) or \
isinstance(p, core.GradientSlice), \
"Param {} is not BlobReference or GradientSlice".format(p)
name = stripBlobName(p)
gpuid = None
if isinstance(p, core.BlobReference):
gpuid = int(p.GetNameScope().split("_")[1].split("/")[0])
assert "{}_{}/".format(model._device_prefix, gpuid) in p.GetNameScope(),\
"Param {} expected to have namescope '{}_{}'".format(str(p), model._device_prefix, gpuid)
else:
gpuid = int(p.indices.GetNameScope().split("_")[1].split("/")[0])
assert "{}_{}/".format(model._device_prefix, gpuid) in p.indices.GetNameScope(),\
"Indices {} expected to have namescope '{}_{}'".format(str(p), model._device_prefix, gpuid)
assert "{}_{}/".format(model._device_prefix, gpuid) in p.values.GetNameScope(),\
"Values {} expected to have namescope '{}_{}'".format(str(p), model._device_prefix, gpuid)
if name not in grouped:
grouped[name] = {}
grouped[name][gpuid] = p
return grouped
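# Illustrative sketch (param names are hypothetical): for two devices and a
# device prefix of "gpu", params ["gpu_0/fc_w", "gpu_1/fc_w"] would group as
#
#   OrderedDict([("fc_w", {0: BlobReference("gpu_0/fc_w"),
#                          1: BlobReference("gpu_1/fc_w")})])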
def _ValidateParams(params):
set_params = set(params)
if len(params) > len(set_params):
dupes = []
sp = sorted(params)
for j, p in enumerate(sp):
if j > 0 and sp[j - 1] == p:
dupes.append(p)
assert len(params) == len(set_params), \
"Duplicate entries in params: {}".format(dupes)
def _ComputeBlobsToSync(model):
'''
We sync all blobs that are generated by param init net and
    are 'data parallel', i.e. assigned to a device
'''
sync_names = set()
# We don't sync params if the model is shared
if model._shared_model:
blobs_to_sync = [str(p) for p in model.GetComputedParams('')]
sync_names = [stripBlobName(p) for p in blobs_to_sync]
else:
blobs_to_sync = []
for op in model.param_init_net.Proto().op:
dp_outputs = [
o for o in op.output
if o.startswith("{}_".format(model._device_prefix))
]
sync_names.update([stripBlobName(o) for o in dp_outputs])
blobs_to_sync.extend(dp_outputs)
# Sanity check
diff = set(model._param_names) - sync_names
assert diff == set(), \
"Some params not instantiated in param init net: {}".format(diff)
# Remove duplicates and sort
prefixlen = len(model._device_prefix) + 1
def extract_sort_key(b):
# Sort first based on device id, and then by whole string
deviceid = int(b[prefixlen:b.index(scope._NAMESCOPE_SEPARATOR)])
return (deviceid, b)
blobs_to_sync = sorted(
list(set(blobs_to_sync)),
key=extract_sort_key)
blobs_to_sync = [core.BlobReference(b) for b in blobs_to_sync]
return (blobs_to_sync, sync_names)
def _OptimizeGradientMemorySimple(model, losses_by_gpu, devices):
log.warning("------- DEPRECATED API, please use " +
"data_parallel_model.OptimizeGradientMemory() ----- ")
for device in devices:
namescope = "{}_{}/".format(model._device_prefix, device)
model.net._net = memonger.share_grad_blobs(
model.net,
losses_by_gpu[device],
set(viewvalues(model.param_to_grad)),
namescope,
share_activations=False,
)
def _AddDynamicMemoryOptimization(model, blobs_to_keep, devices):
blobs_to_keep_all_devices = set()
if blobs_to_keep is not None:
for device in devices:
for blob_name in blobs_to_keep:
blobs_to_keep_all_devices.add(
"{}_{}/{}".format(model._device_prefix, device, blob_name)
)
if model._rendezvous is not None:
# GLOO operators expect the tensor addresses to remain same over
# iterations so we need to remove param grads from the dynamic memory
# management.
blobs_to_keep_all_devices.update(
[str(b) for b in viewvalues(model.param_to_grad)]
)
model.net._net = memonger.release_blobs_when_used(
model.net.Proto(),
blobs_to_keep_all_devices
)
def OptimizeGradientMemory(model,
input_shapes,
excluded_blobs,
recycle_activations):
"""
Optimize memory usage of the backward pass by recycling blobs for gradient
inputs that have been 'used'.
input_shapes: dict of blob name to shape for the inputs of the model.
Pass empty dictionary if not known.
excluded_blobs: list of blobs that cannot be recycled. These are blobs
that you will access externally.
recycle_activations: whether to also recycle forward pass activations
"""
if input_shapes is not None:
input_shapes_all_devices = {}
for b, shp in viewitems(input_shapes):
for d in model._devices:
input_shapes_all_devices["{}_{}/{}".
format(model._device_prefix, d, b)] = shp
(shapes, types) = workspace.InferShapesAndTypes(
[model.param_init_net, model.net],
input_shapes_all_devices,
)
else:
shapes = None
for device in model._devices:
namescope = "{}_{}/".format(model._device_prefix, device)
excluded_blobs_by_device = set(namescope + b for b in excluded_blobs)
model.net._net = memonger.share_grad_blobs(
model.net,
model._losses_by_gpu[device],
set(viewvalues(model.param_to_grad)),
namescope,
dont_share_blobs=excluded_blobs_by_device,
share_activations=recycle_activations,
blob_shapes=shapes,
)
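# Hedged usage sketch (blob names and shapes are illustrative): called after
# the backward pass has been added to the model and before the net runs:
#
#   OptimizeGradientMemory(
#       model,
#       input_shapes={"data": (32, 3, 224, 224)},
#       excluded_blobs=["fc_out"],
#       recycle_activations=False,
#   )
#
# Each entry of input_shapes and excluded_blobs is expanded to its
# per-device, namescoped form internally.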
def _CreateOrCloneCommonWorld(
net,
common_world_blob,
rendezvous,
name=None,
timeout_sec=None):
if timeout_sec is None:
timeout_sec = _DEFAULT_TIMEOUT_SEC
timeout_ms = timeout_sec * 1000
# Check if there is an existing CreateCommonWorld
# with the same timeout we're looking for. If so,
# we can clone it instead of creating a new one.
existing = None
for op in net.Proto().op:
if op.type != "CreateCommonWorld":
continue
# Find common world timeout
op_timeout_ms = -1
for arg in op.arg:
if arg.name == 'timeout_ms':
op_timeout_ms = arg.i
break
if op_timeout_ms != timeout_ms:
continue
# This common world was created with the same timeout we're
# looking for, so we can clone it
existing = op.output[0]
break
if name is None:
name = "{}_op".format(common_world_blob)
if existing is not None:
comm_world = net.CloneCommonWorld(
[existing],
common_world_blob,
name=name,
engine=rendezvous['engine'],
)
else:
        kwargs = dict()
if 'transport' in rendezvous:
kwargs['transport'] = rendezvous['transport']
if 'interface' in rendezvous:
kwargs['interface'] = rendezvous['interface']
if 'mpi_rendezvous' in rendezvous:
kwargs['mpi_rendezvous'] = rendezvous['mpi_rendezvous']
comm_world = net.CreateCommonWorld(
rendezvous['kv_handler'] or [],
common_world_blob,
name=name,
size=rendezvous['num_shards'],
rank=rendezvous['shard_id'],
engine=rendezvous['engine'],
timeout_ms=timeout_ms,
**kwargs
)
return comm_world
def _RunComparison(model, blob_name, device=None):
if device is None:
device = model._blob_to_device[blob_name]
with core.DeviceScope(device):
rendezvous = model._rendezvous
if rendezvous is None or rendezvous['num_shards'] == 1:
return True
test_data_arr = np.zeros(rendezvous['num_shards']).astype(np.float32)
test_data_arr[rendezvous['shard_id']] = 1
workspace.FeedBlob("compare_arr", test_data_arr)
comparison_net = core.Net("allcompare_net")
        kwargs = dict()
if 'mpi_rendezvous' in rendezvous:
kwargs['mpi_rendezvous'] = rendezvous['mpi_rendezvous']
comm_world = comparison_net.CreateCommonWorld(
rendezvous['kv_handler'] or [],
"initial_sync",
name=model.net.Proto().name + ".cw_master_select",
size=rendezvous['num_shards'],
rank=rendezvous['shard_id'],
engine=rendezvous['engine'],
**kwargs
)
blob_name_checksum = blob_name + "_checksum"
comparison_net.SumSqrElements(
[blob_name], [blob_name_checksum], average=False
)
blob_name_gather = blob_name + "_gather"
comparison_net.Mul(
inputs=["compare_arr", blob_name_checksum],
outputs=blob_name_gather,
broadcast=1
)
comparison_net.Allreduce(
inputs=[comm_world, blob_name_gather],
outputs=[blob_name_gather],
engine=rendezvous['engine'],
)
workspace.RunNetOnce(comparison_net)
gather_arr = workspace.FetchBlob(blob_name_gather)
baseline = gather_arr[0]
for i in range(rendezvous['num_shards']):
assert gather_arr[i] == baseline, \
"allcompare failed on shard {}.".format(rendezvous['shard_id'])
return True
def _InterleaveOps(model):
'''
Data Parallel Model creates a net with ops in one device grouped together.
This will interleave the ops so that each op for each device is next
to each other in the net. Kind of like combining decks of cards. This
ensures that progress is made along the critical path roughly concurrently
for each device, which is important due to the extra intra-node
synchronization required for multi-device batch normalization.
'''
orig_ops = list(model.net.Proto().op)
num_devices = len(model._devices)
num_ops_per_dev = len(orig_ops) // num_devices
assert num_devices * num_ops_per_dev == len(orig_ops), \
'Number of ops per device in original net is not uniform'
new_ops = []
ops = {d: [] for d in range(num_devices)}
for op in orig_ops:
ops[op.device_option.device_id].append(op)
for j in range(num_ops_per_dev):
tp = None
for d in model._devices:
if tp is None:
tp = ops[d][j].type
new_ops.append(ops[d][j])
# Sanity
assert ops[d][j].type == tp, \
"Type mismatch {} / {}".format(tp, ops[d][j].type)
del model.net.Proto().op[:]
model.net.Proto().op.extend(new_ops)
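# Illustrative sketch of the reordering (op labels are hypothetical): with
# two devices, an original net laid out as
#   [op1_dev0, op2_dev0, op1_dev1, op2_dev1]
# becomes
#   [op1_dev0, op1_dev1, op2_dev0, op2_dev1]
# so both devices advance along the critical path roughly together.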
def _CPUInterDeviceBatchNormalization(model):
orig_ops = list(model.net.Proto().op)
new_ops = []
num_devices = len(model._devices)
batch_norm_ops = []
injected_ops = []
spatial_bn_phase = False
sums_blobs = []
sumsq_blobs = []
name = []
input_blob_name = None
spatial_bn_gradient_phase = False
scale_grad_blobs = []
bias_grad_blobs = []
def _cpuReduce(param, input_blobs, destination_blobs):
"""
Reduce results from multiple cpus and distributes the results back
to each device. This is done by copying values to cpu_0 and summing
them. The cpu_0 result is then copied back to each of the devices.
param: the name of the data (blobs) to reduce
input_blobs: the list of blobs to reduce
destination_blobs: list of blobs to copy the result to
"""
added_ops = []
result_blob = "cpu_0/" + param + "_combined"
added_ops.append(core.CreateOperator("Sum", input_blobs, result_blob))
for blob in destination_blobs:
added_ops.append(core.CreateOperator("Copy", result_blob, blob))
return added_ops
for op in orig_ops:
if op.type != 'SpatialBN' and op.type != 'SpatialBNGradient':
if spatial_bn_phase:
new_ops.extend(injected_ops)
new_ops.append(
core.CreateOperator("Sum",
sums_blobs,
input_blob_name + "_sums_combined"))
new_ops.append(
core.CreateOperator("Sum",
sumsq_blobs,
input_blob_name + "_sumsq_combined"))
new_ops.extend(batch_norm_ops)
injected_ops = []
batch_norm_ops = []
sums_blobs = []
sumsq_blobs = []
spatial_bn_phase = False
input_blob_name = None
elif spatial_bn_gradient_phase:
new_ops.extend(injected_ops)
new_ops.extend(_cpuReduce(
stripBlobName(scale_grad_blobs[0]),
scale_grad_blobs,
scale_grad_blobs))
new_ops.extend(_cpuReduce(
stripBlobName(bias_grad_blobs[0]),
bias_grad_blobs,
bias_grad_blobs))
new_ops.extend(batch_norm_ops)
injected_ops = []
batch_norm_ops = []
scale_grad_blobs = []
bias_grad_blobs = []
spatial_bn_gradient_phase = False
new_ops.append(op)
elif op.type == 'SpatialBN':
spatial_bn_phase = True
if input_blob_name is None:
input_blob_name = op.input[0]
name = op.input[0]
injected_ops.append(
core.CreateOperator(
"ChannelStats",
name,
[name + "_sums", name + "_sumsq"]))
sums_blobs.append(name + "_sums")
sumsq_blobs.append(name + "_sumsq")
op.input.append(input_blob_name + "_sums_combined")
op.input.append(input_blob_name + "_sumsq_combined")
op.arg.extend([utils.MakeArgument("num_batches", num_devices)])
batch_norm_ops.append(op)
elif op.type == 'SpatialBNGradient':
spatial_bn_gradient_phase = True
injected_ops.append(
core.CreateOperator("ChannelBackpropStats",
[op.input[0], op.input[3], op.input[4],
op.input[2]],
[op.output[1], op.output[2]]))
scale_grad_blobs.append(op.output[1])
bias_grad_blobs.append(op.output[2])
op.arg.extend([utils.MakeArgument("num_batches", num_devices)])
op.input.extend([op.output[1], op.output[2]])
batch_norm_ops.append(op)
assert not spatial_bn_phase, \
"Net modification for cpu inter-device batch normalization failed"
del model.net.Proto().op[:]
model.net.Proto().op.extend(new_ops)
def _GPUInterDeviceBatchNormalization(model):
orig_ops = list(model.net.Proto().op)
new_ops = []
num_devices = len(model._devices)
batch_norm_ops = []
injected_ops = []
spatial_bn_phase = False
sums_blobs = []
sumsq_blobs = []
name = []
input_blob_name = None
spatial_bn_gradient_phase = False
scale_grad_blobs = []
bias_grad_blobs = []
master_device = "cpu_0"
master_device_option = core.DeviceOption(caffe2_pb2.CPU)
def _gpuReduce(param, num_devices, master_device, result_blobs=None):
"""
Reduces results from multiple gpus and distributes the results back
to each device. This is done by copying values to the master device
and summing them. The master device result is then copied back to
each of the devices.
param: the name of the data (blobs) to reduce
num_devices: the number of devices
master_device: the device to copy/compute values on
result_blobs: optional list of result blobs to copy to
"""
added_ops = []
source_blobs = []
destination_blobs = []
if result_blobs is None:
result_blobs = [
"gpu_{}/{}_combined".format(i, param) for i in range(num_devices)
]
for i in range(num_devices):
device_option = core.DeviceOption(model._device_type, i)
source_blobs.append("gpu_{}/{}".format(i, param))
destination_blobs.append(
"{}/{}_gpu_{}_copy".format(master_device, param, i))
added_ops.append(
core.CreateOperator(
"CopyGPUToCPU",
source_blobs[i],
destination_blobs[i],
device_option=device_option))
added_ops.append(
core.CreateOperator(
"Sum",
destination_blobs,
"{}/{}_combined".format(master_device, param),
device_option=master_device_option))
for i in range(num_devices):
device_option = core.DeviceOption(model._device_type, i)
added_ops.append(
core.CreateOperator(
"CopyCPUToGPU",
"{}/{}_combined".format(master_device, param),
result_blobs[i],
device_option=device_option))
return added_ops
for op in orig_ops:
if op.type != 'SpatialBN' and op.type != 'SpatialBNGradient':
if spatial_bn_phase:
new_ops.extend(injected_ops)
new_ops.extend(_gpuReduce(
stripBlobName(input_blob_name) + "_sums",
num_devices,
master_device,
))
new_ops.extend(_gpuReduce(
stripBlobName(input_blob_name) + "_sumsq",
num_devices,
master_device,
))
new_ops.extend(batch_norm_ops)
injected_ops = []
batch_norm_ops = []
sums_blobs = []
sumsq_blobs = []
spatial_bn_phase = False
input_blob_name = None
elif spatial_bn_gradient_phase:
new_ops.extend(injected_ops)
new_ops.extend(_gpuReduce(
stripBlobName(scale_grad_blobs[0]),
num_devices,
master_device,
scale_grad_blobs,
))
new_ops.extend(_gpuReduce(
stripBlobName(bias_grad_blobs[0]),
num_devices,
master_device,
bias_grad_blobs,
))
new_ops.extend(batch_norm_ops)
injected_ops = []
batch_norm_ops = []
scale_grad_blobs = []
bias_grad_blobs = []
spatial_bn_gradient_phase = False
new_ops.append(op)
elif op.type == 'SpatialBN':
spatial_bn_phase = True
if input_blob_name is None:
input_blob_name = op.input[0]
name = op.input[0]
device_option = core.DeviceOption(
model._device_type,
op.device_option.device_id,
)
injected_ops.append(
core.CreateOperator(
"ChannelStats",
name,
[name + "_sums", name + "_sumsq"],
device_option=device_option))
sums_blobs.append(name + "_sums")
sumsq_blobs.append(name + "_sumsq")
op.input.append(name + "_sums_combined")
op.input.append(name + "_sumsq_combined")
op.arg.extend([utils.MakeArgument("num_batches", num_devices)])
batch_norm_ops.append(op)
elif op.type == 'SpatialBNGradient':
spatial_bn_gradient_phase = True
device_option = core.DeviceOption(
model._device_type,
op.device_option.device_id,
)
injected_ops.append(
core.CreateOperator("ChannelBackpropStats",
[op.input[0], op.input[3], op.input[4],
op.input[2]],
[op.output[1], op.output[2]],
device_option=device_option))
scale_grad_blobs.append(op.output[1])
bias_grad_blobs.append(op.output[2])
op.arg.extend([utils.MakeArgument("num_batches", num_devices)])
op.input.extend([op.output[1], op.output[2]])
batch_norm_ops.append(op)
assert not spatial_bn_phase, \
"Net modification for gpu inter-device batch normalization failed"
del model.net.Proto().op[:]
model.net.Proto().op.extend(new_ops)
|
pytorch-master
|
caffe2/python/data_parallel_model.py
|
## @package scope
# Module caffe2.python.scope
import contextlib
import threading
from past.builtins import basestring
from caffe2.proto import caffe2_pb2
# The name scope and device scope when creating a new operator.
_NAMESCOPE_SEPARATOR = '/'
_threadlocal_scope = threading.local()
def CurrentNameScope():
global _threadlocal_scope
if not hasattr(_threadlocal_scope, "namescope"):
_threadlocal_scope.namescope = ''
return _threadlocal_scope.namescope
def CurrentDeviceScope():
global _threadlocal_scope
if not hasattr(_threadlocal_scope, "devicescope"):
_threadlocal_scope.devicescope = None
return _threadlocal_scope.devicescope
@contextlib.contextmanager
def NameScope(prefix, reset=False):
global _threadlocal_scope
assert isinstance(prefix, basestring) or prefix is None, \
"NameScope takes in a string as its argument."
old_scope = CurrentNameScope()
prefix = prefix + _NAMESCOPE_SEPARATOR if prefix else ''
if reset:
_threadlocal_scope.namescope = prefix
else:
_threadlocal_scope.namescope = _threadlocal_scope.namescope + prefix
try:
yield
finally:
assert _threadlocal_scope.namescope.endswith(prefix), \
"The namescope variable is changed from outside NameScope() calls."
_threadlocal_scope.namescope = old_scope
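# Minimal usage sketch: nested scopes concatenate their prefixes, and
# reset=True replaces the current scope instead of extending it.
#
#   with NameScope("model"):
#       assert CurrentNameScope() == "model/"
#       with NameScope("layer1"):
#           assert CurrentNameScope() == "model/layer1/"
#       with NameScope("init", reset=True):
#           assert CurrentNameScope() == "init/"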
@contextlib.contextmanager
def DeviceScope(scope, node_name=None):
new_scope = caffe2_pb2.DeviceOption()
if scope:
assert isinstance(scope, caffe2_pb2.DeviceOption), \
"DeviceScope takes in a caffe2_pb2.DeviceOption as its argument."
new_scope.CopyFrom(scope)
else:
assert node_name, "At least one argument should be non-null in DeviceScope"
# rewrite node_name if it is explicitly given
if node_name:
new_scope.node_name = node_name
global _threadlocal_scope
old_scope = CurrentDeviceScope()
# nested scope should inherit the node_name if it is not explicitly set
if old_scope and old_scope.HasField('node_name') and \
not new_scope.HasField('node_name'):
new_scope.node_name = old_scope.node_name
# nested scope should inherit the extra_info and merged it with new extra_info
if old_scope and hasattr(old_scope, 'extra_info'):
new_scope.extra_info.extend(old_scope.extra_info)
new_scope.extra_info.sort()
_threadlocal_scope.devicescope = new_scope
try:
yield
finally:
assert _threadlocal_scope.devicescope == new_scope, \
"The device scope is changed from outside DeviceScope() calls."
_threadlocal_scope.devicescope = old_scope
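# Minimal usage sketch: a nested DeviceScope inherits node_name from the
# enclosing scope when it does not set one explicitly.
#
#   outer = caffe2_pb2.DeviceOption()
#   outer.node_name = "trainer_0"
#   with DeviceScope(outer):
#       with DeviceScope(caffe2_pb2.DeviceOption()):
#           assert CurrentDeviceScope().node_name == "trainer_0"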
@contextlib.contextmanager
def EmptyNameScope():
"""
Allow users to 'disable' the name scope behaviour.
    This sets CurrentNameScope() to the empty string, so that the field is
    not set in CreateOperator(...), etc.
"""
old_scope = CurrentNameScope()
try:
_threadlocal_scope.namescope = ''
yield
finally:
_threadlocal_scope.namescope = old_scope
return
@contextlib.contextmanager
def EmptyDeviceScope():
"""
Allow users to 'disable' the device scope behaviour (so it can be
controlled at a NetDef::DeviceOption level, not overridden at
OperatorDef::DeviceOption level).
This sets the CurrentDeviceScope() to None, so that the field is
not set in CreateOperator(...), etc.
"""
old_scope = CurrentDeviceScope()
try:
_threadlocal_scope.devicescope = None
yield
finally:
_threadlocal_scope.devicescope = old_scope
return
|
pytorch-master
|
caffe2/python/scope.py
|
## @package hip_test_util
# Module caffe2.python.hip_test_util
"""
The HIP test utils are a small addition on top of the hypothesis test utils
under caffe2/python, which allows one to more easily test HIP/ROCm-related
operators.
"""
from caffe2.proto import caffe2_pb2
def run_in_hip(gc, dc):
return (gc.device_type == caffe2_pb2.HIP) or (
caffe2_pb2.HIP in {d.device_type for d in dc})
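# Hedged usage sketch: in a hypothesis-based operator test, `gc` is the
# current device option and `dc` the device options under test, so a test
# can branch on ROCm coverage, e.g.:
#
#   if run_in_hip(gc, dc):
#       pass  # e.g. relax tolerances or skip HIP-unsupported paths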
|
pytorch-master
|
caffe2/python/hip_test_util.py
|
## @package model_helper_api
# Module caffe2.python.model_helper_api
import sys
import copy
import inspect
from past.builtins import basestring
from caffe2.python.model_helper import ModelHelper
# flake8: noqa
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
from caffe2.python.helpers.elementwise_linear import *
from caffe2.python.helpers.fc import *
from caffe2.python.helpers.nonlinearity import *
from caffe2.python.helpers.normalization import *
from caffe2.python.helpers.pooling import *
from caffe2.python.helpers.quantization import *
from caffe2.python.helpers.tools import *
from caffe2.python.helpers.train import *
class HelperWrapper(object):
_registry = {
'arg_scope': arg_scope,
'fc': fc,
'packed_fc': packed_fc,
'fc_decomp': fc_decomp,
'fc_sparse': fc_sparse,
'fc_prune': fc_prune,
'dropout': dropout,
'max_pool': max_pool,
'average_pool': average_pool,
'max_pool_with_index' : max_pool_with_index,
'lrn': lrn,
'softmax': softmax,
'instance_norm': instance_norm,
'spatial_bn': spatial_bn,
'spatial_gn': spatial_gn,
'moments_with_running_stats': moments_with_running_stats,
'relu': relu,
'prelu': prelu,
'tanh': tanh,
'concat': concat,
'depth_concat': depth_concat,
'sum': sum,
'reduce_sum': reduce_sum,
'sub': sub,
'arg_min': arg_min,
'transpose': transpose,
'iter': iter,
'accuracy': accuracy,
'conv': conv,
'conv_nd': conv_nd,
'conv_transpose': conv_transpose,
'group_conv': group_conv,
'group_conv_deprecated': group_conv_deprecated,
'image_input': image_input,
'video_input': video_input,
'add_weight_decay': add_weight_decay,
'elementwise_linear': elementwise_linear,
'layer_norm': layer_norm,
'mat_mul' : mat_mul,
'batch_mat_mul' : batch_mat_mul,
'cond' : cond,
'loop' : loop,
'db_input' : db_input,
'fused_8bit_rowwise_quantized_to_float' : fused_8bit_rowwise_quantized_to_float,
'sparse_lengths_sum_4bit_rowwise_sparse': sparse_lengths_sum_4bit_rowwise_sparse,
}
def __init__(self, wrapped):
self.wrapped = wrapped
def __getattr__(self, helper_name):
if helper_name not in self._registry:
raise AttributeError(
"Helper function {} not "
"registered.".format(helper_name)
)
def scope_wrapper(*args, **kwargs):
new_kwargs = {}
if helper_name != 'arg_scope':
if len(args) > 0 and isinstance(args[0], ModelHelper):
model = args[0]
elif 'model' in kwargs:
model = kwargs['model']
else:
raise RuntimeError(
"The first input of helper function should be model. " \
"Or you can provide it in kwargs as model=<your_model>.")
new_kwargs = copy.deepcopy(model.arg_scope)
func = self._registry[helper_name]
            var_names, _, varkw, _ = inspect.getargspec(func)
if varkw is None:
# this helper function does not take in random **kwargs
new_kwargs = {
var_name: new_kwargs[var_name]
for var_name in var_names if var_name in new_kwargs
}
cur_scope = get_current_scope()
new_kwargs.update(cur_scope.get(helper_name, {}))
new_kwargs.update(kwargs)
return func(*args, **new_kwargs)
scope_wrapper.__name__ = helper_name
return scope_wrapper
def Register(self, helper):
name = helper.__name__
if name in self._registry:
raise AttributeError(
"Helper {} already exists. Please change your "
"helper name.".format(name)
)
self._registry[name] = helper
def has_helper(self, helper_or_helper_name):
helper_name = (
helper_or_helper_name
if isinstance(helper_or_helper_name, basestring) else
helper_or_helper_name.__name__
)
return helper_name in self._registry
# pyre-fixme[6]: incompatible parameter type: expected ModuleType, got HelperWrapper
sys.modules[__name__] = HelperWrapper(sys.modules[__name__])
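# Minimal usage sketch (blob names and dimensions are illustrative): once the
# module is wrapped, registered helpers dispatch through __getattr__ and pick
# up the model's arg_scope automatically:
#
#   from caffe2.python import brew, model_helper
#   model = model_helper.ModelHelper(name="example")
#   fc1 = brew.fc(model, "data", "fc1", dim_in=16, dim_out=8)
#   pred = brew.relu(model, fc1, "pred")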
|
pytorch-master
|
caffe2/python/brew.py
|
## @package net_printer
# Module caffe2.python.net_printer
from caffe2.proto.caffe2_pb2 import OperatorDef, NetDef
from caffe2.python.checkpoint import Job
from caffe2.python.core import Net, ExecutionStep, Plan
from caffe2.python.task import Task, TaskGroup, WorkspaceType, TaskOutput
from collections import defaultdict
from contextlib import contextmanager
from copy import copy
from future.utils import viewkeys
from itertools import chain
from six import binary_type, text_type
class Visitor(object):
@classmethod
def register(cls, Type):
        if not hasattr(cls, 'visitors'):
cls.visitors = {}
else:
assert Type not in cls.visitors, \
'{} already registered!'.format(Type)
def _register(func):
cls.visitors[Type] = func
return func
return _register
def __call__(self, obj, *args, **kwargs):
if obj is None:
return
Type = type(obj)
if Type not in self.__class__.visitors:
raise TypeError('%s: unsupported object type: %s' % (
self.__class__.__name__, Type))
func = self.__class__.visitors[Type]
return func(self, obj, *args, **kwargs)
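# Illustrative sketch (the visitor and type are hypothetical): Visitor
# dispatches on the runtime type of its argument through the registry
# populated by @register:
#
#   class MyVisitor(Visitor):
#       pass
#
#   @MyVisitor.register(int)
#   def visit_int(visitor, x):
#       return x + 1
#
#   MyVisitor()(41)  # -> 42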
class Analyzer(Visitor):
PREFIXES_TO_IGNORE = {'distributed_ctx_init'}
def __init__(self):
self.workspaces = defaultdict(lambda: defaultdict(lambda: 0))
self.workspace_ctx = []
@property
def workspace(self):
return self.workspace_ctx[-1]
@contextmanager
def set_workspace(self, node=None, ws=None, do_copy=False):
if ws is not None:
ws = ws
elif node is not None:
ws = self.workspaces[str(node)]
else:
ws = self.workspace
if do_copy:
ws = copy(ws)
self.workspace_ctx.append(ws)
yield ws
del self.workspace_ctx[-1]
def define_blob(self, blob):
self.workspace[blob] += 1
def need_blob(self, blob):
if any(blob.startswith(p) for p in Analyzer.PREFIXES_TO_IGNORE):
return
assert blob in self.workspace, 'Blob undefined: %s' % blob
@Analyzer.register(OperatorDef)
def analyze_op(analyzer, op):
for x in op.input:
analyzer.need_blob(x)
for x in op.output:
analyzer.define_blob(x)
@Analyzer.register(Net)
def analyze_net(analyzer, net):
for x in net.Proto().op:
analyzer(x)
@Analyzer.register(ExecutionStep)
def analyze_step(analyzer, step):
proto = step.Proto()
with analyzer.set_workspace(do_copy=proto.create_workspace):
if proto.report_net:
with analyzer.set_workspace(do_copy=True):
analyzer(step.get_net(proto.report_net))
all_new_blobs = set()
substeps = step.Substeps() + [step.get_net(n) for n in proto.network]
for substep in substeps:
with analyzer.set_workspace(
do_copy=proto.concurrent_substeps) as ws_in:
analyzer(substep)
if proto.should_stop_blob:
analyzer.need_blob(proto.should_stop_blob)
if proto.concurrent_substeps:
new_blobs = set(viewkeys(ws_in)) - set(viewkeys(analyzer.workspace))
assert len(all_new_blobs & new_blobs) == 0, (
'Error: Blobs created by multiple parallel steps: %s' % (
', '.join(all_new_blobs & new_blobs)))
all_new_blobs |= new_blobs
for x in all_new_blobs:
analyzer.define_blob(x)
@Analyzer.register(Task)
def analyze_task(analyzer, task):
# check that our plan protobuf is not too large (limit of 64Mb)
step = task.get_step()
plan = Plan(task.node)
plan.AddStep(step)
proto_len = len(plan.Proto().SerializeToString())
    assert proto_len < 2 ** 26, (
        'Due to a protobuf limitation, serialized tasks must be smaller '
        'than 64Mb, but this task has {} bytes.'.format(proto_len))
is_private = task.workspace_type() != WorkspaceType.GLOBAL
with analyzer.set_workspace(do_copy=is_private):
analyzer(step)
@Analyzer.register(TaskGroup)
def analyze_task_group(analyzer, tg):
for task in tg.tasks_by_node().tasks():
with analyzer.set_workspace(node=task.node):
analyzer(task)
@Analyzer.register(Job)
def analyze_job(analyzer, job):
analyzer(job.init_group)
analyzer(job.epoch_group)
def analyze(obj):
"""
Given a Job, visits all the execution steps making sure that:
- no undefined blobs will be found during execution
- no blob with same name is defined in concurrent steps
"""
Analyzer()(obj)
class Text(object):
def __init__(self):
self._indent = 0
self._lines_in_context = [0]
self.lines = []
@contextmanager
def context(self, text):
if text is not None:
self.add('with %s:' % text)
self._indent += 4
self._lines_in_context.append(0)
yield
if text is not None:
if self._lines_in_context[-1] == 0:
self.add('pass')
self._indent -= 4
del self._lines_in_context[-1]
def add(self, text):
self._lines_in_context[-1] += 1
self.lines.append((' ' * self._indent) + text)
def __str__(self):
return '\n'.join(self.lines)
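# Illustrative sketch: Text accumulates indented pseudo-Python, emitting
# 'pass' for contexts that produced no lines.
#
#   t = Text()
#   with t.context('loop(2)'):
#       t.add('Add([x, y], [x])')
#   str(t)  # "with loop(2):\n    Add([x, y], [x])"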
class Printer(Visitor, Text):
def __init__(self, factor_prefixes=False, c2_syntax=True):
super(Visitor, self).__init__()
super(Text, self).__init__()
self.factor_prefixes = factor_prefixes
self.c2_syntax = c2_syntax
self.c2_net_name = None
def _sanitize_str(s):
if isinstance(s, text_type):
sanitized = s
elif isinstance(s, binary_type):
sanitized = s.decode('ascii', errors='ignore')
else:
sanitized = str(s)
if len(sanitized) < 64:
return "'%s'" % sanitized
else:
return "'%s'" % sanitized[:64] + '...<+len=%d>' % (len(sanitized) - 64)
def _arg_val(arg):
if arg.HasField('f'):
return str(arg.f)
if arg.HasField('i'):
return str(arg.i)
if arg.HasField('s'):
return _sanitize_str(arg.s)
if arg.floats:
return str(list(arg.floats))
if arg.ints:
return str(list(arg.ints))
if arg.strings:
return str([_sanitize_str(s) for s in arg.strings])
return '[]'
def commonprefix(m):
"Given a list of strings, returns the longest common prefix"
if not m:
return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
def format_value(val):
if isinstance(val, list):
return '[%s]' % ', '.join("'%s'" % str(v) for v in val)
else:
return str(val)
def factor_prefix(vals, do_it):
vals = [format_value(v) for v in vals]
prefix = commonprefix(vals) if len(vals) > 1 and do_it else ''
joined = ', '.join(v[len(prefix):] for v in vals)
return '%s[%s]' % (prefix, joined) if prefix else joined
def call(op, inputs=None, outputs=None, factor_prefixes=False):
if not inputs:
inputs = ''
else:
inputs_v = [a for a in inputs if not isinstance(a, tuple)]
inputs_kv = [a for a in inputs if isinstance(a, tuple)]
inputs = ', '.join(
x
for x in chain(
[factor_prefix(inputs_v, factor_prefixes)],
('%s=%s' % kv for kv in inputs_kv),
)
if x
)
call = '%s(%s)' % (op, inputs)
return call if not outputs else '%s = %s' % (
factor_prefix(outputs, factor_prefixes), call)
def format_device_option(dev_opt):
if not dev_opt or not (
dev_opt.device_type or dev_opt.device_id or dev_opt.node_name):
return None
return call(
'DeviceOption',
[dev_opt.device_type, dev_opt.device_id, "'%s'" % dev_opt.node_name])
@Printer.register(OperatorDef)
def print_op(text, op):
args = [(a.name, _arg_val(a)) for a in op.arg]
dev_opt_txt = format_device_option(op.device_option)
if dev_opt_txt:
args.append(('device_option', dev_opt_txt))
if text.c2_net_name:
text.add(call(
text.c2_net_name + '.' + op.type,
[list(op.input), list(op.output)] + args))
else:
text.add(call(
op.type,
list(op.input) + args,
op.output,
factor_prefixes=text.factor_prefixes))
for arg in op.arg:
if arg.HasField('n'):
with text.context('arg: %s' % arg.name):
text(arg.n)
@Printer.register(NetDef)
def print_net_def(text, net_def):
if text.c2_syntax:
text.add(call('core.Net', ["'%s'" % net_def.name], [net_def.name]))
text.c2_net_name = net_def.name
else:
text.add('# net: %s' % net_def.name)
for op in net_def.op:
text(op)
if text.c2_syntax:
text.c2_net_name = None
@Printer.register(Net)
def print_net(text, net):
text(net.Proto())
def _get_step_context(step):
proto = step.Proto()
if proto.should_stop_blob:
return call('loop'), False
if proto.num_iter and proto.num_iter != 1:
return call('loop', [proto.num_iter]), False
if proto.num_concurrent_instances > 1:
return (
call('parallel',
[('num_instances', proto.num_concurrent_instances)]),
len(step.Substeps()) > 1)
concurrent = proto.concurrent_substeps and len(step.Substeps()) > 1
if concurrent:
return call('parallel'), True
if proto.report_net:
return call('run_once'), False
return None, False
@Printer.register(ExecutionStep)
def print_step(text, step):
proto = step.Proto()
step_ctx, do_substep = _get_step_context(step)
with text.context(step_ctx):
if proto.report_net:
with text.context(call('report_net', [proto.report_interval])):
text(step.get_net(proto.report_net))
substeps = step.Substeps() + [step.get_net(n) for n in proto.network]
for substep in substeps:
sub_proto = (
substep.Proto() if isinstance(substep, ExecutionStep) else None)
if sub_proto is not None and sub_proto.run_every_ms:
substep_ctx = call(
'reporter',
[str(substep), ('interval_ms', sub_proto.run_every_ms)])
elif do_substep:
title = (
'workspace'
if sub_proto is not None and sub_proto.create_workspace else
'step')
substep_ctx = call(title, [str(substep)])
else:
substep_ctx = None
with text.context(substep_ctx):
text(substep)
if proto.should_stop_blob:
text.add(call('yield stop_if', [proto.should_stop_blob]))
def _print_task_output(x):
assert isinstance(x, TaskOutput)
return 'Output[' + ', '.join(str(x) for x in x.names) + ']'
@Printer.register(Task)
def print_task(text, task):
outs = ', '.join(_print_task_output(o) for o in task.outputs())
context = [('node', task.node), ('name', task.name), ('outputs', outs)]
with text.context(call('Task', context)):
text(task.get_step())
@Printer.register(TaskGroup)
def print_task_group(text, tg, header=None):
with text.context(header or call('TaskGroup')):
for task in tg.tasks_by_node().tasks():
text(task)
@Printer.register(Job)
def print_job(text, job):
text(job.init_group, 'Job.current().init_group')
text(job.epoch_group, 'Job.current().epoch_group')
with text.context('Job.current().stop_conditions'):
for out in job.stop_conditions:
text.add(_print_task_output(out))
text(job.download_group, 'Job.current().download_group')
text(job.exit_group, 'Job.current().exit_group')
def to_string(obj, **kwargs):
"""
Given a Net, ExecutionStep, Task, TaskGroup or Job, produces a string
with detailed description of the execution steps.
"""
printer = Printer(**kwargs)
printer(obj)
return str(printer)
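# Hedged usage sketch (the net contents are illustrative):
#
#   net = Net('example')
#   net.ConstantFill([], 'x', shape=[1], value=1.0)
#   print(to_string(net))  # pseudo-Python rendering of the net's operators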
def debug_net(net):
"""
Given a Net, produce another net that logs info about the operator call
before each operator execution. Use for debugging purposes.
"""
    assert isinstance(net, Net)
    debug_net = Net(str(net))
    for op in net.Proto().op:
        text = Text()
        print_op(text, op)
debug_net.LogInfo(str(text))
debug_net.Proto().op.extend([op])
return debug_net
|
pytorch-master
|
caffe2/python/net_printer.py
|
## @package schema
# Module caffe2.python.schema
"""
Defines a minimal set of data types that allow to represent datasets with
arbitrary nested structure, including objects of variable length, such as
maps and lists.
This defines a columnar storage format for such datasets on top of caffe2
tensors. In terms of capacity of representation, it can represent most of
the data types supported by Parquet, ORC, DWRF file formats.
See comments in operator_test/dataset_ops_test.py for an example and
walkthrough on how to use schema to store and iterate through a structured
in-memory dataset.
"""
import logging
import numpy as np
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.core import BlobReference
from collections import OrderedDict, namedtuple
from past.builtins import basestring
from future.utils import viewitems, viewkeys, viewvalues
from itertools import islice
from six import StringIO
from typing import Sequence
logger = logging.getLogger(__name__)
FIELD_SEPARATOR = ':'
def _join_field_name(prefix, suffix):
if prefix and suffix:
return '{}{}{}'.format(prefix, FIELD_SEPARATOR, suffix)
elif prefix:
return prefix
elif suffix:
return suffix
else:
return ''
def _normalize_field(field_or_type_or_blob, keep_blobs=True):
"""Clones/normalizes a field before adding it to a container."""
if isinstance(field_or_type_or_blob, Field):
return field_or_type_or_blob.clone(keep_blobs=keep_blobs)
elif type(field_or_type_or_blob) in (type, np.dtype):
return Scalar(dtype=field_or_type_or_blob)
else:
return Scalar(blob=field_or_type_or_blob)
FeatureSpec = namedtuple(
'FeatureSpec',
[
'feature_type',
'feature_names',
'feature_ids',
'feature_is_request_only',
'desired_hash_size',
'feature_to_index',
]
)
# pyre-fixme[16]: `FeatureSpec.__new__` has no attribute `__defaults__`
FeatureSpec.__new__.__defaults__ = (None, None, None, None, None, None)
class Metadata(
namedtuple(
'Metadata', ['categorical_limit', 'expected_value', 'feature_specs']
)
):
"""Represents additional information associated with a scalar in schema.
`categorical_limit` - for fields of integral type that are guaranteed to be
non-negative it specifies the maximum possible value plus one. It's often
used as a size of an embedding table.
`expected_value` - anticipated average value of elements in the field.
Usually makes sense for length fields of lists.
    `feature_specs` - information about the features contained in this field.
    For example, if the field holds more than one feature, it can carry the
    list of feature names contained in this field."""
__slots__: Sequence[str] = ()
# pyre-fixme[16]: `Metadata.__new__` has no attribute `__defaults__`
Metadata.__new__.__defaults__ = (None, None, None)
class Field(object):
"""Represents an abstract field type in a dataset.
"""
__slots__: Sequence[str] = ("_parent", "_field_offsets")
def __init__(self, children):
"""Derived classes must call this after their initialization."""
self._parent = (None, 0)
offset = 0
self._field_offsets = []
for child in children:
self._field_offsets.append(offset)
offset += len(child.field_names())
self._field_offsets.append(offset)
def clone_schema(self):
return self.clone(keep_blobs=False)
def field_names(self):
"""Return the children field names for this field."""
raise NotImplementedError('Field is an abstract class.')
def field_types(self):
"""Return the numpy.dtype for each of the children fields."""
raise NotImplementedError('Field is an abstract class.')
def field_metadata(self):
"""Return the Metadata for each of the children fields."""
raise NotImplementedError('Field is an abstract class.')
def field_blobs(self):
"""Return the list of blobs with contents for this Field.
Values can either be all numpy.ndarray or BlobReference.
If any of the fields doesn't have a blob, throws.
"""
raise NotImplementedError('Field is an abstract class.')
def all_scalars(self):
"""Return the list of all Scalar instances in the Field.
The order is the same as for field_names() or field_blobs()"""
raise NotImplementedError('Field is an abstract class.')
def has_blobs(self):
"""Return True if every scalar of this field has blobs."""
raise NotImplementedError('Field is an abstract class.')
def clone(self, keep_blobs=True):
"""Clone this Field along with its children."""
raise NotImplementedError('Field is an abstract class.')
def _set_parent(self, parent, relative_id):
self._parent = (parent, relative_id)
def slice(self):
"""
Returns a slice representing the range of field ids that belong to
this field. This slice can be used to index a list of fields.
E.g.:
>>> s = Struct(
>>> ('a', Scalar()),
>>> ('b', Struct(
>>> ('b1', Scalar()),
>>> ('b2', Scalar()),
>>> )),
>>> ('c', Scalar()),
>>> )
>>> field_data = ['da', 'db1', 'db2', 'dc']
        >>> field_data[s.b.slice()]
['db1', 'db2']
"""
base_id = self._child_base_id()
return slice(base_id, base_id + len(self.field_names()))
def _child_base_id(self, child_index=None):
"""Get the base id of the given child"""
p, i = self._parent
pos = 0 if child_index is None else self._field_offsets[child_index]
if p:
pos += p._child_base_id(i)
return pos
def __eq__(self, other):
"""Equivalance of two schemas"""
return (
(self.field_names() == other.field_names()) and
(self.field_types() == other.field_types()) and
(self.field_metadata() == other.field_metadata())
)
def _pprint_impl(self, indent, str_buffer):
raise NotImplementedError('Field is an abstract class.')
def __repr__(self):
str_buffer = StringIO()
self._pprint_impl(0, str_buffer)
contents = str_buffer.getvalue()
str_buffer.close()
return contents
class List(Field):
"""Represents a variable-length list.
Values of a list can also be complex fields such as Lists and Structs.
In addition to the fields exposed by its `values` field, a List exposes an
additional `lengths` field, which will contain the size of each list under
the parent domain.
"""
__slots__: Sequence[str] = ("lengths", "_items")
def __init__(self, values, lengths_blob=None):
if isinstance(lengths_blob, Field):
assert isinstance(lengths_blob, Scalar)
self.lengths = _normalize_field(lengths_blob)
else:
self.lengths = Scalar(np.int32, lengths_blob)
self._items = _normalize_field(values)
self.lengths._set_parent(self, 0)
self._items._set_parent(self, 1)
super(List, self).__init__([self.lengths, self._items])
def field_names(self):
value_fields = self._items.field_names()
return (
['lengths'] + [_join_field_name('values', v) for v in value_fields]
)
def field_types(self):
return self.lengths.field_types() + self._items.field_types()
def field_metadata(self):
return self.lengths.field_metadata() + self._items.field_metadata()
def field_blobs(self):
return self.lengths.field_blobs() + self._items.field_blobs()
def all_scalars(self):
return self.lengths.all_scalars() + self._items.all_scalars()
def has_blobs(self):
return self.lengths.has_blobs() and self._items.has_blobs()
def clone(self, keep_blobs=True):
return type(self)(
_normalize_field(self._items, keep_blobs=keep_blobs),
_normalize_field(self.lengths, keep_blobs=keep_blobs)
)
def _pprint_impl(self, indent, str_buffer):
str_buffer.write(' ' * indent + "List(\n")
str_buffer.write(' ' * (indent + 1) + "lengths=\n")
self.lengths._pprint_impl(indent=indent + 2, str_buffer=str_buffer)
str_buffer.write(' ' * (indent + 1) + "_items=\n")
self._items._pprint_impl(indent=indent + 2, str_buffer=str_buffer)
str_buffer.write(' ' * indent + ")\n")
def __getattr__(self, item):
"""If the value of this list is a struct,
allow to introspect directly into its fields."""
if item.startswith('__'):
raise AttributeError(item)
if isinstance(self._items, Struct):
return getattr(self._items, item)
elif item == 'value' or item == 'items':
return self._items
else:
raise AttributeError('Field not found in list: %s.' % item)
def __getitem__(self, item):
names = item.split(FIELD_SEPARATOR, 1)
if len(names) == 1:
if item == 'lengths':
return self.lengths
elif item == 'values':
return self._items
else:
if names[0] == 'values':
return self._items[names[1]]
raise KeyError('Field not found in list: %s.' % item)
class ListWithEvicted(List):
"""
    This class is similar to List, but contains an extra field,
    evicted_values, used for LRU hashing.
"""
__slots__: Sequence[str] = ("_evicted_values",)
def __init__(self, values, lengths_blob=None, evicted_values=None):
if isinstance(evicted_values, Field):
assert isinstance(evicted_values, Scalar)
self._evicted_values = _normalize_field(evicted_values)
else:
self._evicted_values = Scalar(np.int64, evicted_values)
super(ListWithEvicted, self).__init__(values, lengths_blob=lengths_blob)
def field_names(self):
value_fields = self._items.field_names()
return (
['lengths'] + [_join_field_name('values', v) for v in value_fields] + ["_evicted_values"]
)
def field_types(self):
return self.lengths.field_types() + self._items.field_types() + self._evicted_values.field_types()
def field_metadata(self):
return self.lengths.field_metadata() + self._items.field_metadata() + self._evicted_values.field_metadata()
def field_blobs(self):
return self.lengths.field_blobs() + self._items.field_blobs() + self._evicted_values.field_blobs()
def all_scalars(self):
return self.lengths.all_scalars() + self._items.all_scalars() + self._evicted_values.all_scalars()
def has_blobs(self):
        return (self.lengths.has_blobs() and self._items.has_blobs()
                and self._evicted_values.has_blobs())
def clone(self, keep_blobs=True):
return type(self)(
_normalize_field(self._items, keep_blobs=keep_blobs),
_normalize_field(self.lengths, keep_blobs=keep_blobs),
_normalize_field(self._evicted_values, keep_blobs=keep_blobs)
)
def _pprint_impl(self, indent, str_buffer):
str_buffer.write(' ' * indent + "ListWithEvicted(\n")
str_buffer.write(' ' * (indent + 1) + "lengths=\n")
self.lengths._pprint_impl(indent=indent + 2, str_buffer=str_buffer)
str_buffer.write(' ' * (indent + 1) + "_items=\n")
self._items._pprint_impl(indent=indent + 2, str_buffer=str_buffer)
str_buffer.write(' ' * (indent + 1) + "_evicted_values=\n")
self._evicted_values._pprint_impl(indent=indent + 2, str_buffer=str_buffer)
str_buffer.write(' ' * indent + ")\n")
def __getattr__(self, item):
"""If the value of this list is a struct,
allow to introspect directly into its fields."""
if item.startswith('__'):
raise AttributeError(item)
if item == "_evicted_values":
return self._evicted_values
if isinstance(self._items, Struct):
return getattr(self._items, item)
elif item == 'value' or item == 'items':
return self._items
else:
raise AttributeError('Field not found in list: %s.' % item)
def __getitem__(self, item):
names = item.split(FIELD_SEPARATOR, 1)
if len(names) == 1:
if item == 'lengths':
return self.lengths
elif item == 'values':
return self._items
elif item == '_evicted_values':
return self._evicted_values
else:
if names[0] == 'values':
return self._items[names[1]]
raise KeyError('Field not found in list: %s.' % item)
class Struct(Field):
"""Represents a named list of fields sharing the same domain.
"""
__slots__: Sequence[str] = ("fields", "_frozen")
def __init__(self, *fields):
""" fields is a list of tuples in format of (name, field). The name is
a string of nested name, e.g., `a`, `a:b`, `a:b:c`. For example
Struct(
('a', Scalar()),
('b:c', Scalar()),
('b:d:e', Scalar()),
('b', Struct(
('f', Scalar()),
)),
)
is equal to
Struct(
('a', Scalar()),
('b', Struct(
('c', Scalar()),
('d', Struct(('e', Scalar()))),
('f', Scalar()),
)),
)
"""
for field in fields:
assert len(field) == 2
assert field[0], 'Field names cannot be empty'
assert field[0] != 'lengths', (
'Struct cannot contain a field named `lengths`.'
)
fields = [(name, _normalize_field(field)) for name, field in fields]
self.fields = OrderedDict()
for name, field in fields:
if FIELD_SEPARATOR in name:
name, field = self._struct_from_nested_name(name, field)
if name not in self.fields:
self.fields[name] = field
continue
if (
not isinstance(field, Struct) or
not isinstance(self.fields[name], Struct)
):
raise ValueError('Duplicate field name: %s' % name)
self.fields[name] = self.fields[name] + field
for id, (_, field) in enumerate(viewitems(self.fields)):
field._set_parent(self, id)
super(Struct, self).__init__(viewvalues(self.fields))
self._frozen = True
def _struct_from_nested_name(self, nested_name, field):
def create_internal(nested_name, field):
names = nested_name.split(FIELD_SEPARATOR, 1)
if len(names) == 1:
added_field = field
else:
added_field = create_internal(names[1], field)
return Struct((names[0], added_field))
names = nested_name.split(FIELD_SEPARATOR, 1)
assert len(names) >= 2
return names[0], create_internal(names[1], field)
def get_children(self):
return list(viewitems(self.fields))
def field_names(self):
names = []
for name, field in viewitems(self.fields):
names += [_join_field_name(name, f) for f in field.field_names()]
return names
def field_types(self):
types = []
for _, field in viewitems(self.fields):
types += field.field_types()
return types
def field_metadata(self):
metadata = []
for _, field in viewitems(self.fields):
metadata += field.field_metadata()
return metadata
def field_blobs(self):
blobs = []
for _, field in viewitems(self.fields):
blobs += field.field_blobs()
return blobs
def all_scalars(self):
scalars = []
for _, field in viewitems(self.fields):
scalars += field.all_scalars()
return scalars
def has_blobs(self):
return all(field.has_blobs() for field in viewvalues(self.fields))
def clone(self, keep_blobs=True):
normalized_fields = [
(k, _normalize_field(v, keep_blobs=keep_blobs))
for k, v in viewitems(self.fields)
]
return type(self)(*normalized_fields)
def _get_field_by_nested_name(self, nested_name):
names = nested_name.split(FIELD_SEPARATOR, 1)
field = self.fields.get(names[0], None)
if field is None:
return None
if len(names) == 1:
return field
try:
return field[names[1]]
except (KeyError, TypeError):
return None
def _pprint_impl(self, indent, str_buffer):
str_buffer.write(' ' * indent + "Struct( \n")
for name, field in viewitems(self.fields):
str_buffer.write(' ' * (indent + 1) + "{}=".format(name) + "\n")
field._pprint_impl(indent=indent + 2, str_buffer=str_buffer)
str_buffer.write(' ' * indent + ") \n")
def __contains__(self, item):
field = self._get_field_by_nested_name(item)
return field is not None
def __len__(self):
return len(self.fields)
def __getitem__(self, item):
"""
item can be a tuple or list of ints or strings, or a single
int or string. String item is a nested field name, e.g., "a", "a:b",
"a:b:c". Int item is the index of a field at the first level of the
Struct.
"""
if isinstance(item, list) or isinstance(item, tuple):
keys = list(viewkeys(self.fields))
return Struct(
* [
(
keys[k]
if isinstance(k, int) else k, self[k]
) for k in item
]
)
elif isinstance(item, int):
return next(islice(viewvalues(self.fields), item, None))
else:
field = self._get_field_by_nested_name(item)
if field is None:
raise KeyError('field "%s" not found' % (item))
return field
def get(self, item, default_value):
"""
        Similar to python's dictionary get method: returns the field named
        `item` if found (i.e. self.item is valid), and default_value otherwise.
        It is syntactic sugar for python's builtin getattr method.
"""
return getattr(self, item, default_value)
def __getattr__(self, item):
if item.startswith('__'):
raise AttributeError(item)
try:
return super(Struct, self).__getattribute__("fields")[item]
except KeyError:
raise AttributeError(item)
def __setattr__(self, key, value):
# Disable setting attributes after initialization to prevent false
# impression of being able to overwrite a field.
# Allowing setting internal states mainly so that _parent can be set
# post initialization.
if getattr(self, '_frozen', None) and not key.startswith('_'):
raise TypeError('Struct.__setattr__() is disabled after __init__()')
super(Struct, self).__setattr__(key, value)
def __add__(self, other):
"""
        Allows merging the fields of two schema.Struct instances using the
        '+' operator. If the two Structs have common field names, the merge
        is conducted recursively. Here are examples:
Example 1
s1 = Struct(('a', Scalar()))
s2 = Struct(('b', Scalar()))
s1 + s2 == Struct(
('a', Scalar()),
('b', Scalar()),
)
Example 2
s1 = Struct(
('a', Scalar()),
('b', Struct(('c', Scalar()))),
)
s2 = Struct(('b', Struct(('d', Scalar()))))
s1 + s2 == Struct(
('a', Scalar()),
('b', Struct(
('c', Scalar()),
('d', Scalar()),
)),
)
"""
if not isinstance(other, Struct):
return NotImplemented
children = OrderedDict(self.get_children())
for name, right_field in other.get_children():
if name not in children:
children[name] = right_field
continue
left_field = children[name]
if not (isinstance(left_field, Struct) and isinstance(right_field, Struct)):
raise TypeError(
"Type of left_field, " + str(type(left_field)) +
", and type of right_field, " +
str(type(right_field)) +
", must both the Struct to allow merging of the field, " + name)
children[name] = left_field + right_field
return Struct(*(viewitems(children)))
def __sub__(self, other):
"""
        Allows removing the fields that two schema.Struct instances have in
        common from self using the '-' operator. If the two Structs have
        common field names, the removal is conducted recursively. If a child
        struct is left with no fields, it is removed from its parent. Here
        are examples:
Example 1
s1 = Struct(
('a', Scalar()),
('b', Scalar()),
)
s2 = Struct(('a', Scalar()))
s1 - s2 == Struct(('b', Scalar()))
Example 2
s1 = Struct(
('b', Struct(
('c', Scalar()),
('d', Scalar()),
))
)
s2 = Struct(
('b', Struct(('c', Scalar()))),
)
s1 - s2 == Struct(
('b', Struct(
('d', Scalar()),
)),
)
Example 3
s1 = Struct(
('a', Scalar()),
('b', Struct(
('d', Scalar()),
))
)
s2 = Struct(
('b', Struct(
                ('c', Scalar()),
                ('d', Scalar()),
)),
)
s1 - s2 == Struct(
('a', Scalar()),
)
"""
if not isinstance(other, Struct):
return NotImplemented
children = OrderedDict(self.get_children())
for name, right_field in other.get_children():
if name in children:
left_field = children[name]
if type(left_field) == type(right_field):
if isinstance(left_field, Struct):
child = left_field - right_field
if child.get_children():
children[name] = child
continue
children.pop(name)
else:
raise TypeError(
"Type of left_field, " + str(type(left_field)) +
", is not the same as that of right_field, " +
str(type(right_field)) +
", yet they have the same field name, " + name)
return Struct(*(children.items()))
class Scalar(Field):
"""Represents a typed scalar or tensor of fixed shape.
A Scalar is a leaf in a schema tree, translating to exactly one tensor in
the dataset's underlying storage.
Usually, the tensor storing the actual values of this field is a 1D tensor,
representing a series of values in its domain. It is possible however to
have higher rank values stored as a Scalar, as long as all entries have
the same shape.
E.g.:
Scalar(np.float64)
Scalar field of type float64. Caffe2 will expect readers and
datasets to expose it as a 1D tensor of doubles (vector), where
the size of the vector is determined by this fields' domain.
Scalar((np.int32, 5))
Tensor field of type int32. Caffe2 will expect readers and
datasets to implement it as a 2D tensor (matrix) of shape (L, 5),
where L is determined by this fields' domain.
Scalar((str, (10, 20)))
Tensor field of type str. Caffe2 will expect readers and
datasets to implement it as a 3D tensor of shape (L, 10, 20),
where L is determined by this fields' domain.
    If the field type is unknown at construction time, call Scalar(), which
    will default to np.void as its dtype.
It is an error to pass a structured dtype to Scalar, since it would contain
more than one field. Instead, use from_dtype, which will construct
a nested `Struct` field reflecting the given dtype's structure.
A Scalar can also contain a blob, which represents the value of this
    Scalar. A blob can be either a numpy.ndarray, in which case it contains
    the actual contents of the Scalar, or a BlobReference, which represents
    a blob living in a caffe2 Workspace. If a blob of a different type is
    passed, a conversion to numpy.ndarray is attempted.
"""
__slots__: Sequence[str] = ("_metadata", "dtype", "_original_dtype", "_blob")
def __init__(self, dtype=None, blob=None, metadata=None):
self._metadata = None
self.set(dtype, blob, metadata, unsafe=True)
super(Scalar, self).__init__([])
def field_names(self):
return ['']
def field_type(self):
return self.dtype
def field_types(self):
return [self.dtype]
def field_metadata(self):
return [self._metadata]
def has_blobs(self):
return self._blob is not None
def field_blobs(self):
assert self._blob is not None, 'Value is not set for this field.'
return [self._blob]
def all_scalars(self):
return [self]
def clone(self, keep_blobs=True):
return Scalar(
dtype=self._original_dtype,
blob=self._blob if keep_blobs else None,
metadata=self._metadata
)
def get(self):
"""Gets the current blob of this Scalar field."""
assert self._blob is not None, 'Value is not set for this field.'
return self._blob
def __call__(self):
"""Shortcut for self.get()"""
return self.get()
@property
def metadata(self):
return self._metadata
def set_metadata(self, value):
assert isinstance(value, Metadata), \
'metadata must be Metadata, got {}'.format(type(value))
self._metadata = value
self._validate_metadata()
def _validate_metadata(self):
if self._metadata is None:
return
if (self._metadata.categorical_limit is not None and
self.dtype is not None):
assert np.issubdtype(self.dtype, np.integer), \
"`categorical_limit` can be specified only in integral " + \
"fields but got {}".format(self.dtype)
def set_value(self, blob, throw_on_type_mismatch=False, unsafe=False):
"""Sets only the blob field still validating the existing dtype"""
if self.dtype.base != np.void and throw_on_type_mismatch:
assert isinstance(blob, np.ndarray), "Got {!r}".format(blob)
assert blob.dtype.base == self.dtype.base, (
"Expected {}, got {}".format(self.dtype.base, blob.dtype.base))
self.set(dtype=self._original_dtype, blob=blob, unsafe=unsafe)
def set(self, dtype=None, blob=None, metadata=None, unsafe=False):
"""Set the type and/or blob of this scalar. See __init__ for details.
Args:
dtype: can be any numpy type. If not provided and `blob` is
provided, it will be inferred. If no argument is provided,
this Scalar will be of type np.void.
blob: if provided, can be either a BlobReference or a
                numpy.ndarray. If a value of a different type is passed,
                a conversion to numpy.ndarray is attempted. Strings aren't
                accepted, since they can be ambiguous. If you want to pass
                a string, use either BlobReference(blob) or np.array(blob).
metadata: optional instance of Metadata, if provided overrides
the metadata information of the scalar
"""
if not unsafe:
logger.warning(
"Scalar should be considered immutable. Only call Scalar.set() "
"on newly created Scalar with unsafe=True. This will become an "
"error soon."
)
if blob is not None and isinstance(blob, basestring):
raise ValueError(
'Passing str blob to Scalar.set() is ambiguous. '
'Do either set(blob=np.array(blob)) or '
'set(blob=BlobReference(blob))'
)
self._original_dtype = dtype
# Numpy will collapse a shape of 1 into an unindexed data array (shape = ()),
# which betrays the docstring of this class (which expects shape = (1,)).
# >>> import numpy as np
# >>> np.dtype((np.int32, 1))
# dtype('int32')
# >>> np.dtype((np.int32, 5))
# dtype(('<i4', (5,)))
if dtype is not None and isinstance(dtype, tuple) and dtype[1] == 1:
dtype = (dtype[0], (1,))
if dtype is not None:
if isinstance(dtype, tuple) and dtype[0] == np.void:
raise TypeError(
"Cannot set the Scalar with type {} for blob {}."
"If this blob is the output of some operation, "
"please verify the input of that operation has "
"proper type.".format(dtype, blob)
)
dtype = np.dtype(dtype)
# If blob is not None and it is not a BlobReference, we assume that
# it is actual tensor data, so we will try to cast it to a numpy array.
if blob is not None and not isinstance(blob, BlobReference):
preserve_shape = isinstance(blob, np.ndarray)
if dtype is not None and dtype != np.void:
blob = np.array(blob, dtype=dtype.base)
# if array is empty we may need to reshape a little
if blob.size == 0 and not preserve_shape:
blob = blob.reshape((0, ) + dtype.shape)
else:
assert isinstance(blob, np.ndarray), (
'Invalid blob type: %s' % str(type(blob)))
# reshape scalars into 1D arrays
# TODO(azzolini): figure out better way of representing this
if len(blob.shape) == 0 and not preserve_shape:
blob = blob.reshape((1, ))
# infer inner shape from the blob given
# TODO(dzhulgakov): tweak this to make it work with PackedStruct
if (len(blob.shape) > 1 and dtype is not None and
dtype.base != np.void):
dtype = np.dtype((dtype.base, blob.shape[1:]))
# if we were still unable to infer the dtype
if dtype is None:
dtype = np.dtype(np.void)
assert not dtype.fields, (
'Cannot create Scalar with a structured dtype. ' +
'Use from_dtype instead.'
)
self.dtype = dtype
self._blob = blob
if metadata is not None:
self.set_metadata(metadata)
self._validate_metadata()
def set_type(self, dtype):
self._original_dtype = dtype
if dtype is not None:
self.dtype = np.dtype(dtype)
else:
self.dtype = np.dtype(np.void)
self._validate_metadata()
def _pprint_impl(self, indent, str_buffer):
str_buffer.write(' ' * (indent) +
'Scalar({!r}, {!r}, {!r})'.format(
self.dtype, self._blob, self._metadata) + "\n")
def id(self):
"""
Return the zero-indexed position of this scalar field in its schema.
Used in order to index into the field_blob list returned by readers or
accepted by writers.
"""
return self._child_base_id()
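# Editor's note: a minimal illustrative sketch, not part of the original
# module, showing the dtype normalization described in the Scalar docstring.
# Wrapped in a function so nothing runs at import time.
def _example_scalar_usage():
    s = Scalar(np.int32)
    assert s.field_types() == [np.dtype(np.int32)]
    # An inner shape is carried in the dtype; readers and datasets would
    # see a 2D tensor of shape (L, 5) for this field.
    v = Scalar((np.float32, 5))
    assert v.dtype.shape == (5,)
    # With no dtype given at all, Scalar defaults to np.void.
    assert Scalar().dtype == np.dtype(np.void)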
def Map(
keys,
values,
keys_name='keys',
values_name='values',
lengths_blob=None
):
"""A map is a List of Struct containing keys and values fields.
Optionally, you can provide custom name for the key and value fields.
"""
return List(
Struct((keys_name, keys), (values_name, values)),
lengths_blob=lengths_blob
)
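# Editor's note: illustrative sketch only. Since Map is just sugar over
# List(Struct(...)), the flattened field names below follow directly from
# the defaults above ('lengths' from List, 'keys'/'values' from the Struct).
def _example_map_schema():
    m = Map(Scalar(np.int64), Scalar(np.float32))
    assert m.field_names() == ['lengths', 'values:keys', 'values:values']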
def MapWithEvicted(
keys,
values,
keys_name='keys',
values_name='values',
lengths_blob=None,
evicted_values=None
):
"""A map with extra field evicted_values
"""
return ListWithEvicted(
Struct((keys_name, keys), (values_name, values)),
lengths_blob=lengths_blob,
evicted_values=evicted_values
)
def NamedTuple(name_prefix, *fields):
return Struct(* [('%s_%d' % (name_prefix, i), field)
for i, field in enumerate(fields)])
def Tuple(*fields):
"""
Creates a Struct with default, sequential, field names of given types.
"""
return NamedTuple('field', *fields)
def RawTuple(num_fields, name_prefix='field'):
"""
Creates a tuple of `num_fields` untyped scalars.
"""
assert isinstance(num_fields, int)
assert num_fields >= 0
return NamedTuple(name_prefix, *([np.void] * num_fields))
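# Editor's note: illustrative sketch only, exercising the helpers above.
def _example_tuples():
    t = Tuple(np.int32, np.float32)
    assert t.field_names() == ['field_0', 'field_1']
    # RawTuple produces untyped (np.void) scalars.
    r = RawTuple(3)
    assert all(ft == np.dtype(np.void) for ft in r.field_types())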
def from_dtype(dtype, _outer_shape=()):
"""Constructs a Caffe2 schema from the given numpy's dtype.
Numpy supports scalar, array-like and structured datatypes, as long as
all the shapes are fixed. This function breaks down the given dtype into
a Caffe2 schema containing `Struct` and `Scalar` types.
Fields containing byte offsets are not currently supported.
"""
if not isinstance(dtype, np.dtype):
# wrap into a dtype, applying the outer shape
shape = _outer_shape
dtype = np.dtype((dtype, _outer_shape))
else:
# concatenate shapes if necessary
shape = _outer_shape + dtype.shape
if shape != dtype.shape:
dtype = np.dtype((dtype.base, shape))
if not dtype.fields:
return Scalar(dtype)
struct_fields = []
for name, (fdtype, offset) in dtype.fields.items():
    assert offset == 0, ('Fields with byte offsets are not supported.')
    struct_fields.append((name, from_dtype(fdtype, _outer_shape=shape)))
return Struct(*struct_fields)
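# Editor's note: illustrative sketch only. Multi-field structured dtypes
# have nonzero byte offsets and are rejected by the assert above, so this
# demonstrates the single-field case.
def _example_from_dtype():
    record = from_dtype(np.dtype([('uid', np.int64)]))
    assert isinstance(record, Struct)
    assert record.field_names() == ['uid']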
class _SchemaNode(object):
"""This is a private class used to represent a Schema Node"""
__slots__: Sequence[str] = ("name", "children", "type_str", "field")
def __init__(self, name, type_str=''):
self.name = name
self.children = []
self.type_str = type_str
self.field = None
def add_child(self, name, type_str=''):
for child in self.children:
if child.name == name and child.type_str == type_str:
return child
child = _SchemaNode(name, type_str)
self.children.append(child)
return child
def get_field(self):
list_names = ['lengths', 'values']
map_names = ['lengths', 'keys', 'values']
if len(self.children) == 0 or self.field is not None:
if self.field is None:
return Struct()
else:
return self.field
child_names = []
for child in self.children:
child_names.append(child.name)
if (set(child_names) == set(list_names)):
for child in self.children:
if child.name == 'values':
values_field = child.get_field()
else:
lengths_field = child.get_field()
self.field = List(
values_field,
lengths_blob=lengths_field
)
self.type_str = "List"
return self.field
elif (set(child_names) == set(map_names)):
for child in self.children:
if child.name == 'keys':
key_field = child.get_field()
elif child.name == 'values':
values_field = child.get_field()
else:
lengths_field = child.get_field()
self.field = Map(
key_field,
values_field,
lengths_blob=lengths_field
)
self.type_str = "Map"
return self.field
else:
struct_fields = []
for child in self.children:
struct_fields.append((child.name, child.get_field()))
self.field = Struct(*struct_fields)
self.type_str = "Struct"
return self.field
def print_recursively(self):
for child in self.children:
child.print_recursively()
logger.info("Printing node: Name and type")
logger.info(self.name)
logger.info(self.type_str)
def from_column_list(
col_names, col_types=None,
col_blobs=None, col_metadata=None
):
"""
Given a list of names, types, and optionally values, construct a Schema.
"""
if col_types is None:
col_types = [None] * len(col_names)
if col_metadata is None:
col_metadata = [None] * len(col_names)
if col_blobs is None:
col_blobs = [None] * len(col_names)
assert len(col_names) == len(col_types), (
'col_names and col_types must have the same length.'
)
assert len(col_names) == len(col_metadata), (
'col_names and col_metadata must have the same length.'
)
assert len(col_names) == len(col_blobs), (
'col_names and col_blobs must have the same length.'
)
root = _SchemaNode('root', 'Struct')
for col_name, col_type, col_blob, col_metadata in zip(
col_names, col_types, col_blobs, col_metadata
):
columns = col_name.split(FIELD_SEPARATOR)
current = root
for i in range(len(columns)):
name = columns[i]
type_str = ''
field = None
if i == len(columns) - 1:
type_str = col_type
field = Scalar(
dtype=col_type,
blob=col_blob,
metadata=col_metadata
)
next = current.add_child(name, type_str)
if field is not None:
next.field = field
current = next
return root.get_field()
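# Editor's note: illustrative sketch only. Column names are split on
# FIELD_SEPARATOR (':' in caffe2), so 'doc:lengths'/'doc:values' reassemble
# into a nested List field under 'doc'.
def _example_from_column_list():
    record = from_column_list(
        ['doc:lengths', 'doc:values'], col_types=[np.int32, np.float32])
    assert record.field_names() == ['doc:lengths', 'doc:values']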
def from_blob_list(schema, values, throw_on_type_mismatch=False):
"""
Create a schema that clones the given schema, but contains the given
list of values.
"""
assert isinstance(schema, Field), 'Argument `schema` must be a Field.'
if isinstance(values, BlobReference):
values = [values]
record = schema.clone_schema()
scalars = record.all_scalars()
assert len(scalars) == len(values), (
'Values must have %d elements, got %d.' % (len(scalars), len(values))
)
for scalar, value in zip(scalars, values):
scalar.set_value(value, throw_on_type_mismatch, unsafe=True)
return record
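# Editor's note: illustrative sketch only, pairing a schema with concrete
# numpy values.
def _example_from_blob_list():
    schema_ = Struct(('x', Scalar(np.int32)), ('y', Scalar(np.float32)))
    record = from_blob_list(schema_, [
        np.array([1, 2], dtype=np.int32),
        np.array([0.5], dtype=np.float32),
    ])
    assert record.x.get().tolist() == [1, 2]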
def as_record(value):
if isinstance(value, Field):
return value
elif isinstance(value, list) or isinstance(value, tuple):
is_field_list = all(
isinstance(f, tuple) and len(f) == 2 and isinstance(f[0], basestring)
for f in value
)
if is_field_list:
return Struct(* [(k, as_record(v)) for k, v in value])
else:
return Tuple(* [as_record(f) for f in value])
elif isinstance(value, dict):
return Struct(* [(k, as_record(v)) for k, v in viewitems(value)])
else:
return _normalize_field(value)
def FetchRecord(blob_record, ws=None, throw_on_type_mismatch=False):
"""
Given a record containing BlobReferences, return a new record with the same
schema, containing numpy arrays, fetched from the current active workspace.
"""
def fetch(v):
if ws is None:
return workspace.FetchBlob(str(v))
else:
return ws.blobs[str(v)].fetch()
assert isinstance(blob_record, Field)
field_blobs = blob_record.field_blobs()
assert all(isinstance(v, BlobReference) for v in field_blobs)
field_arrays = [fetch(value) for value in field_blobs]
return from_blob_list(blob_record, field_arrays, throw_on_type_mismatch)
def FeedRecord(blob_record, arrays, ws=None):
"""
Given a record containing BlobReferences and `arrays`, which is either
a list of numpy arrays or a record containing numpy arrays, feeds the
arrays into the corresponding blobs in the current workspace.
"""
def feed(b, v):
if ws is None:
workspace.FeedBlob(str(b), v)
else:
ws.create_blob(str(b))
ws.blobs[str(b)].feed(v)
assert isinstance(blob_record, Field)
field_blobs = blob_record.field_blobs()
assert all(isinstance(v, BlobReference) for v in field_blobs)
if isinstance(arrays, Field):
# TODO: check schema
arrays = arrays.field_blobs()
assert len(arrays) == len(field_blobs), (
'Values must contain exactly %d ndarrays.' % len(field_blobs)
)
for blob, array in zip(field_blobs, arrays):
feed(blob, array)
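# Editor's note: a hedged round-trip sketch (not in the original module),
# using the default global workspace; no net needs to run, since FeedBlob
# writes the blobs directly.
def _example_feed_fetch_roundtrip():
    net = core.Net('schema_example')
    arrays = Struct(('x', np.array([1.0, 2.0], dtype=np.float32)))
    blobs = NewRecord(net, arrays)  # defined just below
    FeedRecord(blobs, arrays)
    fetched = FetchRecord(blobs)
    np.testing.assert_array_equal(fetched.x.get(), arrays.x.get())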
def NewRecord(net, schema):
"""
Given a schema or a record of np.arrays, create a BlobReference for each
field, returning a record containing BlobReferences. The name of each
returned blob is NextScopedBlob(field_name), which guarantees a unique
name in the current net. Use NameScope explicitly to avoid name conflicts
between different nets.
"""
if isinstance(schema, Scalar):
result = schema.clone()
result.set_value(
blob=net.NextScopedBlob('unnamed_scalar'),
unsafe=True,
)
return result
assert isinstance(schema, Field), 'Record must be a schema.Field instance.'
blob_refs = [
net.NextScopedBlob(prefix=name)
for name in schema.field_names()
]
return from_blob_list(schema, blob_refs)
def ConstRecord(net, array_record):
"""
Given a record of arrays, returns a record of blobs,
initialized with net.Const.
"""
blob_record = NewRecord(net, array_record)
for blob, array in zip(
blob_record.field_blobs(), array_record.field_blobs()
):
net.Const(array, blob)
return blob_record
def InitEmptyRecord(net, schema_or_record, enforce_types=False):
if not schema_or_record.has_blobs():
record = NewRecord(net, schema_or_record)
else:
record = schema_or_record
for blob_type, blob in zip(record.field_types(), record.field_blobs()):
try:
data_type = data_type_for_dtype(blob_type)
shape = [0] + list(blob_type.shape)
net.ConstantFill([], blob, shape=shape, dtype=data_type)
except TypeError:
logger.warning("Blob {} has type error".format(blob))
# If data_type_for_dtype doesn't know how to resolve given numpy
# type to core.DataType, that function can throw type error (for
# example that would happen for cases of unknown types such as
# np.void). This is not a problem for cases when the record is going
# to be overwritten by some operator later, though it might be an
# issue for type/shape inference.
if enforce_types:
raise
# If we don't enforce types for all items we'll create a blob with
# the default ConstantFill (FLOAT, no shape)
net.ConstantFill([], blob, shape=[0])
return record
_DATA_TYPE_FOR_DTYPE = [
(np.str, core.DataType.STRING),
(np.float16, core.DataType.FLOAT16),
(np.float32, core.DataType.FLOAT),
(np.float64, core.DataType.DOUBLE),
(np.bool, core.DataType.BOOL),
(np.int8, core.DataType.INT8),
(np.int16, core.DataType.INT16),
(np.int32, core.DataType.INT32),
(np.int64, core.DataType.INT64),
(np.uint8, core.DataType.UINT8),
(np.uint16, core.DataType.UINT16),
]
def is_schema_subset(schema, original_schema):
# TODO add more checks
return set(schema.field_names()).issubset(
set(original_schema.field_names()))
def equal_schemas(schema,
original_schema,
check_field_names=True,
check_field_types=True,
check_field_metas=False):
assert isinstance(schema, Field)
assert isinstance(original_schema, Field)
if check_field_names and (
schema.field_names() != original_schema.field_names()):
return False
if check_field_types and (
schema.field_types() != original_schema.field_types()):
return False
if check_field_metas and (
schema.field_metadata() != original_schema.field_metadata()):
return False
return True
def schema_check(schema, previous=None):
record = as_record(schema)
if previous is not None:
assert equal_schemas(schema, previous)
return record
def data_type_for_dtype(dtype):
for np_type, dt in _DATA_TYPE_FOR_DTYPE:
if dtype.base == np_type:
return dt
raise TypeError('Unknown dtype: ' + str(dtype.base))
def dtype_for_core_type(core_type):
for np_type, dt in _DATA_TYPE_FOR_DTYPE:
if dt == core_type:
return np_type
raise TypeError('Unknown core type: ' + str(core_type))
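# Editor's note: illustrative sketch of the two lookup helpers above.
def _example_dtype_mapping():
    assert data_type_for_dtype(np.dtype(np.float32)) == core.DataType.FLOAT
    assert dtype_for_core_type(core.DataType.INT64) == np.int64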
def attach_metadata_to_scalars(field, metadata):
for f in field.all_scalars():
f.set_metadata(metadata)
|
pytorch-master
|
caffe2/python/schema.py
|
import errno
import os
from subprocess import PIPE, Popen
import caffe2.python._import_c_extension as C
from caffe2.proto import caffe2_pb2
from caffe2.python import core
class NNModule(object):
def __init__(self, net=None, device_map=None):
if net is not None:
serialized_proto = None
if isinstance(net, core.Net):
serialized_proto = net.Proto().SerializeToString()
elif isinstance(net, caffe2_pb2.NetDef):
serialized_proto = net.SerializeToString()
# Distributed
if device_map is not None:
serialized_device_map = {}
for k in device_map:
serialized_device_map[k] = device_map[k].SerializeToString()
self._NNModule = C.NNModuleFromProtobufDistributed(
serialized_proto, serialized_device_map
)
# Default
elif serialized_proto:
self._NNModule, self._OpList = C.NNModuleFromProtobuf(serialized_proto)
else:
raise Exception(
    "NNModule can only be constructed from core.Net or caffe2_pb2.NetDef types"
)
else:
self._NNModule = C.NNModule()
@property
def dataFlow(self):
return self._NNModule.dataFlow()
@property
def controlFlow(self):
return self._NNModule.getExecutionOrder()
@property
def nodes(self):
return self._NNModule.dataFlow().nodes
@property
def operators(self):
return self._NNModule.dataFlow().operators
@property
def tensors(self):
return self._NNModule.dataFlow().tensors
def createNode(self, val):
return self._NNModule.dataFlow().createNode(val)
def deleteNode(self, node):
return self._NNModule.dataFlow().deleteNode(node)
def createEdge(self, a, b):
return self._NNModule.dataFlow().createEdge(a, b)
def deleteEdge(self, a, b=None):
if b:
self._NNModule.dataFlow().deleteEdge(a, b)
else:
self._NNModule.dataFlow().deleteEdge(a)
def replaceNode(self, old_node, new_node):
return self._NNModule.dataFlow().replaceNode(old_node, new_node)
def replaceProducer(self, tensor, new_producer):
C.replaceProducer(tensor, new_producer)
def replaceAllUsesWith(self, old_tensor, new_tensor):
C.replaceAllUsesWith(old_tensor, new_tensor)
def replaceAsConsumer(self, old_consumer, new_consumer):
C.replaceAsConsumer(old_consumer, new_consumer)
def replaceSubgraph(self, subgraph, new_node, inputs, outputs):
self._NNModule.replaceSubgraph(subgraph, new_node, inputs, outputs)
def deleteSubgraph(self, subgraph):
self._NNModule.deleteSubgraph(subgraph)
def createUniqueDataNode(self, prefix="_unique"):
return self._NNModule.createUniqueDataNode(prefix)
def convertToCaffe2Proto(self, old_proto=None):
if not old_proto:
old_proto = caffe2_pb2.NetDef()
output = self._NNModule.convertToCaffe2Proto(old_proto)
new_proto = caffe2_pb2.NetDef()
new_proto.ParseFromString(output)
return new_proto
def match(self, pattern):
for n in self.dataFlow.getMutableNodes():
m = C.matchSubgraph(n, pattern)
if m:
yield m
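# Editor's note: a hedged usage sketch, not part of the original module.
# Assumes the nomnigraph C extension is available in this build.
def _example_nnmodule_roundtrip():
    net = core.Net('nn_example')
    net.Relu(['X'], ['Y'])
    nnp = NNModule(net)
    # Converting back should preserve the single Relu operator.
    proto = nnp.convertToCaffe2Proto()
    assert [op.type for op in proto.op] == ['Relu']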
def render(s):
s = str(s)
cmd_exists = lambda x: any(
os.access(os.path.join(path, x), os.X_OK)
for path in os.getenv("PATH", "").split(os.pathsep)
)
if cmd_exists("graph-easy"):
p = Popen("graph-easy", stdin=PIPE)
try:
p.stdin.write(s.encode("utf-8"))
except IOError as e:
if e.errno == errno.EPIPE or e.errno == errno.EINVAL:
pass
else:
# Raise any other error.
raise
p.stdin.close()
p.wait()
else:
print(s)
NeuralNetOperator = C.NeuralNetOperator
Operator = C.NeuralNetOperator
NeuralNetData = C.NeuralNetData
Data = C.NeuralNetData
NNSubgraph = C.NNSubgraph
NNMatchGraph = C.NNMatchGraph
Graph = C.Graph
Annotation = C.Annotation
|
pytorch-master
|
caffe2/python/nomnigraph.py
|
from caffe2.python.schema import (
Struct, FetchRecord, NewRecord, FeedRecord, InitEmptyRecord)
from caffe2.python import core, workspace
from caffe2.python.session import LocalSession
from caffe2.python.dataset import Dataset
from caffe2.python.pipeline import pipe
from caffe2.python.task import TaskGroup
from caffe2.python.test_util import TestCase
import numpy as np
class TestLocalSession(TestCase):
def test_local_session(self):
init_net = core.Net('init')
src_values = Struct(
('uid', np.array([1, 2, 6])),
('value', np.array([1.4, 1.6, 1.7])))
expected_dst = Struct(
('uid', np.array([2, 4, 12])),
('value', np.array([0.0, 0.0, 0.0])))
with core.NameScope('init'):
src_blobs = NewRecord(init_net, src_values)
dst_blobs = InitEmptyRecord(init_net, src_values.clone_schema())
def proc1(rec):
net = core.Net('proc1')
with core.NameScope('proc1'):
out = NewRecord(net, rec)
net.Add([rec.uid(), rec.uid()], [out.uid()])
out.value.set(blob=rec.value(), unsafe=True)
return [net], out
def proc2(rec):
net = core.Net('proc2')
with core.NameScope('proc2'):
out = NewRecord(net, rec)
out.uid.set(blob=rec.uid(), unsafe=True)
net.Sub([rec.value(), rec.value()], [out.value()])
return [net], out
src_ds = Dataset(src_blobs)
dst_ds = Dataset(dst_blobs)
with TaskGroup() as tg:
out1 = pipe(src_ds.reader(), processor=proc1)
out2 = pipe(out1, processor=proc2)
pipe(out2, dst_ds.writer())
ws = workspace.C.Workspace()
FeedRecord(src_blobs, src_values, ws)
session = LocalSession(ws)
session.run(init_net)
session.run(tg)
output = FetchRecord(dst_blobs, ws=ws)
for a, b in zip(output.field_blobs(), expected_dst.field_blobs()):
np.testing.assert_array_equal(a, b)
|
pytorch-master
|
caffe2/python/session_test.py
|
pytorch-master
|
caffe2/python/serialized_test/__init__.py
|
|
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import os
import tempfile
from zipfile import ZipFile
'''
Generates a document in markdown format summarizing the coverage of serialized
testing. The document lives in
`caffe2/python/serialized_test/SerializedTestCoverage.md`
'''
OpSchema = workspace.C.OpSchema
def gen_serialized_test_coverage(source_dir, output_dir):
(covered, not_covered, schemaless) = gen_coverage_sets(source_dir)
num_covered = len(covered)
num_not_covered = len(not_covered)
num_schemaless = len(schemaless)
total_ops = num_covered + num_not_covered
with open(os.path.join(output_dir, 'SerializedTestCoverage.md'), 'w+') as f:
f.write('# Serialized Test Coverage Report\n')
f.write("This is an automatically generated file. Please see "
"`caffe2/python/serialized_test/README.md` for details. "
"In the case of merge conflicts, please rebase and regenerate.\n")
f.write('## Summary\n')
f.write(
'Serialized tests have covered {}/{} ({}%) operators\n\n'.format(
num_covered, total_ops,
int(num_covered / total_ops * 1000) / 10))
f.write('## Not covered operators\n')
f.write('<details>\n')
f.write(
'<summary>There are {} not covered operators</summary>\n\n'.format(
num_not_covered))
for n in sorted(not_covered):
f.write('* ' + n + '\n')
f.write('</details>\n\n')
f.write('## Covered operators\n')
f.write('<details>\n')
f.write(
'<summary>There are {} covered operators</summary>\n\n'.format(
num_covered))
for n in sorted(covered):
f.write('* ' + n + '\n')
f.write('</details>\n\n')
f.write('## Excluded from coverage statistics\n')
f.write('### Schemaless operators\n')
f.write('<details>\n')
f.write(
'<summary>There are {} schemaless operators</summary>\n\n'.format(
num_schemaless))
for n in sorted(schemaless):
f.write('* ' + n + '\n')
f.write('</details>\n\n')
def gen_coverage_sets(source_dir):
covered_ops = gen_covered_ops(source_dir)
not_covered_ops = set()
schemaless_ops = []
for op_name in core._GetRegisteredOperators():
s = OpSchema.get(op_name)
if s is not None and s.private:
continue
if s:
if op_name not in covered_ops:
not_covered_ops.add(op_name)
else:
if op_name.find("_ENGINE_") == -1:
schemaless_ops.append(op_name)
return (covered_ops, not_covered_ops, schemaless_ops)
def gen_covered_ops(source_dir):
def parse_proto(x):
proto = caffe2_pb2.OperatorDef()
proto.ParseFromString(x)
return proto
covered = set()
for f in os.listdir(source_dir):
zipfile = os.path.join(source_dir, f)
if not os.path.isfile(zipfile):
continue
temp_dir = tempfile.mkdtemp()
with ZipFile(zipfile) as z:
z.extractall(temp_dir)
op_path = os.path.join(temp_dir, 'op.pb')
with open(op_path, 'rb') as f:
loaded_op = f.read()
op_proto = parse_proto(loaded_op)
covered.add(op_proto.type)
index = 0
grad_path = os.path.join(temp_dir, 'grad_{}.pb'.format(index))
while os.path.isfile(grad_path):
with open(grad_path, 'rb') as f:
loaded_grad = f.read()
grad_proto = parse_proto(loaded_grad)
covered.add(grad_proto.type)
index += 1
grad_path = os.path.join(temp_dir, 'grad_{}.pb'.format(index))
return covered
|
pytorch-master
|
caffe2/python/serialized_test/coverage.py
|
import inspect
import os
import shutil
import sys
import tempfile
import threading
from contextlib import contextmanager
from zipfile import ZipFile
import argparse
import hypothesis as hy
import numpy as np
import caffe2.python.hypothesis_test_util as hu
from caffe2.proto import caffe2_pb2
from caffe2.python import gradient_checker
from caffe2.python.serialized_test import coverage
operator_test_type = 'operator_test'
TOP_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_SUFFIX = 'data'
DATA_DIR = os.path.join(TOP_DIR, DATA_SUFFIX)
_output_context = threading.local()
def given(*given_args, **given_kwargs):
def wrapper(f):
hyp_func = hy.seed(0)(hy.settings(max_examples=1)(hy.given(
    *given_args, **given_kwargs)(f)))
fixed_seed_func = hy.seed(0)(hy.settings(max_examples=1)(hy.given(
*given_args, **given_kwargs)(f)))
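        # Editor's note: both wrapped functions above are deliberately
        # identical (seed 0, a single example), so the serializing pass and
        # the plain pass below see the same generated inputs; only
        # self.should_serialize differs between the two calls.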
def func(self, *args, **kwargs):
self.should_serialize = True
fixed_seed_func(self, *args, **kwargs)
self.should_serialize = False
hyp_func(self, *args, **kwargs)
return func
return wrapper
def _getGradientOrNone(op_proto):
try:
grad_ops, _ = gradient_checker.getGradientForOp(op_proto)
return grad_ops
except Exception:
return []
# necessary to support converting jagged lists into numpy arrays
def _transformList(l):
ret = np.empty(len(l), dtype=np.object)
for (i, arr) in enumerate(l):
ret[i] = arr
return ret
def _prepare_dir(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
class SerializedTestCase(hu.HypothesisTestCase):
should_serialize = False
def get_output_dir(self):
output_dir_arg = getattr(_output_context, 'output_dir', DATA_DIR)
output_dir = os.path.join(
output_dir_arg, operator_test_type)
if os.path.exists(output_dir):
return output_dir
# fall back to pwd
cwd = os.getcwd()
serialized_util_module_components = __name__.split('.')
serialized_util_module_components.pop()
serialized_dir = '/'.join(serialized_util_module_components)
output_dir_fallback = os.path.join(cwd, serialized_dir, DATA_SUFFIX)
output_dir = os.path.join(
output_dir_fallback,
operator_test_type)
return output_dir
def get_output_filename(self):
class_path = inspect.getfile(self.__class__)
file_name_components = os.path.basename(class_path).split('.')
test_file = file_name_components[0]
function_name_components = self.id().split('.')
test_function = function_name_components[-1]
return test_file + '.' + test_function
def serialize_test(self, inputs, outputs, grad_ops, op, device_option):
output_dir = self.get_output_dir()
test_name = self.get_output_filename()
full_dir = os.path.join(output_dir, test_name)
_prepare_dir(full_dir)
inputs = _transformList(inputs)
outputs = _transformList(outputs)
device_type = int(device_option.device_type)
op_path = os.path.join(full_dir, 'op.pb')
grad_paths = []
inout_path = os.path.join(full_dir, 'inout')
with open(op_path, 'wb') as f:
f.write(op.SerializeToString())
for (i, grad) in enumerate(grad_ops):
grad_path = os.path.join(full_dir, 'grad_{}.pb'.format(i))
grad_paths.append(grad_path)
with open(grad_path, 'wb') as f:
f.write(grad.SerializeToString())
np.savez_compressed(
inout_path,
inputs=inputs,
outputs=outputs,
device_type=device_type)
with ZipFile(os.path.join(output_dir, test_name + '.zip'), 'w') as z:
z.write(op_path, 'op.pb')
z.write(inout_path + '.npz', 'inout.npz')
for path in grad_paths:
z.write(path, os.path.basename(path))
shutil.rmtree(full_dir)
def compare_test(self, inputs, outputs, grad_ops, atol=1e-7, rtol=1e-7):
def parse_proto(x):
proto = caffe2_pb2.OperatorDef()
proto.ParseFromString(x)
return proto
source_dir = self.get_output_dir()
test_name = self.get_output_filename()
temp_dir = tempfile.mkdtemp()
with ZipFile(os.path.join(source_dir, test_name + '.zip')) as z:
z.extractall(temp_dir)
op_path = os.path.join(temp_dir, 'op.pb')
inout_path = os.path.join(temp_dir, 'inout.npz')
# load serialized input and output
loaded = np.load(inout_path, encoding='bytes', allow_pickle=True)
loaded_inputs = loaded['inputs'].tolist()
inputs_equal = True
for (x, y) in zip(inputs, loaded_inputs):
if not np.array_equal(x, y):
inputs_equal = False
loaded_outputs = loaded['outputs'].tolist()
# if inputs are not the same, run serialized input through serialized op
if not inputs_equal:
# load operator
with open(op_path, 'rb') as f:
loaded_op = f.read()
op_proto = parse_proto(loaded_op)
device_type = loaded['device_type']
device_option = caffe2_pb2.DeviceOption(
device_type=int(device_type))
outputs = hu.runOpOnInput(device_option, op_proto, loaded_inputs)
grad_ops = _getGradientOrNone(op_proto)
# assert outputs are equal
for (x, y) in zip(outputs, loaded_outputs):
np.testing.assert_allclose(x, y, atol=atol, rtol=rtol)
# assert gradient op is equal
for i in range(len(grad_ops)):
grad_path = os.path.join(temp_dir, 'grad_{}.pb'.format(i))
with open(grad_path, 'rb') as f:
loaded_grad = f.read()
grad_proto = parse_proto(loaded_grad)
self._assertSameOps(grad_proto, grad_ops[i])
shutil.rmtree(temp_dir)
def _assertSameOps(self, op1, op2):
op1_ = caffe2_pb2.OperatorDef()
op1_.CopyFrom(op1)
op1_.arg.sort(key=lambda arg: arg.name)
op2_ = caffe2_pb2.OperatorDef()
op2_.CopyFrom(op2)
op2_.arg.sort(key=lambda arg: arg.name)
self.assertEqual(op1_, op2_)
def assertSerializedOperatorChecks(
self,
inputs,
outputs,
gradient_operator,
op,
device_option,
atol=1e-7,
rtol=1e-7,
):
if self.should_serialize:
if getattr(_output_context, 'should_generate_output', False):
self.serialize_test(
inputs, outputs, gradient_operator, op, device_option)
if not getattr(_output_context, 'disable_gen_coverage', False):
coverage.gen_serialized_test_coverage(
self.get_output_dir(), TOP_DIR)
else:
self.compare_test(
inputs, outputs, gradient_operator, atol, rtol)
def assertReferenceChecks(
self,
device_option,
op,
inputs,
reference,
input_device_options=None,
threshold=1e-4,
output_to_grad=None,
grad_reference=None,
atol=None,
outputs_to_check=None,
ensure_outputs_are_inferred=False,
):
outs = super(SerializedTestCase, self).assertReferenceChecks(
device_option,
op,
inputs,
reference,
input_device_options,
threshold,
output_to_grad,
grad_reference,
atol,
outputs_to_check,
ensure_outputs_are_inferred,
)
if not getattr(_output_context, 'disable_serialized_check', False):
grad_ops = _getGradientOrNone(op)
rtol = threshold
if atol is None:
atol = threshold
self.assertSerializedOperatorChecks(
inputs,
outs,
grad_ops,
op,
device_option,
atol,
rtol,
)
@contextmanager
def set_disable_serialized_check(self, val: bool):
orig = getattr(_output_context, 'disable_serialized_check', False)
try:
# pyre-fixme[16]: `local` has no attribute `disable_serialized_check`.
_output_context.disable_serialized_check = val
yield
finally:
_output_context.disable_serialized_check = orig
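# Editor's note: a hedged usage sketch, not part of the original module.
# A real test module would subclass SerializedTestCase with this module's
# `given` wrapper; comparing against recorded outputs assumes the matching
# zip under DATA_DIR already exists (or that -G/--generate-serialized was
# passed via testWithArgs below).
class _ExampleSerializedTest(SerializedTestCase):
    @given(X=hu.tensor(dtype=np.float32))
    def test_relu_example(self, X):
        from caffe2.python import core
        op = core.CreateOperator('Relu', ['X'], ['Y'])
        self.assertReferenceChecks(
            hu.cpu_do, op, [X], lambda X: (np.maximum(X, 0),))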
def testWithArgs():
parser = argparse.ArgumentParser()
parser.add_argument(
'-G', '--generate-serialized', action='store_true', dest='generate',
help='generate output files (default=false, compares to current files)')
parser.add_argument(
'-O', '--output', default=DATA_DIR,
help='output directory (default: %(default)s)')
parser.add_argument(
'-D', '--disable-serialized_check', action='store_true', dest='disable',
help='disable checking serialized tests')
parser.add_argument(
'-C', '--disable-gen-coverage', action='store_true',
dest='disable_coverage',
help='disable generating coverage markdown file')
parser.add_argument('unittest_args', nargs='*')
args = parser.parse_args()
sys.argv[1:] = args.unittest_args
_output_context.__setattr__('should_generate_output', args.generate)
_output_context.__setattr__('output_dir', args.output)
_output_context.__setattr__('disable_serialized_check', args.disable)
_output_context.__setattr__('disable_gen_coverage', args.disable_coverage)
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/serialized_test/serialized_test_util.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import assume, given, settings
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class PoolTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
method=st.sampled_from(["MaxPool", "AveragePool"]),
**mu.gcs)
@settings(deadline=10000)
def test_pooling(self, stride, pad, kernel, size,
input_channels, batch_size,
method, gc, dc):
assume(pad < kernel)
op = core.CreateOperator(
method,
["X"],
["Y"],
stride=stride,
pad=pad,
kernel=kernel,
device_option=dc[0],
)
X = np.random.rand(
batch_size, input_channels, size, size
).astype(np.float32)
self.assertDeviceChecks(dc, op, [X], [0])
if 'MaxPool' not in method:
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
method=st.sampled_from(["MaxPool", "AveragePool"]),
**mu.gcs_cpu_ideep)
def test_int8_pooling(self, stride, pad, kernel, size,
input_channels, batch_size,
method, gc, dc):
assume(pad < kernel)
pool_fp32 = core.CreateOperator(
method,
["X"],
["Y"],
stride=stride,
pad=pad,
kernel=kernel,
device_option=dc[0]
)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32)
if X.min() >= 0:
scale = np.absolute(X).max() / 0xFF
zero_point = 0
else:
scale = np.absolute(X).max() / 0x7F
zero_point = 128
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob("X", X, dc[0])
workspace.RunOperatorOnce(pool_fp32)
Y = workspace.FetchBlob("Y")
workspace.ResetWorkspace()
sw2nhwc = core.CreateOperator(
"NCHW2NHWC",
["Xi"],
["Xi_nhwc"],
device_option=dc[1]
)
quantize = core.CreateOperator(
"Int8Quantize",
["Xi_nhwc"],
["Xi_quantized"],
engine="DNNLOWP",
device_option=dc[1],
Y_zero_point=zero_point,
Y_scale=scale,
)
pool = core.CreateOperator(
"Int8{}".format(method),
["Xi_quantized"],
["Y_quantized"],
stride=stride,
pad=pad,
kernel=kernel,
engine="DNNLOWP",
device_option=dc[1],
)
dequantize = core.CreateOperator(
"Int8Dequantize",
["Y_quantized"],
["Y_nhwc"],
engine="DNNLOWP",
device_option=dc[1],
)
sw2nchw = core.CreateOperator(
"NHWC2NCHW",
["Y_nhwc"],
["Y_out"],
device_option=dc[1]
)
net = caffe2_pb2.NetDef()
net.op.extend([sw2nhwc, quantize, pool, dequantize, sw2nchw])
workspace.FeedBlob("Xi", X, dc[1])
workspace.RunNetOnce(net)
Y_out = workspace.FetchBlob("Y_out")
MSE = np.square(np.subtract(Y, Y_out)).mean()
if MSE > 0.005:
print(Y.flatten())
print(Y_out.flatten())
print(np.max(np.abs(Y_out - Y)))
print("MSE", MSE)
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/pool_op_test.py
|
from caffe2.python.test_util import TestCase
from caffe2.proto import caffe2_pb2
import unittest
import numpy as np
from caffe2.python import core, workspace
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestReShapeOps(TestCase):
def test_reshape_ops(self):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
workspace.FeedBlob('res', np.array([[0, 0, 0, 0]], dtype=np.float32))
workspace.FeedBlob('shape', np.array([1, 4], dtype=np.int32), core.DeviceOption(caffe2_pb2.CPU, 0))
workspace.FeedBlob('input', np.zeros((2, 2), dtype=np.float32))
workspace.RunOperatorOnce(core.CreateOperator(
'Reshape', ['input', 'shape'], ['output', 'old_shape']))
assert ((workspace.FetchBlob('output') ==
workspace.FetchBlob('res')).all())
def test_basic_reshape(self):
_test_reshape(old_shape=(4, 2, 1), new_shape=(2, 4))
_test_reshape(old_shape=(4, 2, 1), new_shape=(2, 4), arg_shape=False)
def test_int64_reshape_input(self):
_test_reshape(old_shape=(4, 2, 1), new_shape=(2, 4), arg_shape=False, shape_dtype=np.int64)
def test_missing_dim(self):
_test_reshape(old_shape=(4, 2, 1), new_shape=(-1, 8))
_test_reshape(old_shape=(4, 2, 1), new_shape=(-1, 8), arg_shape=False)
def test_in_place(self):
_test_reshape(old_shape=(4, 2, 1), new_shape=(-1, 8), in_place=True)
_test_reshape(old_shape=(4, 2, 1), new_shape=(-1, 8),
in_place=True, arg_shape=False)
def test_zero_dim(self):
_test_reshape(old_shape=(4, 2, 1), new_shape=(0, 0, 0),
expected_shape=(4, 2, 1))
_test_reshape(old_shape=(4, 2, 1), new_shape=(0, 0, 0),
expected_shape=(4, 2, 1), arg_shape=False)
_test_reshape(old_shape=(4, 2, 1), new_shape=(0, 2, 1),
expected_shape=(4, 2, 1))
_test_reshape(old_shape=(4, 2, 1), new_shape=(0, 2, 1),
expected_shape=(4, 2, 1), arg_shape=False)
def test_zero_dim_and_missing_dim(self):
_test_reshape(old_shape=(4, 2, 1), new_shape=(0, -1, 0),
expected_shape=(4, 2, 1))
_test_reshape(old_shape=(4, 2, 1), new_shape=(0, -1, 0),
expected_shape=(4, 2, 1), arg_shape=False)
_test_reshape(old_shape=(4, 3, 2), new_shape=(-1, 0),
expected_shape=(8, 3))
_test_reshape(old_shape=(4, 3, 2), new_shape=(-1, 0),
expected_shape=(8, 3), arg_shape=False)
def test_backprop(self):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
old_shape = (4, 2, 1)
new_shape = (1, 8)
X = np.random.rand(*old_shape).astype(np.float32)
Y = np.random.rand(*new_shape).astype(np.float32)
net = core.Net('net')
net.GivenTensorFill([], 'X', shape=old_shape, values=X.flatten())
net.GivenTensorFill([], 'Y', shape=new_shape, values=Y.flatten())
net.Reshape(['X'], ['X_out', 'old_shape'], shape=new_shape)
net.Mul(['X_out', 'Y'], 'Z')
net.AddGradientOperators(['Z'])
workspace.RunNetOnce(net)
Z = workspace.FetchBlob('Z')
X_grad = workspace.FetchBlob('X_grad')
# Check forward computation
np.testing.assert_allclose(
Z.squeeze(), (X.reshape(new_shape) * Y).squeeze(), rtol=1e-5)
# Check the shape of the gradient
np.testing.assert_array_equal(X_grad.shape, X.shape)
# Check the gradient
np.testing.assert_allclose(X_grad, Y.reshape(old_shape), rtol=1e-5)
def test_input_shape_changes(self):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
workspace.FeedBlob(
'input_blob',
np.array(np.random.rand(10, 20, 10), dtype=np.float32))
net = core.Net('mynet')
z, _ = net.Reshape('input_blob',
['z_reshape', 'dummy_size'],
shape=(-1, 10))
workspace.CreateNet(net)
workspace.RunNet(net)
workspace.FeedBlob(
'input_blob',
np.array(np.random.rand(10, 40, 10), dtype=np.float32))
workspace.RunNet(net)
def _test_reshape(old_shape, new_shape, expected_shape=None, arg_shape=True,
in_place=False, shape_dtype=np.int32):
devices = [core.DeviceOption(caffe2_pb2.IDEEP, 0)]
for device_opt in devices:
with core.DeviceScope(device_opt):
if expected_shape is None:
expected_shape = new_shape
X = np.random.rand(*old_shape).astype(np.float32)
blob_in = 'X'
blob_out = blob_in if in_place else blob_in + '_out'
if arg_shape:
op = core.CreateOperator('Reshape',
[blob_in],
[blob_out, 'old_shape'],
shape=new_shape)
else:
op = core.CreateOperator('Reshape',
[blob_in, 'new_shape'],
[blob_out, 'old_shape'])
workspace.FeedBlob('new_shape', np.asarray(new_shape, dtype=shape_dtype),
core.DeviceOption(caffe2_pb2.CPU, 0))
workspace.FeedBlob(blob_in, X)
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob(blob_out)
np.testing.assert_allclose(Y, X.reshape(expected_shape))
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/reshape_op_test.py
|
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestAdamOps(hu.HypothesisTestCase):
@given(inputs=hu.tensors(n=4),
ITER=st.integers(min_value=0, max_value=10000),
LR=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
beta1=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
beta2=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
epsilon=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
**mu.gcs)
def test_adam(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
param, mom1, mom2, grad = inputs
ITER = np.array([ITER], dtype=np.int64)
LR = np.array([LR], dtype=np.float32)
mom2 = np.absolute(mom2)
op = core.CreateOperator(
"Adam",
["param", "mom1", "mom2", "grad", "lr", "iter"],
["output_param", "output_mom1", "output_mom2"],
beta1=beta1, beta2=beta2, epsilon=epsilon)
# Iter lives on the CPU
input_device_options = {'iter': hu.cpu_do, 'lr': hu.cpu_do}
self.assertDeviceChecks(
dc, op,
[param, mom1, mom2, grad, LR, ITER],
[0],
input_device_options=input_device_options,
threshold=0.001)
@given(inputs=hu.tensors(n=4),
ITER=st.integers(min_value=0, max_value=10000),
LR=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
beta1=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
beta2=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
epsilon=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
**mu.gcs)
def test_adam_output_grad(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
param, mom1, mom2, grad = inputs
ITER = np.array([ITER], dtype=np.int64)
LR = np.array([LR], dtype=np.float32)
mom2 = np.absolute(mom2)
op = core.CreateOperator(
"Adam",
["param", "mom1", "mom2", "grad", "lr", "iter"],
["output_param", "output_mom1", "output_mom2", "output_grad"],
beta1=beta1, beta2=beta2, epsilon=epsilon)
# Iter lives on the CPU
input_device_options = {'iter': hu.cpu_do, 'lr': hu.cpu_do}
self.assertDeviceChecks(
dc, op,
[param, mom1, mom2, grad, LR, ITER],
[0],
input_device_options=input_device_options,
threshold=0.001)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/adam_op_test.py
|
import unittest
import numpy as np
from random import randint
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class CopyTest(unittest.TestCase):
def _get_deep_device(self):
return caffe2_pb2.DeviceOption(device_type=caffe2_pb2.IDEEP)
def test_copy_to_ideep(self):
op = core.CreateOperator(
"CopyCPUToIDEEP",
["X"],
["X_ideep"],
)
op.device_option.CopyFrom(self._get_deep_device())
n = randint(1, 128)
c = randint(1, 64)
h = randint(1, 128)
w = randint(1, 128)
X = np.random.rand(n, c, h, w).astype(np.float32)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(op)
X_ideep = workspace.FetchBlob("X_ideep")
np.testing.assert_allclose(X, X_ideep)
def test_copy_to_ideep_zero_dim(self):
op = core.CreateOperator(
"CopyCPUToIDEEP",
["X"],
["X_ideep"],
)
op.device_option.CopyFrom(self._get_deep_device())
n = 0
c = randint(1, 128)
X = np.random.rand(n, c).astype(np.float32)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(op)
X_ideep = workspace.FetchBlob("X_ideep")
np.testing.assert_allclose(X, X_ideep)
def test_copy_from_ideep(self):
op = core.CreateOperator(
"CopyIDEEPToCPU",
["X_ideep"],
["X"],
)
op.device_option.CopyFrom(self._get_deep_device())
n = randint(1, 128)
c = randint(1, 64)
h = randint(1, 128)
w = randint(1, 128)
X = np.random.rand(n, c, h, w).astype(np.float32)
workspace.FeedBlob("X_ideep", X, self._get_deep_device())
workspace.RunOperatorOnce(op)
X_ideep = workspace.FetchBlob("X")
np.testing.assert_allclose(X, X_ideep)
def test_copy_from_ideep_zero_dim(self):
op = core.CreateOperator(
"CopyIDEEPToCPU",
["X_ideep"],
["X"],
)
op.device_option.CopyFrom(self._get_deep_device())
n = 0
c = randint(1, 64)
X = np.random.rand(n, c).astype(np.float32)
workspace.FeedBlob("X_ideep", X, self._get_deep_device())
workspace.RunOperatorOnce(op)
X_ideep = workspace.FetchBlob("X")
np.testing.assert_allclose(X, X_ideep)
def test_copy_from_ideep_fallthrough(self):
op = core.CreateOperator(
"CopyIDEEPToCPU",
["X_ideep"],
["X"],)
op.device_option.CopyFrom(self._get_deep_device())
n = randint(1, 128)
c = randint(1, 64)
h = randint(1, 128)
w = randint(1, 128)
X = np.random.rand(n, c, h, w).astype(np.float32)
workspace.FeedBlob("X_ideep", X)
workspace.RunOperatorOnce(op)
X_ideep = workspace.FetchBlob("X")
np.testing.assert_allclose(X, X_ideep)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/copy_op_test.py
|
import argparse
import copy
import json
import logging
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, utils
import caffe2.python._import_c_extension as C
log = logging.getLogger(__name__)
def pairwise(iterable):
from itertools import tee
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
return i
raise ValueError("Failed to find last producer of blob %s" % blob)
def blob_uses(net, blob):
u = []
for i, op in enumerate(net.op):
if blob in op.input or blob in op.control_input:
u.append(i)
return u
def GetArgumentParser():
parser = argparse.ArgumentParser(description="Caffe2 optimization")
parser.add_argument("--init_net",
type=argparse.FileType('rb'),
help="init net")
parser.add_argument("--pred_net",
type=argparse.FileType('rb'),
help="predict net")
parser.add_argument("--verify_input",
type=argparse.FileType('r'),
help="input dims for verification")
parser.add_argument("--fuse_bn", default=False, action='store_true')
parser.add_argument("--fuse_mul_add", default=False, action='store_true')
parser.add_argument("--fuse_conv_relu", default=False, action='store_true')
return parser
def fuse_first_bn(net, params, removed_tensors):
net = copy.deepcopy(net)
params = copy.deepcopy(params)
for ((i, current), (j, next_)) in pairwise(enumerate(net.op)):
if next_.input[0] != current.output[0]:
continue
if current.type not in ("Conv", "ConvTranspose") \
or next_.type != "SpatialBN":
continue
if len(blob_uses(net, current.output[0])) != 1:
# Can't fuse if more than one user
continue
# else, can fuse
conv = current
bn = next_
fused_conv = copy.deepcopy(conv)
fused_conv.output[0] = bn.output[0]
# Fix fused_conv to ensure we have a bias passed.
if len(fused_conv.input) != 3:
bias_name = "{}_bias".format(conv.input[1])
net.external_input.extend([bias_name])
fused_conv.input.extend([bias_name])
for arg in fused_conv.arg:
if arg.name == "no_bias":
arg.i = 0
conv_weight = params[conv.input[1]]
conv_bias = params[conv.input[2]] if len(conv.input) == 3 \
else np.zeros(shape=(conv_weight.shape[0])).astype(np.float32)
bn_scale = params[bn.input[1]]
bn_bias = params[bn.input[2]]
bn_running_mean = params[bn.input[3]]
bn_running_var = params[bn.input[4]]
# First, BN computation can be phrased as follows:
# (X - running_mean) * (1.0 / sqrt(running_var + eps)) *
# bn_scale + bias
# Thus, we can rewrite bn_scale as:
# X * bn_scale * 1.0 / (sqrt(running_var + eps)) + (bias -
# running_mean * (1.0 / sqrt(running_var + eps)) * bn_scale)
# Thus, can just have the affine transform
# X * A + B
# where
# A = bn_scale * 1.0 / (sqrt(running_var + eps))
# B = (bias - running_mean * (1.0 / sqrt(running_var + eps))
# * bn_scale)
eps = 1.0e-5
for arg in bn.arg:
if arg.name == "epsilon":
eps = arg.f
A = bn_scale * 1.0 / (np.sqrt(bn_running_var + eps))
B = bn_bias - bn_running_mean * A
# This identity should hold if we have correctly fused
# np.testing.assert_array_equal(
# params[conv.output[0]] * A + B,
# params[bn.output[0]])
# Now, we have that the computation made is the following:
# ((X `conv` W) + b) * A + B
# Then, we can simply fuse this as follows:
# (X `conv` (W * A)) + b * A + B
# which is simply
# (X `conv` Q) + C
# where
# Q = W * A
# C = b * A + B
# For ConvTranspose, from the view of convolutions as a
# Toeplitz multiplication, we have W_ = W^T, so the weights
# are laid out as (R, S, K, K) (vs (S, R, K, K) for a Conv),
# so the weights broadcast slightly differently. Remember, our
# BN scale 'A' is of size (S,)
A_ = A.reshape(-1, 1, 1, 1) if conv.type == "Conv" else \
A.reshape(1, -1, 1, 1)
C = conv_bias * A + B
Q = conv_weight * A_
params[fused_conv.input[1]] = Q
params[fused_conv.input[2]] = C
new_ops = net.op[:i] + [fused_conv] + net.op[j + 1:]
del net.op[:]
removed_tensors.append(bn.input[1])
removed_tensors.append(bn.input[2])
removed_tensors.append(bn.input[3])
removed_tensors.append(bn.input[4])
del params[bn.input[1]]
del params[bn.input[2]]
del params[bn.input[3]]
del params[bn.input[4]]
net.op.extend(new_ops)
break
return net, params, removed_tensors
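# Editor's note: a small numeric sanity check of the affine folding derived
# in the comments above (illustrative only, not part of the original script).
def _check_bn_folding_math():
    rng = np.random.RandomState(0)
    x, scale, bias = rng.randn(8), rng.rand(8) + 0.5, rng.randn(8)
    mean, var, eps = rng.randn(8), rng.rand(8) + 0.5, 1e-5
    bn_out = (x - mean) / np.sqrt(var + eps) * scale + bias
    A = scale / np.sqrt(var + eps)
    B = bias - mean * A
    np.testing.assert_allclose(bn_out, x * A + B, rtol=1e-6)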
def fuse_bn(net, params, ignore_failure):
# Run until we hit a fixed point
removed_tensors = []
while True:
(next_net, next_params, removed_tensors) = \
fuse_first_bn(net, params, removed_tensors)
if len(next_net.op) == len(net.op):
if (
any(op.type == "SpatialBN" for op in next_net.op) and
not ignore_failure
):
raise Exception(
"Model contains SpatialBN op after fusion: %s", next_net)
return (next_net, next_params, removed_tensors)
net, params, removed_tensors = (next_net, next_params, removed_tensors)
def fuse_first_mul_add(net, params, removed_tensors):
net = copy.deepcopy(net)
params = copy.deepcopy(params)
for ((i, current), (j, next_)) in pairwise(enumerate(net.op)):
if current.type != "Mul" or next_.type != "Add":
continue
if next_.input[0] != current.output[0]:
raise Exception("Failure to fuse")
if len(blob_uses(net, current.output[0])) != 1:
raise Exception("Failure to fuse")
log.info("Fusing at index %s", i)
mul_ = current
add_ = next_
batch_norm = copy.deepcopy(mul_)
batch_norm.type = "SpatialBN"
batch_norm.arg.extend([utils.MakeArgument("is_test", 1)])
batch_norm.arg.extend([utils.MakeArgument("epsilon", float(1e-9))])
def s(x):
return "{}{}".format(add_.output[0], x)
fake_mean = s("_mean")
fake_var = s("_var")
del batch_norm.input[:]
batch_norm.input.extend([mul_.input[0],
mul_.input[1],
add_.input[1],
fake_mean,
fake_var])
params[fake_mean] = np.zeros_like(params[mul_.input[1]])
params[fake_var] = np.ones_like(params[mul_.input[1]])
net.external_input.extend([fake_mean, fake_var])
batch_norm.output[0] = add_.output[0]
new_ops = net.op[:i] + [batch_norm] + net.op[j + 1:]
del net.op[:]
net.op.extend(new_ops)
break
return net, params, removed_tensors
def fuse_mul_add(net, params):
# Run until we hit a fixed point
removed_tensors = []
while True:
(next_net, next_params, removed_tensors) = \
fuse_first_mul_add(net, params, removed_tensors)
if len(next_net.op) == len(net.op):
return (next_net, next_params, removed_tensors)
net, params, removed_tensors = (next_net, next_params, removed_tensors)
def add_tensor(net, name, blob):
''' Create an operator that fills the tensor 'blob' and append it to 'net';
running the net later puts the blob into the workspace.
uint8 is stored as an array of strings with one element.
'''
kTypeNameMapper = {
np.dtype('float32'): "GivenTensorFill",
np.dtype('int32'): "GivenTensorIntFill",
np.dtype('int64'): "GivenTensorInt64Fill",
np.dtype('uint8'): "GivenTensorStringFill",
}
shape = blob.shape
values = blob
# pass array of uint8 as a string to save storage
# storing uint8_t has a large overhead for now
if blob.dtype == np.dtype('uint8'):
shape = [1]
values = [str(blob.data)]
op = core.CreateOperator(
kTypeNameMapper[blob.dtype],
[], [name],
arg=[
utils.MakeArgument("shape", shape),
utils.MakeArgument("values", values),
]
)
net.op.extend([op])
def gen_init_net_from_blobs(blobs):
''' Generate an initialization net based on a blob dict '''
ret = caffe2_pb2.NetDef()
for name, blob in blobs.items():
add_tensor(ret, name, blob)
return ret
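# Editor's note: illustrative sketch only. It relies on utils.MakeArgument
# flattening ndarray values (as the rest of this script already assumes).
def _example_gen_init_net():
    init = gen_init_net_from_blobs({'w': np.ones((2, 2), dtype=np.float32)})
    assert [op.type for op in init.op] == ['GivenTensorFill']
    assert [op.output[0] for op in init.op] == ['w']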
def fuse_conv_relu(net):
net = copy.deepcopy(net)
device_option = core.DeviceOption(caffe2_pb2.IDEEP)
for op in net.op:
op.device_option.CopyFrom(device_option)
new_net = caffe2_pb2.NetDef()
new_net.ParseFromString(C.transform_optimizeForMKLDNN(net.SerializeToString()))
return new_net
def Optimize(args):
init_net = caffe2_pb2.NetDef()
predict_net = caffe2_pb2.NetDef()
init_net.ParseFromString(args.init_net.read())
predict_net.ParseFromString(args.pred_net.read())
workspace.ResetWorkspace()
workspace.RunNetOnce(init_net)
param_dict = {p: workspace.FetchBlob(p) for p in workspace.Blobs()}
external_inputs = {}
external_outputs = {}
if args.verify_input:
value_info = json.load(args.verify_input)
input_shapes = {k : v[-1] for (k, v) in value_info.items()}
print("input info: {}".format(input_shapes))
for k, v in input_shapes.items():
external_inputs[k] = np.random.randn(*v).astype(np.float32)
workspace.FeedBlob(k, external_inputs[k])
workspace.RunNetOnce(predict_net)
for o in predict_net.external_output:
external_outputs[o] = workspace.FetchBlob(o)
if args.fuse_mul_add:
predict_net, param_dict, _ = fuse_mul_add(predict_net, param_dict)
if args.fuse_bn:
predict_net, param_dict, _ = fuse_bn(predict_net, param_dict, False)
if args.fuse_conv_relu:
predict_net = fuse_conv_relu(predict_net)
external_outputs_opt = {}
if args.verify_input:
workspace.ResetWorkspace()
device_option = core.DeviceOption(caffe2_pb2.IDEEP) if args.fuse_conv_relu else core.DeviceOption(caffe2_pb2.CPU)
with core.DeviceScope(device_option):
for k, v in param_dict.items():
workspace.FeedBlob(k, v, device_option)
for k, v in external_inputs.items():
workspace.FeedBlob(k, v, device_option)
workspace.RunNetOnce(predict_net)
for o in predict_net.external_output:
external_outputs_opt[o] = workspace.FetchBlob(o)
assert np.allclose(external_outputs[o],
external_outputs_opt[o],
atol=1e-3,
rtol=1e-3)
for i, o in enumerate(predict_net.op):
print("op[{}]: {}".format(i, o.type))
init_net = gen_init_net_from_blobs(param_dict)
with open('init_net.pb', 'wb') as f:
f.write(init_net.SerializeToString())
with open('predict_net.pb', 'wb') as f:
f.write(predict_net.SerializeToString())
if __name__ == '__main__':
args = GetArgumentParser().parse_args()
Optimize(args)
|
pytorch-master
|
caffe2/python/ideep/transform_ideep_net.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ExpandDimsSqueezeTest(hu.HypothesisTestCase):
@given(
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs
)
def test_squeeze(self, squeeze_dims, inplace, gc, dc):
shape = [
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
X = np.random.rand(*shape).astype(np.float32)
op = core.CreateOperator(
"Squeeze", "X", "X" if inplace else "Y", dims=squeeze_dims
)
self.assertDeviceChecks(dc, op, [X], [0])
@given(
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs_cpu_ideep
)
def test_squeeze_fallback(self, squeeze_dims, inplace, gc, dc):
shape = [
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
X = np.random.rand(*shape).astype(np.float32)
op0 = core.CreateOperator(
"Squeeze",
"X0",
"X0" if inplace else "Y0",
dims=squeeze_dims,
device_option=dc[0]
)
workspace.FeedBlob('X0', X, dc[0])
workspace.RunOperatorOnce(op0)
Y0 = workspace.FetchBlob("X0" if inplace else "Y0")
op1 = core.CreateOperator(
"Squeeze",
"X1",
"X1" if inplace else "Y1",
dims=squeeze_dims,
device_option=dc[1]
)
workspace.FeedBlob('X1', X, dc[0])
workspace.RunOperatorOnce(op1)
Y1 = workspace.FetchBlob("X1" if inplace else "Y1")
if not np.allclose(Y0, Y1, atol=0.01, rtol=0.01):
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
@given(
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs
)
def test_expand_dims(self, squeeze_dims, inplace, gc, dc):
oshape = [
1 if dim in squeeze_dims else np.random.randint(2, 5)
for dim in range(4)
]
nshape = [s for s in oshape if s!=1]
expand_dims = [i for i in range(len(oshape)) if oshape[i]==1]
X = np.random.rand(*nshape).astype(np.float32)
op = core.CreateOperator(
"ExpandDims", "X", "X" if inplace else "Y", dims=expand_dims
)
self.assertDeviceChecks(dc, op, [X], [0])
@given(
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs_cpu_ideep
)
def test_expand_dims_fallback(self, squeeze_dims, inplace, gc, dc):
oshape = [
1 if dim in squeeze_dims else np.random.randint(2, 5)
for dim in range(4)
]
nshape = [s for s in oshape if s!=1]
expand_dims = [i for i in range(len(oshape)) if oshape[i]==1]
X = np.random.rand(*nshape).astype(np.float32)
op0 = core.CreateOperator(
"ExpandDims",
"X0",
"X0" if inplace else "Y0",
dims=expand_dims,
device_option=dc[0]
)
workspace.FeedBlob('X0', X, dc[0])
workspace.RunOperatorOnce(op0)
Y0 = workspace.FetchBlob("X0" if inplace else "Y0")
op1 = core.CreateOperator(
"ExpandDims",
"X1",
"X1" if inplace else "Y1",
dims=expand_dims,
device_option=dc[1]
)
workspace.FeedBlob('X1', X, dc[0])
workspace.RunOperatorOnce(op1)
Y1 = workspace.FetchBlob("X1" if inplace else "Y1")
if not np.allclose(Y0, Y1, atol=0.01, rtol=0.01):
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/expanddims_squeeze_op_test.py
|
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestSpatialBN(hu.HypothesisTestCase):
@given(size=st.integers(7, 10),
input_channels=st.integers(7, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
inplace=st.sampled_from([True, False]),
**mu.gcs)
@settings(deadline=1000)
def test_spatialbn_test_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["X" if inplace else "Y"],
order=order,
is_test=True,
epsilon=epsilon
)
def reference_spatialbn_test(X, scale, bias, mean, var):
if order == "NCHW":
scale = scale[np.newaxis, :, np.newaxis, np.newaxis]
bias = bias[np.newaxis, :, np.newaxis, np.newaxis]
mean = mean[np.newaxis, :, np.newaxis, np.newaxis]
var = var[np.newaxis, :, np.newaxis, np.newaxis]
return ((X - mean) / np.sqrt(var + epsilon) * scale + bias,)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(7, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(1e-5, 1e-2),
inplace=st.sampled_from([True, False]),
**mu.gcs)
def test_spatialbn_train_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, gc, dc):
print("dc0: {}, dc1: {}".format(dc[0], dc[1]))
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "running_mean", "running_var"],
["X" if inplace else "Y",
"running_mean", "running_var", "saved_mean", "saved_var"],
order=order,
is_test=False,
epsilon=epsilon,
)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
running_mean = np.random.randn(input_channels).astype(np.float32)
running_var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
        # TODO: It looks like the IDEEP spatial_bn op outputs saved_var
        # (output[4]) as the reciprocal of the CPU op's output. Need to
        # check back and add output[4] to the comparison.
self.assertDeviceChecks(dc, op, [X, scale, bias, running_mean, running_var],
[0, 1, 2, 3])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**mu.gcs)
@settings(deadline=None, max_examples=50)
def test_spatialbn_train_mode_gradient_check(
self, size, input_channels, batch_size, seed, order, epsilon,
gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y", "mean", "var", "saved_mean", "saved_var"],
order=order,
is_test=False,
epsilon=epsilon,
)
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
for input_to_check in [0, 1, 2]: # dX, dScale, dBias
self.assertGradientChecks(gc, op, [X, scale, bias, mean, var],
input_to_check, [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/spatial_bn_op_test.py
|
import unittest
import numpy as np
import hypothesis.strategies as st
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
from hypothesis import given, settings
from caffe2.python import core, workspace
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class OrderSwitchTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 128),
c=st.integers(1, 64),
h=st.integers(1, 128),
w=st.integers(1, 128),
**mu.gcs)
@settings(max_examples=10, deadline=None)
def test_nchw2nhwc(self, n, c, h, w, gc, dc):
op = core.CreateOperator(
"NCHW2NHWC",
["X"],
["Y"],
)
X = np.random.rand(n, c, h, w).astype(np.float32) - 0.5
self.assertDeviceChecks(dc, op, [X], [0])
@given(n=st.integers(1, 128),
c=st.integers(1, 64),
h=st.integers(1, 128),
w=st.integers(1, 128),
**mu.gcs)
@settings(deadline=None, max_examples=50)
def test_nhwc2nchw(self, n, c, h, w, gc, dc):
op0 = core.CreateOperator(
"NCHW2NHWC",
["X"],
["Y"],
)
op1 = core.CreateOperator(
"NHWC2NCHW",
["Y"],
["Z"],
)
X = np.random.rand(n, c, h, w).astype(np.float32) - 0.5
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('X', X, dc[0])
op0.device_option.CopyFrom(dc[0])
op1.device_option.CopyFrom(dc[0])
workspace.RunOperatorOnce(op0)
workspace.RunOperatorOnce(op1)
Z0 = workspace.FetchBlob("Z")
workspace.ResetWorkspace()
workspace.FeedBlob('X', X, dc[1])
op0.device_option.CopyFrom(dc[1])
op1.device_option.CopyFrom(dc[1])
workspace.RunOperatorOnce(op0)
workspace.RunOperatorOnce(op1)
Z1 = workspace.FetchBlob("Z")
if not np.allclose(Z0, Z1, atol=0.01, rtol=0.01):
print(Z1.flatten())
print(Z0.flatten())
print(np.max(np.abs(Z1 - Z0)))
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
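# A minimal numpy sketch (an assumption, not taken from the operators) of the
# layout round trip checked above: NCHW2NHWC followed by NHWC2NCHW should be
# the identity, matching these two transposes.
def _order_switch_roundtrip_sketch():
    x = np.random.rand(2, 3, 4, 5).astype(np.float32)  # NCHW
    y = x.transpose(0, 2, 3, 1)  # NCHW -> NHWC
    z = y.transpose(0, 3, 1, 2)  # NHWC -> NCHW
    assert np.array_equal(x, z)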
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/order_switch_op_test.py
|
import unittest
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class DropoutTest(hu.HypothesisTestCase):
@given(X=hu.tensor(),
in_place=st.booleans(),
ratio=st.floats(0, 0.999),
**mu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'],
ratio=ratio, is_test=True)
self.assertDeviceChecks(dc, op, [X], [0])
# No sense in checking gradients for test phase
def reference_dropout_test(x):
            return x, np.ones(x.shape, dtype=np.bool_)
self.assertReferenceChecks(
gc, op, [X], reference_dropout_test,
# The 'mask' output may be uninitialized
outputs_to_check=[0])
@given(X=hu.tensor(),
in_place=st.booleans(),
output_mask=st.booleans(),
**mu.gcs)
    @unittest.skipIf(True, "Skip due to different rand seed.")
def test_dropout_ratio0(self, X, in_place, output_mask, gc, dc):
"""Test with ratio=0 for a deterministic reference impl."""
is_test = not output_mask
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'] +
(['mask'] if output_mask else []),
ratio=0.0, is_test=is_test)
self.assertDeviceChecks(dc, op, [X], [0])
def reference_dropout_ratio0(x):
            return (x,) if is_test else (x, np.ones(x.shape, dtype=np.bool_))
self.assertReferenceChecks(
gc, op, [X], reference_dropout_ratio0, outputs_to_check=[0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/dropout_op_test.py
|
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.models.download import ModelDownloader
import numpy as np
import argparse
import time
def GetArgumentParser():
parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="The batch size."
)
parser.add_argument("--model", type=str, help="The model to benchmark.")
parser.add_argument(
"--order",
type=str,
default="NCHW",
help="The order to evaluate."
)
parser.add_argument(
"--device",
type=str,
default="CPU",
help="device to evaluate on."
)
parser.add_argument(
"--cudnn_ws",
type=int,
help="The cudnn workspace size."
)
parser.add_argument(
"--iterations",
type=int,
default=10,
help="Number of iterations to run the network."
)
parser.add_argument(
"--warmup_iterations",
type=int,
default=10,
help="Number of warm-up iterations before benchmarking."
)
parser.add_argument(
"--forward_only",
action='store_true',
help="If set, only run the forward pass."
)
parser.add_argument(
"--layer_wise_benchmark",
action='store_true',
help="If True, run the layer-wise benchmark as well."
)
parser.add_argument(
"--engine",
type=str,
default="",
help="If set, blindly prefer the given engine(s) for every op.")
parser.add_argument(
"--dump_model",
action='store_true',
help="If True, dump the model prototxts to disk."
)
parser.add_argument("--net_type", type=str, default="simple")
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--use-nvtx", default=False, action='store_true')
parser.add_argument("--htrace_span_log_path", type=str)
return parser
def benchmark(args):
print('Batch size: {}'.format(args.batch_size))
mf = ModelDownloader()
init_net, pred_net, value_info = mf.get_c2_model(args.model)
    input_shapes = {k: [args.batch_size] + v[-1][1:] for (k, v) in value_info.items()}
print("input info: {}".format(input_shapes))
external_inputs = {}
for k, v in input_shapes.items():
external_inputs[k] = np.random.randn(*v).astype(np.float32)
if args.device == 'CPU':
device_option = core.DeviceOption(caffe2_pb2.CPU)
elif args.device == 'MKL':
device_option = core.DeviceOption(caffe2_pb2.MKLDNN)
elif args.device == 'IDEEP':
device_option = core.DeviceOption(caffe2_pb2.IDEEP)
else:
raise Exception("Unknown device: {}".format(args.device))
print("Device option: {}, {}".format(args.device, device_option))
pred_net.device_option.CopyFrom(device_option)
for op in pred_net.op:
op.device_option.CopyFrom(device_option)
    # Hack to initialize weights in the MKL/IDEEP context
workspace.RunNetOnce(init_net)
bb = workspace.Blobs()
weights = {}
for b in bb:
weights[b] = workspace.FetchBlob(b)
for k, v in external_inputs.items():
weights[k] = v
workspace.ResetWorkspace()
with core.DeviceScope(device_option):
for name, blob in weights.items():
#print("{}".format(name))
workspace.FeedBlob(name, blob, device_option)
workspace.CreateNet(pred_net)
start = time.time()
res = workspace.BenchmarkNet(pred_net.name,
args.warmup_iterations,
args.iterations,
args.layer_wise_benchmark)
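    # BenchmarkNet returns per-iteration runtimes in milliseconds; res[0] is
    # the whole-net time, so FPS = batch_size / (res[0] / 1000).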
print("FPS: {:.2f}".format(1/res[0]*1000*args.batch_size))
if __name__ == '__main__':
args, extra_args = GetArgumentParser().parse_known_args()
    if not args.batch_size or not args.model or not args.order:
        GetArgumentParser().print_help()
    else:
        benchmark(args)
|
pytorch-master
|
caffe2/python/ideep/test_ideep_net.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ChannelShuffleTest(hu.HypothesisTestCase):
@given(size=st.integers(8, 10),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 32),
group=st.integers(2, 4),
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
**mu.gcs)
@settings(max_examples=10, deadline=None)
def test_channel_shuffle(self, size, input_channels, batch_size, group, stride, pad, kernel, gc, dc):
op = core.CreateOperator(
"ChannelShuffle",
["X"],
["Y"],
group=group,
stride=stride,
pad=pad,
kernel=kernel,
)
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/channel_shuffle_op_test.py
|
import unittest
import numpy as np
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import core, workspace, timeout_guard
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class BlobsQueueDBTest(unittest.TestCase):
def test_create_blobs_queue_db_string(self):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
def add_blobs(queue, num_samples):
blob = core.BlobReference("blob")
status = core.BlobReference("blob_status")
for i in range(num_samples):
self._add_blob_to_queue(
queue, self._create_test_tensor_protos(i), blob, status
)
self._test_create_blobs_queue_db(add_blobs)
def test_create_blobs_queue_db_tensor(self):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
def add_blobs(queue, num_samples):
blob = core.BlobReference("blob")
status = core.BlobReference("blob_status")
for i in range(num_samples):
data = self._create_test_tensor_protos(i)
data = np.array([data], dtype=str)
self._add_blob_to_queue(
queue, data, blob, status
)
self._test_create_blobs_queue_db(add_blobs)
def _test_create_blobs_queue_db(self, add_blobs_fun):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
num_samples = 10000
batch_size = 10
init_net = core.Net('init_net')
net = core.Net('test_create_blobs_queue_db')
queue = init_net.CreateBlobsQueue([], 'queue', capacity=num_samples)
reader = init_net.CreateBlobsQueueDB(
[queue],
'blobs_queue_db_reader',
value_blob_index=0,
timeout_secs=0.1,
)
workspace.RunNetOnce(init_net)
add_blobs_fun(queue, num_samples)
net.TensorProtosDBInput(
[reader],
['image', 'label'],
batch_size=batch_size
)
workspace.CreateNet(net)
close_net = core.Net('close_net')
close_net.CloseBlobsQueue([queue], [])
for i in range(int(num_samples / batch_size)):
with timeout_guard.CompleteInTimeOrDie(2.0):
workspace.RunNet(net)
images = workspace.FetchBlob('image')
labels = workspace.FetchBlob('label')
self.assertEqual(batch_size, len(images))
self.assertEqual(batch_size, len(labels))
for idx, item in enumerate(images):
self.assertEqual(
"foo{}".format(i * batch_size + idx).encode('utf-8'), item
)
for item in labels:
self.assertEqual(1, item)
workspace.RunNetOnce(close_net)
def _add_blob_to_queue(self, queue, data, blob, status):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
workspace.FeedBlob(blob, data, core.DeviceOption(caffe2_pb2.CPU, 0))
op = core.CreateOperator(
"SafeEnqueueBlobs",
[queue, blob],
[blob, status],
)
workspace.RunOperatorOnce(op)
def _create_test_tensor_protos(self, idx):
item = caffe2_pb2.TensorProtos()
data = item.protos.add()
data.data_type = core.DataType.STRING
data.string_data.append("foo{}".format(idx).encode('utf-8'))
label = item.protos.add()
label.data_type = core.DataType.INT32
label.int32_data.append(1)
return item.SerializeToString()
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/blobs_queue_db_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class LRNTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
im_size=st.integers(1, 10),
order=st.sampled_from(["NCHW"]),
**mu.gcs)
@settings(deadline=10000)
def test_LRN(self, input_channels,
batch_size, im_size, order,
gc, dc):
op = core.CreateOperator(
"LRN",
["X"],
["Y", "Y_scale"],
size=5,
alpha=0.001,
beta=0.75,
bias=2.0,
order=order,
)
X = np.random.rand(
batch_size, input_channels, im_size, im_size).astype(np.float32)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/LRN_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ElementwiseSumTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inputs=st.integers(2, 7),
inplace=st.booleans(),
**mu.gcs)
def test_elementwise_sum(self,
size,
input_channels,
batch_size,
inputs,
inplace,
gc,
dc):
op = core.CreateOperator(
"Sum",
["X_{}".format(i) for i in range(inputs)],
["X_0" if inplace else "Y"],
)
Xs = [np.random.rand(batch_size, input_channels, size, size).astype(
np.float32) for _ in range(inputs)]
self.assertDeviceChecks(dc, op, Xs, [0])
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inputs=st.integers(2, 7),
inplace=st.booleans(),
**mu.gcs_cpu_ideep)
def test_elementwise_sum_fallback(self,
size,
input_channels,
batch_size,
inputs,
inplace,
gc,
dc):
op = core.CreateOperator(
"Sum",
["X_{}".format(i) for i in range(inputs)],
["X_0" if inplace else "Y"],
device_option=dc[1]
)
Xs = [np.random.rand(batch_size, input_channels, size, size).astype(
np.float32) for _ in range(inputs)]
        sum_val = Xs[0].copy()  # copy so the += below does not mutate Xs[0]
workspace.FeedBlob("X_0", Xs[0], dc[0])
for i, x in enumerate(Xs):
            if i == 0:
                continue
sum_val += x
workspace.FeedBlob("X_{}".format(i), x, dc[1])
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob("X_0" if inplace else "Y")
if not np.allclose(sum_val, Y, atol=0.01, rtol=0.01):
print(Y.flatten())
print(sum_val.flatten())
print(np.max(np.abs(Y - sum_val)))
self.assertTrue(False)
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inputs=st.integers(2, 7),
inplace=st.booleans(),
**mu.gcs_cpu_ideep)
def test_int8_elementwise_sum(self,
size,
input_channels,
batch_size,
inputs,
inplace,
gc,
dc):
sum_fp32 = core.CreateOperator(
"Sum",
["X_{}".format(i) for i in range(inputs)],
["X_0" if inplace else "Y"],
)
Xs = [np.random.rand(batch_size, input_channels, size, size).astype(
np.float32) for _ in range(inputs)]
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
Xi_scales = []
Xi_zero_points = []
for i, X in enumerate(Xs):
workspace.FeedBlob("X_{}".format(i), X, dc[0])
if X.min() >= 0:
Xi_scales.append(np.absolute(X).max() / 0xFF)
Xi_zero_points.append(0)
else:
Xi_scales.append(np.absolute(X).max() / 0x7F)
Xi_zero_points.append(128)
workspace.RunOperatorOnce(sum_fp32)
Y = workspace.FetchBlob("X_0" if inplace else "Y")
if Y.min() >= 0:
Y_scale = np.absolute(Y).max() / 0xFF
Y_zero_point = 0
else:
Y_scale = np.absolute(Y).max() / 0x7F
Y_zero_point = 128
workspace.ResetWorkspace()
net = caffe2_pb2.NetDef()
for i, Xi in enumerate(Xs):
workspace.FeedBlob("Xi_{}".format(i), Xi, dc[1])
sw2nhwc = core.CreateOperator(
"NCHW2NHWC",
["Xi_{}".format(i)],
["Xi_{}_nhwc".format(i)],
device_option=dc[1]
)
quantize = core.CreateOperator(
"Int8Quantize",
["Xi_{}_nhwc".format(i)],
["Xi_{}_quantized".format(i)],
engine="DNNLOWP",
device_option=dc[1],
Y_zero_point=Xi_zero_points[i],
Y_scale=Xi_scales[i],
)
net.op.extend([sw2nhwc, quantize])
sum = core.CreateOperator(
"Int8Sum",
["Xi_{}_quantized".format(i) for i in range(inputs)],
["Xi_0_quantized" if inplace else "Y_quantized"],
engine="DNNLOWP",
device_option=dc[1],
Y_zero_point=Y_zero_point,
Y_scale=Y_scale,
)
dequantize = core.CreateOperator(
"Int8Dequantize",
["Xi_0_quantized" if inplace else "Y_quantized"],
["Y_nhwc"],
engine="DNNLOWP",
device_option=dc[1],
)
sw2nchw = core.CreateOperator(
"NHWC2NCHW",
["Y_nhwc"],
["Y_out"],
device_option=dc[1]
)
net.op.extend([sum, dequantize, sw2nchw])
workspace.RunNetOnce(net)
Y_out = workspace.FetchBlob("Y_out")
MSE = np.square(np.subtract(Y, Y_out)).mean()
if MSE > 0.005:
print(Y.flatten())
print(Y_out.flatten())
print(np.max(np.abs(Y_out - Y)))
print("MSE", MSE)
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
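# A minimal numpy sketch (illustration only) of the scale/zero-point
# convention used above: non-negative tensors use the full uint8 range with
# zero_point 0; signed tensors use a symmetric range with zero_point 128.
def _quant_roundtrip_sketch(x):
    if x.min() >= 0:
        scale, zero_point = np.absolute(x).max() / 0xFF, 0
    else:
        scale, zero_point = np.absolute(x).max() / 0x7F, 128
    q = np.rint(x / scale).astype(np.int32) + zero_point
    return (q - zero_point).astype(np.float32) * scale  # approx. recovers x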
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/elementwise_sum_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
import math
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.transformations import optimizeForMKLDNN
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ConvFusionTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 20),
input_channels=st.integers(1, 16),
output_channels=st.integers(1, 16),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
group=st.integers(1, 1),
**mu.gcs)
def test_convolution_relu_fusion(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, gc, dc):
conv = core.CreateOperator(
"Conv",
["X0", "w0", "b0"] if use_bias else ["X0", "w0"],
["Y0"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
device_option=dc[0]
)
relu = core.CreateOperator(
"Relu",
["Y0"],
["Y0"],
device_option=dc[0]
)
# Manual fusion for Conv + ReLU
conv_fusion = core.CreateOperator(
"ConvFusion",
["X1", "w1", "b1"] if use_bias else ["X1", "w1"],
["Y1"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
            fusion_type=1,
device_option=dc[1]
)
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels * group).astype(np.float32) - 0.5
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('X0', X, dc[0])
workspace.FeedBlob('w0', w, dc[0])
workspace.FeedBlob('b0', b, dc[0])
workspace.RunOperatorOnce(conv)
workspace.RunOperatorOnce(relu)
Y0 = workspace.FetchBlob('Y0')
workspace.ResetWorkspace()
workspace.FeedBlob('X1', X, dc[1])
workspace.FeedBlob('w1', w, dc[1])
workspace.FeedBlob('b1', b, dc[1])
workspace.RunOperatorOnce(conv_fusion)
Y1 = workspace.FetchBlob('Y1')
if not np.allclose(Y0, Y1, atol=0.01, rtol=0.01):
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
# Auto fusion for Conv + ReLU
workspace.ResetWorkspace()
old_net = caffe2_pb2.NetDef()
conv_old = caffe2_pb2.OperatorDef()
conv_old.CopyFrom(conv)
conv_old.device_option.CopyFrom(dc[1])
relu_old = caffe2_pb2.OperatorDef()
relu_old.CopyFrom(relu)
relu_old.device_option.CopyFrom(dc[1])
old_net.op.extend([conv_old, relu_old])
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
net = core.Net("net")
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
self.assertTrue(len(net.Proto().op) == 1)
self.assertTrue(net.Proto().op[0].type == "ConvFusion")
workspace.RunOperatorOnce(net.Proto().op[0])
Y2 = workspace.FetchBlob('Y0')
if not np.allclose(Y0, Y2, atol=0.01, rtol=0.01):
print(Y2.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y2 - Y0)))
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 20),
input_channels=st.integers(1, 16),
output_channels=st.integers(1, 16),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
group=st.integers(1, 1),
sum_add=st.sampled_from(["Sum", "Add"]),
**mu.gcs)
def test_convolution_sum_fusion(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, sum_add, gc, dc):
pool_S0 = core.CreateOperator(
"MaxPool",
["SX0"],
["S0"],
stride=2,
pad=0,
kernel=2,
device_option=dc[0]
)
conv = core.CreateOperator(
"Conv",
["X0", "w0", "b0"] if use_bias else ["X0", "w0"],
["Y0"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
device_option=dc[0]
)
sum = core.CreateOperator(
sum_add,
["S0", "Y0"],
["S0"],
device_option=dc[0]
)
# Manual fusion for Conv + Sum
pool_S1 = core.CreateOperator(
"MaxPool",
["SX1"],
["S1"],
stride=2,
pad=0,
kernel=2,
group=group,
device_option=dc[1]
)
conv_fusion = core.CreateOperator(
"ConvFusion",
["X1", "w1", "b1", "S1"] if use_bias else ["X1", "w1", "S1"],
["S1"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
            fusion_type=2,
device_option=dc[1]
)
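        # S1 must match the Conv output spatially: conv output size is
        # ceil((size + 2 * pad - kernel + 1) / stride), and the 2x2/stride-2
        # MaxPool halves its input, hence the factor of 2 below.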
        pool_input_size = int(math.ceil(float(size + 2 * pad - kernel + 1) / stride)) * 2
SX = np.random.rand(
batch_size, output_channels * group, pool_input_size, pool_input_size).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels * group).astype(np.float32) - 0.5
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('SX0', SX, dc[0])
workspace.FeedBlob('X0', X, dc[0])
workspace.FeedBlob('w0', w, dc[0])
workspace.FeedBlob('b0', b, dc[0])
workspace.RunOperatorOnce(pool_S0)
workspace.RunOperatorOnce(conv)
workspace.RunOperatorOnce(sum)
S0 = workspace.FetchBlob('S0')
workspace.ResetWorkspace()
workspace.FeedBlob('SX1', SX, dc[1])
workspace.FeedBlob('X1', X, dc[1])
workspace.FeedBlob('w1', w, dc[1])
workspace.FeedBlob('b1', b, dc[1])
workspace.RunOperatorOnce(pool_S1)
workspace.RunOperatorOnce(conv_fusion)
S1 = workspace.FetchBlob('S1')
if not np.allclose(S0, S1, atol=0.01, rtol=0.01):
print(S1.flatten())
print(S0.flatten())
print(np.max(np.abs(S1 - S0)))
self.assertTrue(False)
# Auto fusion for Conv + Sum
workspace.ResetWorkspace()
old_net = caffe2_pb2.NetDef()
pool_S0_old = caffe2_pb2.OperatorDef()
pool_S0_old.CopyFrom(pool_S0)
pool_S0_old.device_option.CopyFrom(dc[1])
conv_old = caffe2_pb2.OperatorDef()
conv_old.CopyFrom(conv)
conv_old.device_option.CopyFrom(dc[1])
sum_old = caffe2_pb2.OperatorDef()
sum_old.CopyFrom(sum)
sum_old.device_option.CopyFrom(dc[1])
old_net.op.extend([pool_S0_old, conv_old, sum_old])
        # Case where Conv + Sum should be fused: [PreNode, Conv, Sum]
workspace.FeedBlob('SX0', SX, dc[1])
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
net = core.Net("net")
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
self.assertTrue(len(net.Proto().op) == 2)
self.assertTrue(net.Proto().op[1].type == "ConvFusion")
workspace.RunNetOnce(net.Proto())
        # The optimization sometimes renames the output tensor when applying
        # conv sum fusion.
S2 = workspace.FetchBlob(net.Proto().op[-1].output[0])
if not np.allclose(S0, S2, atol=0.01, rtol=0.01):
print(S2.flatten())
print(S0.flatten())
print(np.max(np.abs(S2 - S0)))
self.assertTrue(False)
        # Case where Conv + Sum should be fused: [Conv, PreNode, Sum]
workspace.ResetWorkspace()
old_net = caffe2_pb2.NetDef()
workspace.FeedBlob('SX0', SX, dc[1])
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
old_net.op.extend([conv_old, pool_S0_old, sum_old])
net = core.Net("net")
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
self.assertTrue(len(net.Proto().op) == 2)
self.assertTrue(net.Proto().op[1].type == "ConvFusion")
workspace.RunNetOnce(net.Proto())
        # The optimization sometimes renames the output tensor when applying
        # conv sum fusion.
S2 = workspace.FetchBlob(net.Proto().op[-1].output[0])
if not np.allclose(S0, S2, atol=0.01, rtol=0.01):
print(S2.flatten())
print(S0.flatten())
print(np.max(np.abs(S2 - S0)))
self.assertTrue(False)
        # Case where Conv + Sum should not be fused: [Conv, midOp, preNode, Sum];
        # the Conv output is consumed by midOp.
dropout = core.CreateOperator(
"Dropout",
["Y0"],
["Y_dropout"],
ratio=0.5,
is_test=True,
device_option=dc[1]
)
workspace.ResetWorkspace()
workspace.FeedBlob('SX0', SX, dc[1])
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
old_net = caffe2_pb2.NetDef()
old_net.op.extend([conv_old, dropout, pool_S0_old, sum_old])
net = core.Net("net")
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
self.assertTrue(len(net.Proto().op) == 4)
workspace.RunNetOnce(net.Proto())
S2 = workspace.FetchBlob(net.Proto().op[-1].output[0])
if not np.allclose(S0, S2, atol=0.01, rtol=0.01):
print(S2.flatten())
print(S0.flatten())
print(np.max(np.abs(S2 - S0)))
self.assertTrue(False)
        # Case where Conv + Sum should not be fused: [Conv, preNode, Sum, midOp];
        # the preNode output is consumed by midOp.
sum1 = core.CreateOperator(
sum_add,
["S0", "Y0"],
["S3"],
device_option=dc[1]
)
dropout = core.CreateOperator(
"Dropout",
["S0"],
["Y_dropout"],
ratio=0.5,
is_test=True,
device_option=dc[1]
)
workspace.ResetWorkspace()
workspace.FeedBlob('SX0', SX, dc[1])
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
old_net = caffe2_pb2.NetDef()
old_net.op.extend([conv_old, pool_S0_old, sum1, dropout])
net = core.Net("net")
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
print("net={}\n".format(net.Proto()))
self.assertTrue(len(net.Proto().op) == 4)
workspace.RunNetOnce(net.Proto())
S2 = workspace.FetchBlob(net.Proto().op[-2].output[0])
if not np.allclose(S0, S2, atol=0.01, rtol=0.01):
print(S2.flatten())
print(S0.flatten())
print(np.max(np.abs(S2 - S0)))
self.assertTrue(False)
        # Case where Conv + Sum should not be fused: [Conv, midOp, preNode, Sum];
        # the midOp output has the same name as the Conv input.
relu_0 = core.CreateOperator(
"Relu",
["X0"],
["X1"],
device_option=dc[0]
)
conv = core.CreateOperator(
"Conv",
["X1", "w0", "b0"] if use_bias else ["X1", "w0"],
["Y0"],
stride=1,
pad=0,
kernel=1,
device_option=dc[0]
)
relu_1 = core.CreateOperator(
"Relu",
["X1"],
["X1"],
device_option=dc[0]
)
pool = core.CreateOperator(
"MaxPool",
["X1"],
["S0"],
stride=1,
pad=0,
kernel=1,
device_option=dc[0]
)
sum = core.CreateOperator(
"Sum",
["S0", "Y0"],
["S0"],
device_option=dc[0]
)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
w = np.random.rand(
input_channels, input_channels, 1, 1).astype(np.float32) - 0.5
b = np.random.rand(input_channels).astype(np.float32) - 0.5
workspace.SwitchWorkspace(old_ws_name)
workspace.ResetWorkspace()
workspace.FeedBlob('X0', X, dc[0])
workspace.FeedBlob('w0', w, dc[0])
workspace.FeedBlob('b0', b, dc[0])
workspace.RunOperatorOnce(relu_0)
workspace.RunOperatorOnce(conv)
workspace.RunOperatorOnce(relu_1)
workspace.RunOperatorOnce(pool)
workspace.RunOperatorOnce(sum)
S0 = workspace.FetchBlob('S0')
workspace.ResetWorkspace()
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
relu_0_old = caffe2_pb2.OperatorDef()
relu_0_old.CopyFrom(relu_0)
relu_0_old.device_option.CopyFrom(dc[1])
conv_old = caffe2_pb2.OperatorDef()
conv_old.CopyFrom(conv)
conv_old.device_option.CopyFrom(dc[1])
relu_1_old = caffe2_pb2.OperatorDef()
relu_1_old.CopyFrom(relu_1)
relu_1_old.device_option.CopyFrom(dc[1])
pool_old = caffe2_pb2.OperatorDef()
pool_old.CopyFrom(pool)
pool_old.device_option.CopyFrom(dc[1])
sum_old = caffe2_pb2.OperatorDef()
sum_old.CopyFrom(sum)
sum_old.device_option.CopyFrom(dc[1])
old_net = caffe2_pb2.NetDef()
old_net.op.extend([relu_0_old, conv_old, relu_1_old, pool_old, sum_old])
net = core.Net("net")
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
self.assertTrue(len(net.Proto().op) == 5)
workspace.RunNetOnce(net.Proto())
S2 = workspace.FetchBlob(net.Proto().op[-1].output[0])
if not np.allclose(S0, S2, atol=0.01, rtol=0.01):
print(S2.flatten())
print(S0.flatten())
print(np.max(np.abs(S2 - S0)))
self.assertTrue(False)
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 20),
input_channels=st.integers(1, 16),
output_channels=st.integers(1, 16),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
group=st.integers(1, 1),
sum_add=st.sampled_from(["Sum", "Add"]),
**mu.gcs)
def test_convolution_sum_relu_fusion(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, sum_add, gc, dc):
conv_S0 = core.CreateOperator(
"Conv",
["SX0", "Sw0", "Sb0"] if use_bias else ["SX0", "Sw0"],
["S0"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
device_option=dc[0]
)
conv = core.CreateOperator(
"Conv",
["X0", "w0", "b0"] if use_bias else ["X0", "w0"],
["Y0"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
device_option=dc[0]
)
sum = core.CreateOperator(
sum_add,
["S0", "Y0"],
["S0"],
device_option=dc[0]
)
relu = core.CreateOperator(
"Relu",
["S0"],
["S0"],
device_option=dc[0]
)
# Manual fusion for Conv + Sum + ReLU
conv_S1 = core.CreateOperator(
"Conv",
["SX1", "Sw1", "Sb1"] if use_bias else ["SX1", "Sw1"],
["S1"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
device_option=dc[1]
)
conv_fusion = core.CreateOperator(
"ConvFusion",
["X1", "w1", "b1", "S1"] if use_bias else ["X1", "w1", "S1"],
["S1"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
            fusion_type=3,
device_option=dc[1]
)
SX = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
Sw = np.random.rand(
output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
Sb = np.random.rand(output_channels * group).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels * group).astype(np.float32) - 0.5
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('SX0', SX, dc[0])
workspace.FeedBlob('Sw0', Sw, dc[0])
workspace.FeedBlob('Sb0', Sb, dc[0])
workspace.FeedBlob('X0', X, dc[0])
workspace.FeedBlob('w0', w, dc[0])
workspace.FeedBlob('b0', b, dc[0])
workspace.RunOperatorOnce(conv_S0)
workspace.RunOperatorOnce(conv)
workspace.RunOperatorOnce(sum)
workspace.RunOperatorOnce(relu)
S0 = workspace.FetchBlob('S0')
workspace.ResetWorkspace()
workspace.FeedBlob('SX1', SX, dc[1])
workspace.FeedBlob('Sw1', Sw, dc[1])
workspace.FeedBlob('Sb1', Sb, dc[1])
workspace.FeedBlob('X1', X, dc[1])
workspace.FeedBlob('w1', w, dc[1])
workspace.FeedBlob('b1', b, dc[1])
workspace.RunOperatorOnce(conv_S1)
workspace.RunOperatorOnce(conv_fusion)
S1 = workspace.FetchBlob('S1')
if not np.allclose(S0, S1, atol=0.01, rtol=0.01):
print(S1.flatten())
print(S0.flatten())
print(np.max(np.abs(S1 - S0)))
self.assertTrue(False)
# Auto fusion for Conv + Sum + ReLU
workspace.ResetWorkspace()
old_net = caffe2_pb2.NetDef()
conv_S0_old = caffe2_pb2.OperatorDef()
conv_S0_old.CopyFrom(conv_S0)
conv_S0_old.device_option.CopyFrom(dc[1])
conv_old = caffe2_pb2.OperatorDef()
conv_old.CopyFrom(conv)
conv_old.device_option.CopyFrom(dc[1])
sum_old = caffe2_pb2.OperatorDef()
sum_old.CopyFrom(sum)
sum_old.device_option.CopyFrom(dc[1])
relu_old = caffe2_pb2.OperatorDef()
relu_old.CopyFrom(relu)
relu_old.device_option.CopyFrom(dc[1])
old_net.op.extend([conv_S0_old, conv_old, sum_old, relu_old])
workspace.FeedBlob('SX0', SX, dc[1])
workspace.FeedBlob('Sw0', Sw, dc[1])
workspace.FeedBlob('Sb0', Sb, dc[1])
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
net = core.Net("net")
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
self.assertTrue(len(net.Proto().op) == 2)
self.assertTrue(net.Proto().op[1].type == "ConvFusion")
workspace.RunNetOnce(net.Proto())
        # The optimization sometimes renames the output tensor when applying
        # conv sum fusion.
S2 = workspace.FetchBlob(net.Proto().op[-1].output[0])
if not np.allclose(S0, S2, atol=0.01, rtol=0.01):
print(S2.flatten())
print(S0.flatten())
print(np.max(np.abs(S2 - S0)))
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 20),
input_channels=st.integers(7, 17),
output_channels=st.integers(5, 15),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
group=st.integers(2, 5),
**mu.gcs)
def test_convolution_grouped_sum_relu_fusion(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, gc, dc):
conv_S0 = core.CreateOperator(
"Conv",
["SX0", "Sw0", "Sb0"] if use_bias else ["SX0", "Sw0"],
["S0"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
device_option=dc[0]
)
conv = core.CreateOperator(
"Conv",
["X0", "w0", "b0"] if use_bias else ["X0", "w0"],
["Y0"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
device_option=dc[0]
)
sum = core.CreateOperator(
"Sum",
["S0", "Y0"],
["S0"],
device_option=dc[0]
)
relu = core.CreateOperator(
"Relu",
["S0"],
["S0"],
device_option=dc[0]
)
SX = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
Sw = np.random.rand(
output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
Sb = np.random.rand(output_channels * group).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels * group).astype(np.float32) - 0.5
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('SX0', SX, dc[0])
workspace.FeedBlob('Sw0', Sw, dc[0])
workspace.FeedBlob('Sb0', Sb, dc[0])
workspace.FeedBlob('X0', X, dc[0])
workspace.FeedBlob('w0', w, dc[0])
workspace.FeedBlob('b0', b, dc[0])
workspace.RunOperatorOnce(conv_S0)
workspace.RunOperatorOnce(conv)
workspace.RunOperatorOnce(sum)
workspace.RunOperatorOnce(relu)
S0 = workspace.FetchBlob('S0')
workspace.ResetWorkspace()
old_net = caffe2_pb2.NetDef()
conv_S0_old = caffe2_pb2.OperatorDef()
conv_S0_old.CopyFrom(conv_S0)
conv_S0_old.device_option.CopyFrom(dc[1])
conv_old = caffe2_pb2.OperatorDef()
conv_old.CopyFrom(conv)
conv_old.device_option.CopyFrom(dc[1])
sum_old = caffe2_pb2.OperatorDef()
sum_old.CopyFrom(sum)
sum_old.device_option.CopyFrom(dc[1])
relu_old = caffe2_pb2.OperatorDef()
relu_old.CopyFrom(relu)
relu_old.device_option.CopyFrom(dc[1])
old_net.op.extend([conv_S0_old, conv_old, sum_old, relu_old])
workspace.FeedBlob('SX0', SX, dc[1])
workspace.FeedBlob('Sw0', Sw, dc[1])
workspace.FeedBlob('Sb0', Sb, dc[1])
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
net = core.Net("net")
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
workspace.RunNetOnce(net.Proto())
        # The optimization sometimes renames the output tensor when applying
        # conv sum fusion.
S2 = workspace.FetchBlob(net.Proto().op[-1].output[0])
if not np.allclose(S0, S2, atol=0.01, rtol=0.01):
print(S2.flatten())
print(S0.flatten())
print(np.max(np.abs(S2 - S0)))
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 20),
input_channels=st.integers(1, 16),
output_channels=st.integers(1, 16),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
group=st.integers(1, 1),
inplace=st.sampled_from([True, False]),
**mu.gcs)
def test_convolution_bn_folding(
self, stride, pad, kernel, size, input_channels,
output_channels, batch_size, use_bias, group,
inplace, gc, dc):
conv = core.CreateOperator(
"Conv",
["X0", "w0", "b0"] if use_bias else ["X0", "w0"],
["X1"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
device_option=dc[1]
)
bn = core.CreateOperator(
"SpatialBN",
["X1", "scale", "bias", "mean", "var"],
["X1" if inplace else "Y"],
is_test=True,
device_option=dc[1]
)
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels * group).astype(np.float32) - 0.5
scale = np.random.rand(output_channels).astype(np.float32) + 0.5
bias = np.random.rand(output_channels).astype(np.float32) - 0.5
mean = np.random.randn(output_channels).astype(np.float32)
var = np.absolute(np.random.rand(output_channels).astype(np.float32)) + 0.5
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
workspace.FeedBlob('scale', scale, dc[1])
workspace.FeedBlob('bias', bias, dc[1])
workspace.FeedBlob('mean', mean, dc[1])
workspace.FeedBlob('var', var, dc[1])
workspace.RunOperatorOnce(conv)
workspace.RunOperatorOnce(bn)
Y = workspace.FetchBlob('X1' if inplace else "Y")
workspace.ResetWorkspace()
old_net = caffe2_pb2.NetDef()
conv_old = caffe2_pb2.OperatorDef()
conv_old.CopyFrom(conv)
conv_old.device_option.CopyFrom(dc[1])
bn_old = caffe2_pb2.OperatorDef()
bn_old.CopyFrom(bn)
bn_old.device_option.CopyFrom(dc[1])
old_net.op.extend([conv_old, bn_old])
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
workspace.FeedBlob('scale', scale, dc[1])
workspace.FeedBlob('bias', bias, dc[1])
workspace.FeedBlob('mean', mean, dc[1])
workspace.FeedBlob('var', var, dc[1])
net = core.Net("net")
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
self.assertTrue(len(net.Proto().op) == 1)
self.assertTrue(net.Proto().op[0].type == "Conv")
workspace.RunOperatorOnce(net.Proto().op[0])
Y1 = workspace.FetchBlob('X1' if inplace else "Y")
if not np.allclose(Y, Y1, atol=0.01, rtol=0.01):
print(Y.flatten())
print(Y1.flatten())
print(np.max(np.abs(Y - Y1)))
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 20),
input_channels=st.integers(1, 16),
output_channels=st.integers(1, 16),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
group=st.integers(1, 1),
inplace=st.sampled_from([True, False]),
**mu.gcs)
def test_convolution_affch_folding(
self, stride, pad, kernel, size, input_channels,
output_channels, batch_size, use_bias, group,
inplace, gc, dc):
conv = core.CreateOperator(
"Conv",
["X0", "w0", "b0"] if use_bias else ["X0", "w0"],
["X1"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
device_option=dc[1]
)
affch = core.CreateOperator(
"AffineChannel",
["X1", "scale", "bias"],
["X1" if inplace else "Y"],
device_option=dc[1]
)
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels * group).astype(np.float32) - 0.5
scale = np.random.rand(output_channels).astype(np.float32) + 0.5
bias = np.random.rand(output_channels).astype(np.float32) - 0.5
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
workspace.FeedBlob('scale', scale, dc[1])
workspace.FeedBlob('bias', bias, dc[1])
workspace.RunOperatorOnce(conv)
workspace.RunOperatorOnce(affch)
Y = workspace.FetchBlob('X1' if inplace else "Y")
workspace.ResetWorkspace()
old_net = caffe2_pb2.NetDef()
conv_old = caffe2_pb2.OperatorDef()
conv_old.CopyFrom(conv)
conv_old.device_option.CopyFrom(dc[1])
affch_old = caffe2_pb2.OperatorDef()
affch_old.CopyFrom(affch)
affch_old.device_option.CopyFrom(dc[1])
old_net.op.extend([conv_old, affch_old])
workspace.FeedBlob('X0', X, dc[1])
workspace.FeedBlob('w0', w, dc[1])
workspace.FeedBlob('b0', b, dc[1])
workspace.FeedBlob('scale', scale, dc[1])
workspace.FeedBlob('bias', bias, dc[1])
net = core.Net("net")
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
self.assertTrue(len(net.Proto().op) == 1)
self.assertTrue(net.Proto().op[0].type == "Conv")
workspace.RunOperatorOnce(net.Proto().op[0])
Y1 = workspace.FetchBlob('X1' if inplace else "Y")
if not np.allclose(Y, Y1, atol=0.01, rtol=0.01):
print(Y.flatten())
print(Y1.flatten())
print(np.max(np.abs(Y - Y1)))
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
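# A minimal numpy sketch (an assumption about the folding arithmetic, not
# taken from optimizeForMKLDNN itself): SpatialBN in test mode is an affine
# map per output channel, so it can be folded into the preceding Conv by
# rescaling the weights and adjusting the bias, which is what the folding
# tests above verify end to end.
def _fold_bn_into_conv_sketch(w, b, scale, bias, mean, var, epsilon=1e-5):
    alpha = scale / np.sqrt(var + epsilon)  # per-output-channel factor
    w_folded = w * alpha[:, np.newaxis, np.newaxis, np.newaxis]
    b_folded = (b - mean) * alpha + bias
    return w_folded, b_folded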
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/convfusion_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class SoftmaxTest(hu.HypothesisTestCase):
@given(size=st.integers(8, 20),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_softmax(self, size, input_channels, batch_size, inplace, gc, dc):
op = core.CreateOperator(
"Softmax",
["X"],
["Y"],
axis=1,
)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/softmax_op_test.py
|
pytorch-master
|
caffe2/python/ideep/__init__.py
|
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TransposeTest(hu.HypothesisTestCase):
@given(
X=hu.tensor(min_dim=1, max_dim=5, dtype=np.float32), use_axes=st.booleans(), **mu.gcs)
@settings(deadline=None, max_examples=50)
def test_transpose(self, X, use_axes, gc, dc):
ndim = len(X.shape)
axes = np.arange(ndim)
np.random.shuffle(axes)
if use_axes:
op = core.CreateOperator(
"Transpose", ["X"], ["Y"], axes=axes, device_option=gc)
else:
op = core.CreateOperator(
"Transpose", ["X"], ["Y"], device_option=gc)
def transpose_ref(X):
if use_axes:
return [np.transpose(X, axes=axes)]
else:
return [np.transpose(X)]
self.assertReferenceChecks(gc, op, [X], transpose_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/transpose_op_test.py
|
import unittest
from functools import reduce
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class FcTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
@settings(deadline=1000)
def test_fc_2_dims(self, n, m, k, gc, dc):
X = np.random.rand(m, k).astype(np.float32) - 0.5
W = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
op = core.CreateOperator(
'FC',
['X', 'W', 'b'],
["Y"]
)
self.assertDeviceChecks(dc, op, [X, W, b], [0])
for i in range(3):
self.assertGradientChecks(gc, op, [X, W, b], i, [0])
@given(n=st.integers(1, 5),
m=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
axis=st.integers(1, 3),
**mu.gcs)
def test_fc_with_axis(self, n, m, c, h, w, axis, gc, dc):
X = np.random.rand(n, c, h, w).astype(np.float32) - 0.5
k = reduce((lambda x, y: x * y), [n, c, h, w][axis - 4:])
nn = reduce((lambda x, y: x * y), [n, c, h, w][:axis])
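        # FC with `axis` flattens X into an (nn, k) matrix:
        # nn = prod(shape[:axis]) rows and k = prod(shape[axis:]) columns.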
W = np.random.rand(m, k).astype(np.float32) - 0.5
b = np.random.rand(m).astype(np.float32) - 0.5
dY = np.random.rand(nn, m).astype(np.float32) - 0.5
op0 = core.CreateOperator(
'FC',
['X', 'W', 'b'],
["Y"],
axis=axis,
device_option=dc[0]
)
op0_bw = core.CreateOperator(
'FCGradient',
['X', 'W', 'dY'],
["dW", "db"],
axis=axis,
device_option=dc[0]
)
workspace.ResetWorkspace()
workspace.FeedBlob('X', X, dc[0])
workspace.FeedBlob('W', W, dc[0])
workspace.FeedBlob('b', b, dc[0])
workspace.RunOperatorOnce(op0)
Y0 = workspace.FetchBlob('Y')
workspace.FeedBlob('dY', dY, dc[0])
workspace.RunOperatorOnce(op0_bw)
dW0 = workspace.FetchBlob('dW')
db0 = workspace.FetchBlob('db')
op1 = core.CreateOperator(
'FC',
['X', 'W', 'b'],
["Y"],
axis=axis,
device_option=dc[1]
)
op1_bw = core.CreateOperator(
'FCGradient',
['X', 'W', 'dY'],
["dW", "db"],
axis=axis,
device_option=dc[1]
)
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('X', X, dc[1])
workspace.FeedBlob('W', W, dc[1])
workspace.FeedBlob('b', b, dc[1])
workspace.RunOperatorOnce(op1)
Y1 = workspace.FetchBlob('Y')
workspace.FeedBlob('dY', dY, dc[1])
workspace.RunOperatorOnce(op1_bw)
dW1 = workspace.FetchBlob('dW')
db1 = workspace.FetchBlob('db')
Y0 = Y0.flatten()
Y1 = Y1.flatten()
if not np.allclose(Y0, Y1, atol=0.01, rtol=0.01):
print(Y1)
print(Y0)
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
dW0 = dW0.flatten()
dW1 = dW1.flatten()
if not np.allclose(dW0, dW1, atol=0.01, rtol=0.01):
print(dW1)
print(dW0)
print(np.max(np.abs(dW1 - dW0)))
self.assertTrue(False)
db0 = db0.flatten()
db1 = db1.flatten()
if not np.allclose(db0, db1, atol=0.01, rtol=0.01):
print(db1)
print(db0)
print(np.max(np.abs(db1 - db0)))
self.assertTrue(False)
@given(n=st.integers(1, 5),
o=st.integers(1, 5),
i=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
axis_w=st.integers(1, 3),
**mu.gcs)
@settings(deadline=1000)
def test_fc_with_axis_w(self, n, o, i, h, w, axis_w, gc, dc):
W = np.random.rand(o, i, h, w).astype(np.float32) - 0.5
k = reduce((lambda x, y: x * y), [o, i, h, w][axis_w - 4:])
m = reduce((lambda x, y: x * y), [o, i, h, w][:axis_w])
X = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(m).astype(np.float32) - 0.5
dY = np.random.rand(n, m).astype(np.float32) - 0.5
op0 = core.CreateOperator(
'FC',
['X', 'W', 'b'],
["Y"],
axis_w=axis_w,
device_option=dc[0]
)
op0_bw = core.CreateOperator(
'FCGradient',
['X', 'W', 'dY'],
["dW", "db"],
axis_w=axis_w,
device_option=dc[0]
)
workspace.ResetWorkspace()
workspace.FeedBlob('X', X, dc[0])
workspace.FeedBlob('W', W, dc[0])
workspace.FeedBlob('b', b, dc[0])
workspace.RunOperatorOnce(op0)
Y0 = workspace.FetchBlob('Y')
workspace.FeedBlob('dY', dY, dc[0])
workspace.RunOperatorOnce(op0_bw)
dW0 = workspace.FetchBlob('dW')
db0 = workspace.FetchBlob('db')
op1 = core.CreateOperator(
'FC',
['X', 'W', 'b'],
["Y"],
axis_w=axis_w,
device_option=dc[1]
)
op1_bw = core.CreateOperator(
'FCGradient',
['X', 'W', 'dY'],
["dW", "db"],
axis_w=axis_w,
device_option=dc[1]
)
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('X', X, dc[1])
workspace.FeedBlob('W', W, dc[1])
workspace.FeedBlob('b', b, dc[1])
workspace.RunOperatorOnce(op1)
Y1 = workspace.FetchBlob('Y')
workspace.FeedBlob('dY', dY, dc[1])
workspace.RunOperatorOnce(op1_bw)
dW1 = workspace.FetchBlob('dW')
db1 = workspace.FetchBlob('db')
Y0 = Y0.flatten()
Y1 = Y1.flatten()
if not np.allclose(Y0, Y1, atol=0.01, rtol=0.01):
print(Y1)
print(Y0)
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
dW0 = dW0.flatten()
dW1 = dW1.flatten()
if not np.allclose(dW0, dW1, atol=0.01, rtol=0.01):
print(dW1)
print(dW0)
print(np.max(np.abs(dW1 - dW0)))
self.assertTrue(False)
db0 = db0.flatten()
db1 = db1.flatten()
if not np.allclose(db0, db1, atol=0.01, rtol=0.01):
print(db1)
print(db0)
print(np.max(np.abs(db1 - db0)))
self.assertTrue(False)
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
@settings(deadline=10000)
def test_fc_4_dims_src(self, n, m, k, gc, dc):
X = np.random.rand(m, k, m, m).astype(np.float32) - 0.5
W = np.random.rand(n, k * m * m).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
op = core.CreateOperator(
'FC',
['X', 'W', 'b'],
["Y"]
)
self.assertDeviceChecks(dc, op, [X, W, b], [0])
for i in range(3):
self.assertGradientChecks(gc, op, [X, W, b], i, [0])
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
@settings(deadline=10000)
def test_fc_4_dims(self, n, m, k, gc, dc):
X = np.random.rand(m, k, m, m).astype(np.float32) - 0.5
W = np.random.rand(n, k, m, m).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
op = core.CreateOperator(
'FC',
['X', 'W', 'b'],
["Y"]
)
self.assertDeviceChecks(dc, op, [X, W, b], [0])
for i in range(3):
self.assertGradientChecks(gc, op, [X, W, b], i, [0])
@given(n=st.integers(2, 5), m=st.integers(2, 5),
k=st.integers(2, 5), **mu.gcs_cpu_ideep)
def test_int8_fc_4_dims(self, n, m, k, gc, dc):
X = np.random.rand(m, k, m, m).astype(np.float32) - 0.5
w = np.random.rand(n, k, m, m).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
fc_fp32 = core.CreateOperator(
'FC',
['X', 'w', 'b'],
["Y"]
)
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('X', X, dc[0])
workspace.FeedBlob('w', w, dc[0])
workspace.FeedBlob('b', b, dc[0])
workspace.RunOperatorOnce(fc_fp32)
Y = workspace.FetchBlob('Y')
workspace.ResetWorkspace()
Y_absmax = np.array([np.absolute(Y).max()]).astype(np.float32)
if Y.min() >= 0:
Y_scale = Y_absmax / 0xFF
Y_zero_point = 0
else:
Y_scale = Y_absmax / 0x7F
Y_zero_point = 128
X_absmax = np.array([np.absolute(X).max()]).astype(np.float32)
if X.min() >= 0:
X_scale = X_absmax / 0xFF
X_zero_point = 0
else:
X_scale = X_absmax / 0x7F
X_zero_point = 128
w_absmax = np.array([np.absolute(w[i, ...]).max() for i in range(w.shape[0])]).astype(np.float32)
w_scale = w_absmax / 0x7F
w_zero_point = 128
w = np.transpose(w, (0, 2, 3, 1)).astype(np.float32)
w_bytes = np.rint([w[i, ...] / w_scale[i] for i in range(w.shape[0])]).astype(np.int8) + w_zero_point
w_filler = core.CreateOperator(
"Int8GivenTensorFill",
[], ["wi"],
shape=w.shape,
values=w_bytes.astype(np.uint8).tobytes(),
Y_zero_point=w_zero_point,
Y_scales=w_scale,
device_option=dc[1],
)
b_scale = w_scale * X_scale
b_zero_point = 0
b_bytes = np.rint([b[i] / b_scale[i] for i in range(b.shape[0])]).astype(np.int32)
b_filler = core.CreateOperator(
"Int8GivenIntTensorFill",
[], ["bi"],
shape=b.shape,
values=b_bytes,
Y_zero_point=b_zero_point,
Y_scales=b_scale,
device_option=dc[1],
)
sw2nhwc = core.CreateOperator(
"NCHW2NHWC",
["Xi"],
["Xi_nhwc"],
device_option=dc[1]
)
quantize_X = core.CreateOperator(
"Int8Quantize",
["Xi_nhwc"],
["Xi_quantized"],
engine="DNNLOWP",
device_option=dc[1],
Y_zero_point=X_zero_point,
Y_scale=X_scale[0],
)
fc = core.CreateOperator(
'Int8FC',
['Xi_quantized', 'wi', 'bi'],
["Y_out"],
engine="DNNLOWP",
device_option=dc[1],
Y_zero_point=Y_zero_point,
Y_scale=Y_scale[0],
)
net = caffe2_pb2.NetDef()
net.op.extend([w_filler, b_filler, sw2nhwc, quantize_X, fc])
workspace.FeedBlob("Xi", X, dc[1])
workspace.RunNetOnce(net)
Y_out = workspace.FetchBlob("Y_out")
MSE = np.square(np.subtract(Y, Y_out)).mean()
if MSE > 0.005:
print(Y.flatten())
print(Y_out.flatten())
print(np.max(np.abs(Y_out - Y)))
print("MSE", MSE)
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
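# A minimal numpy sketch (zero points assumed 0 for brevity) of why the bias
# scale above is w_scale * X_scale: Int8FC accumulates q_x * q_w products in
# int32, each carrying an implied scale of X_scale * w_scale, so the bias
# must be quantized with that same (per-output-channel) scale.
def _int8_fc_accumulate_sketch(q_x, q_w, q_b, X_scale, w_scale):
    # q_x: (n, k) int8, q_w: (m, k) int8, q_b: (m,) int32
    acc = q_x.astype(np.int32) @ q_w.astype(np.int32).T + q_b
    return acc.astype(np.float32) * (X_scale * w_scale)  # back to fp32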
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/fc_op_test.py
|
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestWeightedSumOp(hu.HypothesisTestCase):
@given(n=st.integers(5, 8), m=st.integers(1, 1),
d=st.integers(2, 4), grad_on_w=st.booleans(),
**mu.gcs_ideep_only)
def test_weighted_sum(self, n, m, d, grad_on_w, gc, dc):
input_names = []
input_vars = []
for i in range(m):
X_name = 'X' + str(i)
w_name = 'w' + str(i)
input_names.extend([X_name, w_name])
var = np.random.rand(n, d).astype(np.float32)
vars()[X_name] = var
input_vars.append(var)
var = np.random.rand(1).astype(np.float32)
vars()[w_name] = var
input_vars.append(var)
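            # NOTE: assigning through vars() does not reliably create locals;
            # the tensors actually reach the op via input_vars below.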
def weighted_sum_op_ref(*args):
res = np.zeros((n, d))
for i in range(m):
res = res + args[2 * i + 1] * args[2 * i]
return (res, )
op = core.CreateOperator(
"WeightedSum",
input_names,
['Y'],
grad_on_w=grad_on_w,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=input_vars,
reference=weighted_sum_op_ref,
)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/weightedsum_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace, model_helper
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class LeakyReluTest(hu.HypothesisTestCase):
def _get_inputs(self, N, C, H, W, order):
input_data = np.random.rand(N, C, H, W).astype(np.float32) - 0.5
# default step size is 0.05
input_data[np.logical_and(
input_data >= 0, input_data <= 0.051)] = 0.051
input_data[np.logical_and(
input_data <= 0, input_data >= -0.051)] = -0.051
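        # The nudge above keeps inputs at least one gradient-checker step
        # away from 0, so finite differences never straddle the kink.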
return input_data,
def _get_op(self, device_option, alpha, order, inplace=False):
outputs = ['output' if not inplace else "input"]
op = core.CreateOperator(
'LeakyRelu',
['input'],
outputs,
alpha=alpha,
device_option=device_option)
return op
def _feed_inputs(self, input_blobs, device_option):
names = ['input', 'scale', 'bias']
for name, blob in zip(names, input_blobs):
self.ws.create_blob(name).feed(blob, device_option=device_option)
@given(N=st.integers(2, 3),
C=st.integers(2, 3),
H=st.integers(2, 3),
W=st.integers(2, 3),
alpha=st.floats(0, 1),
seed=st.integers(0, 1000),
**mu.gcs)
@settings(deadline=1000)
def test_leaky_relu_gradients(self, gc, dc, N, C, H, W, alpha, seed):
np.random.seed(seed)
op = self._get_op(
device_option=gc,
alpha=alpha,
order='NCHW')
input_blobs = self._get_inputs(N, C, H, W, "NCHW")
self.assertDeviceChecks(dc, op, input_blobs, [0])
self.assertGradientChecks(gc, op, input_blobs, 0, [0])
@given(N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
alpha=st.floats(0, 1),
seed=st.integers(0, 1000))
def test_leaky_relu_model_helper_helper(self, N, C, H, W, alpha, seed):
np.random.seed(seed)
order = 'NCHW'
arg_scope = {'order': order}
model = model_helper.ModelHelper(name="test_model", arg_scope=arg_scope)
model.LeakyRelu(
'input',
'output',
alpha=alpha)
input_blob = np.random.rand(N, C, H, W).astype(np.float32)
self.ws.create_blob('input').feed(input_blob)
self.ws.create_net(model.param_init_net).run()
self.ws.create_net(model.net).run()
output_blob = self.ws.blobs['output'].fetch()
assert output_blob.shape == (N, C, H, W)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/leaky_relu_op_test.py
|
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given, settings
import caffe2.python.ideep_test_util as mu
@st.composite
def _tensor_splits(draw, add_axis=False):
"""Generates (axis, split_info, tensor_splits) tuples."""
tensor = draw(hu.tensor(min_dim=2, min_value=4)) # Each dim has at least 4 elements.
axis = draw(st.integers(-len(tensor.shape), len(tensor.shape) - 1))
if add_axis:
# Simple case: get individual slices along one axis, where each of them
# is (N-1)-dimensional. The axis will be added back upon concatenation.
return (
axis,
np.ones(tensor.shape[axis], dtype=np.int32),
[
np.array(tensor.take(i, axis=axis))
for i in range(tensor.shape[axis])
]
)
else:
# General case: pick some (possibly consecutive, even non-unique)
# indices at which we will split the tensor, along the given axis.
splits = sorted(draw(
st.lists(elements=st.integers(0, tensor.shape[axis]), max_size=4)
) + [0, tensor.shape[axis]])
        # Deduplicate the boundaries (keeping them sorted) so no split is empty.
        splits = sorted(set(splits))
return (
axis,
np.array(np.diff(splits), dtype=np.int32),
[
tensor.take(range(splits[i], splits[i + 1]), axis=axis)
for i in range(len(splits) - 1)
],
)
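
# Illustrative sketch (not part of the original test file): how the boundary
# draw above becomes `split_info`. The helper name below is hypothetical.
def _split_info_demo():
    import numpy as np
    dim = 10
    draws = [3, 3, 7]                       # drawn cut points, possibly repeated
    splits = sorted(set(draws + [0, dim]))  # [0, 3, 7, 10]; dedup avoids empty splits
    return np.diff(splits)                  # [3, 4, 3] -> size of each chunk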
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestConcatSplitOps(hu.HypothesisTestCase):
@given(tensor_splits=_tensor_splits(),
**mu.gcs)
@settings(deadline=10000)
def test_concat(self, tensor_splits, gc, dc):
axis, _, splits = tensor_splits
op = core.CreateOperator(
"Concat",
['X_{}'.format(i) for i in range(len(splits))],
['concat_result', 'split_info'],
axis=axis
)
self.assertDeviceChecks(dc, op, splits, [0, 1])
self.assertGradientChecks(gc, op, splits, 0, [0])
@given(tensor_splits=_tensor_splits(),
split_as_arg=st.booleans(),
**mu.gcs)
@settings(deadline=10000)
def test_split(self, tensor_splits, split_as_arg, gc, dc):
axis, split_info, splits = tensor_splits
        # Pin the drawn value: only the split-as-argument path is covered here.
        split_as_arg = True
if split_as_arg:
input_names = ['input']
input_tensors = [np.concatenate(splits, axis=axis)]
kwargs = dict(axis=axis, split=split_info)
else:
input_names = ['input', 'split']
input_tensors = [np.concatenate(splits, axis=axis), split_info]
kwargs = dict(axis=axis)
op = core.CreateOperator(
"Split",
input_names,
['X_{}'.format(i) for i in range(len(split_info))],
**kwargs
)
def split_ref(input, split=split_info):
s = np.cumsum([0] + list(split))
return [
np.array(input.take(np.arange(s[i], s[i + 1]), axis=axis))
for i in range(len(split))
]
outputs_with_grad = range(len(split_info))
self.assertDeviceChecks(dc, op, input_tensors, outputs_with_grad)
self.assertGradientChecks(gc, op, input_tensors, 0, outputs_with_grad)
@given(tensor_splits=_tensor_splits(add_axis=True), **mu.gcs)
@settings(deadline=10000)
def test_concat_add_axis(self, tensor_splits, gc, dc):
axis, _, splits = tensor_splits
op = core.CreateOperator(
"Concat",
['X_{}'.format(i) for i in range(len(splits))],
['concat_result', 'split_info'],
axis=axis,
add_axis=1
)
self.assertDeviceChecks(dc, op, splits, [0, 1])
for i in range(len(splits)):
self.assertGradientChecks(gc, op, splits, i, [0])
@given(tensor_splits=_tensor_splits(add_axis=True), **mu.gcs)
def test_concat_with_TensorCPU(self, tensor_splits, gc, dc):
axis, _, splits = tensor_splits
op0 = core.CreateOperator(
"Concat",
['X_{}'.format(i) for i in range(len(splits))],
['concat_result0', 'split_info0'],
axis=axis,
add_axis=1,
device_option=dc[0]
)
op1 = core.CreateOperator(
"Concat",
['X_{}'.format(i) for i in range(len(splits))],
['concat_result1', 'split_info1'],
axis=axis,
add_axis=1,
device_option=dc[1]
)
for i, X in enumerate(splits):
workspace.FeedBlob('X_{}'.format(i), X, dc[0])
workspace.RunOperatorOnce(op0)
res0 = workspace.FetchBlob('concat_result0')
inf0 = workspace.FetchBlob('split_info0')
workspace.RunOperatorOnce(op1)
res1 = workspace.FetchBlob('concat_result1')
inf1 = workspace.FetchBlob('split_info1')
if not np.allclose(res0, res1, atol=0.0, rtol=0.0):
print(res1.flatten())
print(res0.flatten())
print(np.max(np.abs(res1 - res0)))
self.assertTrue(False)
if not np.allclose(inf0, inf1, atol=0.0, rtol=0.0):
print(inf1.flatten())
print(inf0.flatten())
print(np.max(np.abs(inf1 - inf0)))
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/concat_split_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ReluTest(hu.HypothesisTestCase):
@given(X=hu.tensor(),
inplace=st.booleans(),
**mu.gcs)
@settings(deadline=1000)
def test_relu(self, X, inplace, gc, dc):
op = core.CreateOperator(
"Relu",
["X"],
["Y"] if not inplace else ["X"],
)
        # move the inputs away from zero to avoid the kink in the ReLU gradient
X += 0.02 * np.sign(X)
X[X == 0.0] += 0.02
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs_cpu_ideep)
@settings(max_examples=10, deadline=None)
def test_int8_relu(self, size, input_channels, batch_size, inplace, gc, dc):
relu_fp32 = core.CreateOperator(
"Relu",
["X"],
["Y"] if not inplace else ["X"],
device_option=dc[0]
)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
        # move the inputs away from zero to avoid the kink in the ReLU gradient
X += 0.02 * np.sign(X)
X[X == 0.0] += 0.02
        if X.min() >= 0:
scale = np.absolute(X).max() / 0xFF
zero_point = 0
else:
scale = np.absolute(X).max() / 0x7F
zero_point = 128
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob("X", X, dc[0])
workspace.RunOperatorOnce(relu_fp32)
Y = workspace.FetchBlob("X" if inplace else "Y")
workspace.ResetWorkspace()
sw2nhwc = core.CreateOperator(
"NCHW2NHWC",
["Xi"],
["Xi_nhwc"],
device_option=dc[1]
)
quantize = core.CreateOperator(
"Int8Quantize",
["Xi_nhwc"],
["Xi_quantized"],
engine="DNNLOWP",
device_option=dc[1],
Y_zero_point=zero_point,
Y_scale=scale,
)
relu = core.CreateOperator(
"Int8Relu",
["Xi_quantized"],
["Y_quantized"] if not inplace else ["Xi_quantized"],
engine="DNNLOWP",
device_option=dc[1],
)
dequantize = core.CreateOperator(
"Int8Dequantize",
["Y_quantized"] if not inplace else ["Xi_quantized"],
["Y_nhwc"],
engine="DNNLOWP",
device_option=dc[1],
)
sw2nchw = core.CreateOperator(
"NHWC2NCHW",
["Y_nhwc"],
["Y_out"],
device_option=dc[1]
)
net = caffe2_pb2.NetDef()
net.op.extend([sw2nhwc, quantize, relu, dequantize, sw2nchw])
workspace.FeedBlob("Xi", X, dc[1])
workspace.RunNetOnce(net)
Y_out = workspace.FetchBlob("Y_out")
MSE = np.square(np.subtract(Y, Y_out)).mean()
if MSE > 0.005:
print(Y.flatten())
print(Y_out.flatten())
print(np.max(np.abs(Y_out - Y)))
print("MSE", MSE)
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
if __name__ == "__main__":
unittest.main()
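
# Illustrative sketch (not part of the original test file): the scale /
# zero_point choice above, replayed as a plain numpy uint8 affine
# quantize/dequantize round trip. This is a sketch of the assumed scheme,
# not the Int8Quantize operator's exact implementation.
def _int8_roundtrip_demo():
    import numpy as np
    X = np.random.rand(8).astype(np.float32) - 0.5
    if X.min() >= 0:
        scale, zero_point = np.abs(X).max() / 0xFF, 0
    else:
        scale, zero_point = np.abs(X).max() / 0x7F, 128
    q = np.clip(np.round(X / scale) + zero_point, 0, 255).astype(np.uint8)
    X_hat = (q.astype(np.float32) - zero_point) * scale
    return np.square(X - X_hat).mean()  # small; bounded by the quantization step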
|
pytorch-master
|
caffe2/python/ideep/relu_op_test.py
|
import unittest
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ConvTransposeTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 2),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
adj=st.integers(0, 2),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
training_mode=st.booleans(),
compute_dX=st.booleans(),
**mu.gcs)
    @settings(max_examples=2, deadline=None)
def test_convolution_transpose_gradients(self, stride, pad, kernel, adj,
size, input_channels,
output_channels, batch_size,
use_bias, training_mode,
compute_dX, gc, dc):
training = 1 if training_mode else 0
assume(adj < stride)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
w = np.random.rand(
input_channels, output_channels, kernel, kernel)\
.astype(np.float32) - 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
op = core.CreateOperator(
"ConvTranspose",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y"],
stride=stride,
kernel=kernel,
pad=pad,
adj=adj,
training_mode=training,
no_gradient_to_input=not compute_dX,
)
inputs = [X, w, b] if use_bias else [X, w]
self.assertDeviceChecks(dc, op, inputs, [0], threshold=0.001)
if training_mode:
if use_bias and compute_dX:
# w, b, X
outputs_to_check = [1, 2, 0]
elif use_bias:
# w, b
outputs_to_check = [1, 2]
elif compute_dX:
# w, X
outputs_to_check = [1, 0]
else:
# w
outputs_to_check = [1]
for i in outputs_to_check:
self.assertGradientChecks(gc, op, inputs, i, [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/conv_transpose_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class SigmoidTest(hu.HypothesisTestCase):
@given(X=hu.tensor(dtype=np.float32),
inplace=st.booleans(),
**hu.gcs)
@settings(deadline=1000)
def test_sigmoid(self, X, inplace, gc, dc):
op = core.CreateOperator(
"Sigmoid",
["X"],
["Y"] if not inplace else ["X"],
)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/sigmoid_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestFallbackOps(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 10),
input_channels=st.integers(1, 3),
output_channels=st.integers(1, 5),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
**mu.gcs)
def test_in_place(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, gc, dc):
        # To expose potential in-place issues in the fallback path, the
        # fallback op that follows the ideep op must run for at least two
        # iterations.
conv = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y"],
stride=stride,
pad=pad,
kernel=kernel,
device_option=dc[0]
)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
w = np.random.rand(output_channels, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('X', X, dc[0])
workspace.FeedBlob('w', w, dc[0])
workspace.FeedBlob('b', b, dc[0])
workspace.RunOperatorOnce(conv)
Y = workspace.FetchBlob('Y')
scale = np.random.randn(Y.shape[1]).astype(np.float32)
bias = np.random.randn(Y.shape[1]).astype(np.float32)
ac = core.CreateOperator(
"AffineChannel",
["Y", "scale", "bias"],
["Y"],
is_learnable=False,
device_option=dc[0]
)
workspace.FeedBlob('scale', scale, dc[0])
workspace.FeedBlob('bias', bias, dc[0])
workspace.RunOperatorOnce(ac)
workspace.RunOperatorOnce(conv)
workspace.RunOperatorOnce(ac)
Y0 = workspace.FetchBlob('Y')
workspace.ResetWorkspace()
dev_net = caffe2_pb2.NetDef()
conv_dev = caffe2_pb2.OperatorDef()
conv_dev.CopyFrom(conv)
conv_dev.device_option.CopyFrom(dc[1])
ac_dev = caffe2_pb2.OperatorDef()
ac_dev.CopyFrom(ac)
ac_dev.device_option.CopyFrom(dc[1])
dev_net.op.extend([conv_dev, ac_dev])
workspace.FeedBlob('X', X, dc[1])
workspace.FeedBlob('w', w, dc[1])
workspace.FeedBlob('b', b, dc[1])
workspace.FeedBlob('scale', scale, dc[1])
workspace.FeedBlob('bias', bias, dc[1])
workspace.RunNetOnce(dev_net)
workspace.RunNetOnce(dev_net)
Y1 = workspace.FetchBlob('Y')
if not np.allclose(Y0, Y1, atol=0.01, rtol=0.01):
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/operator_fallback_op_test.py
|
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestMomentumSGDUpdateOps(hu.HypothesisTestCase):
@given(n=st.integers(4, 8), nesterov=st.booleans(),
**mu.gcs)
def test_MomentumSGDUpdate(self, n, nesterov, gc, dc):
param = np.random.rand(n).astype(np.float32)
grad = np.random.rand(n).astype(np.float32)
lr = np.random.rand(1).astype(np.float32)
param_momentum = np.random.rand(n).astype(np.float32)
momentum = 0.9
op = core.CreateOperator(
"MomentumSGDUpdate",
["grad", "param_momentum", "lr", "param"],
["grad", "param_momentum", "param"],
momentum=momentum,
nesterov=int(nesterov),
)
        # The learning rate blob lives on the CPU
input_device_options = {'lr': hu.cpu_do}
self.assertDeviceChecks(
dc,
op,
[grad, param_momentum, lr, param],
[0],
input_device_options=input_device_options,
threshold=0.001)
op_noparam = core.CreateOperator(
"MomentumSGD",
["grad", "param_momentum", "lr"],
["grad", "param_momentum"],
momentum=momentum,
nesterov=int(nesterov),
)
self.assertDeviceChecks(
dc,
op_noparam,
[grad, param_momentum, lr],
[0],
input_device_options=input_device_options,
threshold=0.001)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/moment_sgd_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ShapeTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 128),
c=st.integers(1, 128),
h=st.integers(1, 128),
w=st.integers(1, 128),
**mu.gcs)
@settings(max_examples=10, deadline=None)
def test_shape(self, n, c, h, w, gc, dc):
op0 = core.CreateOperator(
"Shape",
["X0"],
["Y0"],
device_option=dc[0]
)
op1 = core.CreateOperator(
"Shape",
["X1"],
["Y1"],
device_option=dc[1]
)
X = np.random.rand(n, c, h, w).astype(np.float32) - 0.5
workspace.FeedBlob('X0', X, dc[0])
workspace.FeedBlob('X1', X, dc[1])
workspace.RunOperatorOnce(op0)
workspace.RunOperatorOnce(op1)
Y0 = workspace.FetchBlob('Y0')
Y1 = workspace.FetchBlob('Y1')
if not np.allclose(Y0, Y1, atol=0, rtol=0):
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
@given(n=st.integers(1, 128),
c=st.integers(1, 128),
h=st.integers(1, 128),
w=st.integers(1, 128),
axes=st.lists(st.integers(0, 3), min_size=1, max_size=3),
**mu.gcs)
@settings(max_examples=10, deadline=None)
def test_shape_with_axes(self, n, c, h, w, axes, gc, dc):
        axes = sorted(set(axes))
op0 = core.CreateOperator(
"Shape",
["X0"],
["Y0"],
            axes=axes,
device_option=dc[0]
)
op1 = core.CreateOperator(
"Shape",
["X1"],
["Y1"],
            axes=axes,
device_option=dc[1]
)
X = np.random.rand(n, c, h, w).astype(np.float32) - 0.5
workspace.FeedBlob('X0', X, dc[0])
workspace.FeedBlob('X1', X, dc[1])
workspace.RunOperatorOnce(op0)
workspace.RunOperatorOnce(op1)
Y0 = workspace.FetchBlob('Y0')
Y1 = workspace.FetchBlob('Y1')
if not np.allclose(Y0, Y1, atol=0, rtol=0):
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/shape_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.transformations import optimizeForMKLDNN
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ConvTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 10),
input_channels=st.integers(1, 3),
output_channels=st.integers(1, 5),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
training_mode=st.booleans(),
group=st.integers(1, 2),
**mu.gcs)
@settings(max_examples=10, deadline=None)
def test_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, training_mode, group, gc, dc):
training = 1 if training_mode else 0
op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y"],
stride=stride,
pad=pad,
kernel=kernel,
group=group,
training_mode=training,
)
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
w = np.random.rand(output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels * group).astype(np.float32) - 0.5
inputs = [X, w, b] if use_bias else [X, w]
self.assertDeviceChecks(dc, op, inputs, [0])
if training_mode:
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0], threshold=0.01)
@settings(max_examples=10, deadline=None)
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
size=st.integers(8, 10),
input_channels=st.integers(16, 32),
output_channels=st.integers(16, 32),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
training_mode=st.booleans(),
**mu.gcs)
def test_winograd_convolution(self, stride, pad, size,
input_channels, output_channels,
batch_size, use_bias, training_mode, gc, dc):
training = 1 if training_mode else 0
conv3x3_winograd_algorithm = 1
kernel = 3
op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y"],
stride=stride,
pad=pad,
kernel=kernel,
training_mode=training,
algorithm=conv3x3_winograd_algorithm
)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
inputs = [X, w, b] if use_bias else [X, w]
self.assertDeviceChecks(dc, op, inputs, [0])
if training_mode:
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0], threshold=0.01)
@given(batch_size=st.integers(1, 3), **mu.gcs)
def test_depthwise_convolution(self, batch_size, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"],
["Y"],
stride=1,
pad=0,
kernel=1,
group=4,
device_option=dc[0]
)
op1 = core.CreateOperator(
"Conv",
["X", "w", "b"],
["Y"],
stride=1,
pad=0,
kernel=1,
group=4,
device_option=dc[1]
)
X = np.random.rand(batch_size, 544, 14, 14).astype(np.float32)
w = np.random.rand(544, 136, 1, 1).astype(np.float32)
b = np.random.rand(544).astype(np.float32)
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('X', X, dc[0])
workspace.FeedBlob('w', w, dc[0])
workspace.FeedBlob('b', b, dc[0])
workspace.RunOperatorOnce(op)
Y0 = workspace.FetchBlob('Y')
workspace.ResetWorkspace()
workspace.FeedBlob('X', X, dc[1])
workspace.FeedBlob('w', w, dc[1])
workspace.FeedBlob('b', b, dc[1])
net = core.Net("net")
old_net = caffe2_pb2.NetDef()
old_net.op.extend([op1])
net.Proto().CopyFrom(old_net)
optimizeForMKLDNN(net)
workspace.RunOperatorOnce(net.Proto().op[0])
Y1 = workspace.FetchBlob('Y')
if not np.allclose(Y0, Y1, atol=0.01, rtol=0.01):
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
workspace.ResetWorkspace()
workspace.FeedBlob('X', X, dc[1])
workspace.FeedBlob('w', w, dc[1])
workspace.FeedBlob('b', b, dc[1])
workspace.RunOperatorOnce(op1)
Y2 = workspace.FetchBlob('Y')
if not np.allclose(Y0, Y2, atol=0.01, rtol=0.01):
print(Y2.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y2 - Y0)))
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/conv_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import (
brew,
core,
model_helper,
workspace,
)
from caffe2.python.transformations import optimizeForMKLDNN
import caffe2.python.hypothesis_test_util as hu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class PreConvertTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(15, 16),
batch_size=st.integers(1, 3))
def test_preConvert(self, input_channels, batch_size):
def AddModel(model, data):
conv1 = brew.conv(model, data, 'conv1', dim_in=input_channels,
dim_out=10, kernel=3, stride=1, pad=1, training_mode=1)
deconv1 = brew.conv_transpose(model, conv1, 'deconv1', dim_in=10, dim_out=10,
kernel=2, stride=2, pad=0, training_mode=1)
fc1 = brew.fc(model, deconv1, 'fc1', dim_in=10 * 56 * 56, dim_out=3)
softmax = brew.softmax(model, fc1, 'softmax')
return softmax
def AddTrainingOperators(model, softmax, label):
"""Adds training operators to the model."""
# Compute cross entropy between softmax scores and labels
xent = model.LabelCrossEntropy([softmax, label], 'xent')
# Compute the expected loss
loss = model.AveragedLoss(xent, "loss")
# Use the average loss we just computed to add gradient operators to the model
model.AddGradientOperators([loss])
arg_scope = {"order": "NCHW", 'no_bias': False}
# Create the model helper for the train model
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
train_model = model_helper.ModelHelper(name="test_train", arg_scope=arg_scope)
# Add the model definition (fc layers, conv layers, softmax, etc.)
softmax = AddModel(train_model, "X")
AddTrainingOperators(train_model, softmax, "label")
X = np.random.rand(
batch_size, input_channels, 28, 28).astype(np.float32) - 0.5
label = np.random.randint(3, size=batch_size).astype(np.int32)
blob_dict = {}
output_dict = {}
output_dict_cosim = {}
old_ws_name = workspace.CurrentWorkspace()
workspace.FeedBlob('X', X)
workspace.FeedBlob('label', label)
workspace.RunNetOnce(train_model.param_init_net)
for op in train_model.net.Proto().op:
if op.type == "Softmax":
break
for j in range(1, len(op.input)):
blob_dict[op.input[j]] = workspace.FetchBlob(op.input[j])
workspace.CreateNet(train_model.net, overwrite=True)
optimizeForMKLDNN(train_model.net, training_mode=True)
workspace.RunNet(train_model.net)
for op in train_model.net.Proto().op:
for blob in op.output:
output_dict[blob] = workspace.FetchBlob(blob)
workspace.SwitchWorkspace("_device_check_", True)
workspace.FeedBlob('X', X)
workspace.FeedBlob('label', label)
for blob in blob_dict.keys():
workspace.FeedBlob(blob, blob_dict[blob])
workspace.CreateNet(train_model.net, overwrite=True)
workspace.RunNet(train_model.net)
for blob in output_dict.keys():
output_dict_cosim[blob] = workspace.FetchBlob(blob)
for blob in output_dict.keys():
if not np.allclose(output_dict[blob], output_dict_cosim[blob], atol=0.001, rtol=0.0001):
print("blob {} error".format(blob))
print(np.max(np.abs(output_dict[blob] - output_dict_cosim[blob])))
self.assertTrue(False)
workspace.ResetWorkspace()
workspace.SwitchWorkspace(old_ws_name)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/ideep/pre_convert_test.py
|
pytorch-master
|
caffe2/python/mint/__init__.py
|
|
## @package app
# Module caffe2.python.mint.app
import argparse
import flask
import glob
import numpy as np
import nvd3
import os
import sys
# pyre-fixme[21]: Could not find module `tornado.httpserver`.
import tornado.httpserver
# pyre-fixme[21]: Could not find a module corresponding to import `tornado.wsgi`
import tornado.wsgi
# needed by main() for tornado.ioloop.IOLoop.instance().start()
import tornado.ioloop
__folder__ = os.path.abspath(os.path.dirname(__file__))
app = flask.Flask(
__name__,
template_folder=os.path.join(__folder__, "templates"),
static_folder=os.path.join(__folder__, "static")
)
args = None
def jsonify_nvd3(chart):
chart.buildcontent()
    # Note(Yangqing): python-nvd3 does not separate the built HTML part from
    # the script part. Luckily the HTML part is only a <div>, accessible via
    # chart.container; the script part occupies the rest of the html content,
    # which we locate with chart.htmlcontent.find('<script>').
script_start = chart.htmlcontent.find('<script>') + 8
script_end = chart.htmlcontent.find('</script>')
return flask.jsonify(
result=chart.container,
script=chart.htmlcontent[script_start:script_end].strip()
)
def visualize_summary(filename):
try:
data = np.loadtxt(filename)
except Exception as e:
return 'Cannot load file {}: {}'.format(filename, str(e))
chart_name = os.path.splitext(os.path.basename(filename))[0]
chart = nvd3.lineChart(
name=chart_name + '_summary_chart',
height=args.chart_height,
y_axis_format='.03g'
)
if args.sample < 0:
        step = max(data.shape[0] // -args.sample, 1)
else:
step = args.sample
xdata = np.arange(0, data.shape[0], step)
    # summary data has 4 columns: min, max, mean, std
chart.add_serie(x=xdata, y=data[xdata, 0], name='min')
chart.add_serie(x=xdata, y=data[xdata, 1], name='max')
chart.add_serie(x=xdata, y=data[xdata, 2], name='mean')
chart.add_serie(x=xdata, y=data[xdata, 2] + data[xdata, 3], name='m+std')
chart.add_serie(x=xdata, y=data[xdata, 2] - data[xdata, 3], name='m-std')
return jsonify_nvd3(chart)
def visualize_print_log(filename):
try:
data = np.loadtxt(filename)
if data.ndim == 1:
data = data[:, np.newaxis]
except Exception as e:
return 'Cannot load file {}: {}'.format(filename, str(e))
chart_name = os.path.splitext(os.path.basename(filename))[0]
chart = nvd3.lineChart(
name=chart_name + '_log_chart',
height=args.chart_height,
y_axis_format='.03g'
)
if args.sample < 0:
        step = max(data.shape[0] // -args.sample, 1)
else:
step = args.sample
xdata = np.arange(0, data.shape[0], step)
# if there is only one curve, we also show the running min and max
if data.shape[1] == 1:
        # running min/max are computed over windows of `step` consecutive rows
        trunc_size = data.shape[0] // step
running_mat = data[:trunc_size * step].reshape((trunc_size, step))
chart.add_serie(
x=xdata[:trunc_size],
y=running_mat.min(axis=1),
name='running_min'
)
chart.add_serie(
x=xdata[:trunc_size],
y=running_mat.max(axis=1),
name='running_max'
)
chart.add_serie(x=xdata, y=data[xdata, 0], name=chart_name)
else:
for i in range(0, min(data.shape[1], args.max_curves)):
            # plot each curve, up to max_curves
chart.add_serie(
x=xdata,
y=data[xdata, i],
name='{}[{}]'.format(chart_name, i)
)
return jsonify_nvd3(chart)
def visualize_file(filename):
fullname = os.path.join(args.root, filename)
if filename.endswith('summary'):
return visualize_summary(fullname)
elif filename.endswith('log'):
return visualize_print_log(fullname)
else:
return flask.jsonify(
            result='Unsupported file: {}'.format(filename),
script=''
)
@app.route('/')
def index():
files = glob.glob(os.path.join(args.root, "*.*"))
files.sort()
names = [os.path.basename(f) for f in files]
return flask.render_template(
'index.html',
root=args.root,
names=names,
debug_messages=names
)
@app.route('/visualization/<string:name>')
def visualization(name):
ret = visualize_file(name)
return ret
def main(argv):
parser = argparse.ArgumentParser("The mint visualizer.")
parser.add_argument(
'-p',
'--port',
type=int,
default=5000,
help="The flask port to use."
)
parser.add_argument(
'-r',
'--root',
type=str,
default='.',
help="The root folder to read files for visualization."
)
parser.add_argument(
'--max_curves',
type=int,
default=5,
help="The max number of curves to show in a dump tensor."
)
parser.add_argument(
'--chart_height',
type=int,
default=300,
help="The chart height for nvd3."
)
parser.add_argument(
'-s',
'--sample',
type=int,
default=-200,
help="Sample every given number of data points. A negative "
"number means the total points we will sample on the "
"whole curve. Default 100 points."
)
global args
args = parser.parse_args(argv)
server = tornado.httpserver.HTTPServer(tornado.wsgi.WSGIContainer(app))
server.listen(args.port)
print("Tornado server starting on port {}.".format(args.port))
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main(sys.argv[1:])
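
# Illustrative sketch (not part of the original app): the `.summary` files
# visualized above are assumed to be plain-text matrices with one row per step
# and four columns (min, max, mean, std); a negative --sample value becomes a
# row stride. The helper name is hypothetical.
def _sampling_demo():
    import numpy as np
    data = np.zeros((1000, 4))              # 1000 steps x (min, max, mean, std)
    sample = -200                           # negative: total points on the curve
    step = max(data.shape[0] // -sample, 1)
    return len(np.arange(0, data.shape[0], step))  # ~200 sampled points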
|
pytorch-master
|
caffe2/python/mint/app.py
|
# @package adaptive_weight
# Module caffe2.fb.python.layers.adaptive_weight
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.regularizer import BoundedGradientProjection, LogBarrier
"""
Implementation of adaptive weighting: https://arxiv.org/pdf/1705.07115.pdf
"""
class AdaptiveWeight(ModelLayer):
def __init__(
self,
model,
input_record,
name="adaptive_weight",
optimizer=None,
weights=None,
enable_diagnose=False,
estimation_method="log_std",
pos_optim_method="log_barrier",
reg_lambda=0.1,
**kwargs
):
super(AdaptiveWeight, self).__init__(model, name, input_record, **kwargs)
self.output_schema = schema.Scalar(
np.float32, self.get_next_blob_reference("adaptive_weight")
)
self.data = self.input_record.field_blobs()
self.num = len(self.data)
self.optimizer = optimizer
if weights is not None:
assert len(weights) == self.num
else:
weights = [1. / self.num for _ in range(self.num)]
assert min(weights) > 0, "initial weights must be positive"
self.weights = np.array(weights).astype(np.float32)
self.estimation_method = str(estimation_method).lower()
        # used by the positivity-constrained parameterization when the
        # estimation method is inv_var; the optimizer is either log barrier
        # or gradient projection
self.pos_optim_method = str(pos_optim_method).lower()
self.reg_lambda = float(reg_lambda)
self.enable_diagnose = enable_diagnose
self.init_func = getattr(self, self.estimation_method + "_init")
self.weight_func = getattr(self, self.estimation_method + "_weight")
self.reg_func = getattr(self, self.estimation_method + "_reg")
self.init_func()
if self.enable_diagnose:
self.weight_i = [
self.get_next_blob_reference("adaptive_weight_%d" % i)
for i in range(self.num)
]
for i in range(self.num):
self.model.add_ad_hoc_plot_blob(self.weight_i[i])
def concat_data(self, net):
reshaped = [net.NextScopedBlob("reshaped_data_%d" % i) for i in range(self.num)]
# coerce shape for single real values
for i in range(self.num):
net.Reshape(
[self.data[i]],
[reshaped[i], net.NextScopedBlob("new_shape_%d" % i)],
shape=[1],
)
concated = net.NextScopedBlob("concated_data")
net.Concat(
reshaped, [concated, net.NextScopedBlob("concated_new_shape")], axis=0
)
return concated
def log_std_init(self):
"""
        mu = 2 log sigma, where sigma is the standard deviation
        per-task objective:
            min (1 / 2) * e^-mu * X + mu / 2
"""
values = np.log(1. / 2. / self.weights)
initializer = (
"GivenTensorFill",
{"values": values, "dtype": core.DataType.FLOAT},
)
self.mu = self.create_param(
param_name="mu",
shape=[self.num],
initializer=initializer,
optimizer=self.optimizer,
)
def log_std_weight(self, x, net, weight):
"""
        min (1 / 2) * e^-mu * X + mu / 2
"""
mu_neg = net.NextScopedBlob("mu_neg")
net.Negative(self.mu, mu_neg)
mu_neg_exp = net.NextScopedBlob("mu_neg_exp")
net.Exp(mu_neg, mu_neg_exp)
net.Scale(mu_neg_exp, weight, scale=0.5)
def log_std_reg(self, net, reg):
net.Scale(self.mu, reg, scale=0.5)
def inv_var_init(self):
"""
        k = 1 / variance
        per-task objective:
            min (1 / 2) * k * X - (1 / 2) * log k
"""
values = 2. * self.weights
initializer = (
"GivenTensorFill",
{"values": values, "dtype": core.DataType.FLOAT},
)
if self.pos_optim_method == "log_barrier":
regularizer = LogBarrier(reg_lambda=self.reg_lambda)
elif self.pos_optim_method == "pos_grad_proj":
regularizer = BoundedGradientProjection(lb=0, left_open=True)
else:
raise TypeError(
"unknown positivity optimization method: {}".format(
self.pos_optim_method
)
)
self.k = self.create_param(
param_name="k",
shape=[self.num],
initializer=initializer,
optimizer=self.optimizer,
regularizer=regularizer,
)
def inv_var_weight(self, x, net, weight):
net.Scale(self.k, weight, scale=0.5)
def inv_var_reg(self, net, reg):
log_k = net.NextScopedBlob("log_k")
net.Log(self.k, log_k)
net.Scale(log_k, reg, scale=-0.5)
def _add_ops_impl(self, net, enable_diagnose):
x = self.concat_data(net)
weight = net.NextScopedBlob("weight")
reg = net.NextScopedBlob("reg")
weighted_x = net.NextScopedBlob("weighted_x")
weighted_x_add_reg = net.NextScopedBlob("weighted_x_add_reg")
self.weight_func(x, net, weight)
self.reg_func(net, reg)
net.Mul([weight, x], weighted_x)
net.Add([weighted_x, reg], weighted_x_add_reg)
net.SumElements(weighted_x_add_reg, self.output_schema())
if enable_diagnose:
for i in range(self.num):
net.Slice(weight, self.weight_i[i], starts=[i], ends=[i + 1])
def add_ops(self, net):
self._add_ops_impl(net, self.enable_diagnose)
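
# Illustrative sketch (not part of the layer): a numpy check that the log_std
# parameterization above reproduces the initial weights -- with
# mu = log(1 / (2 * w0)), the computed weight 0.5 * exp(-mu) equals w0.
def _log_std_init_check():
    import numpy as np
    w0 = np.array([0.25, 0.5, 0.25], dtype=np.float32)
    mu = np.log(1. / 2. / w0)       # the GivenTensorFill values in log_std_init
    weight = 0.5 * np.exp(-mu)      # what log_std_weight computes
    return np.allclose(weight, w0)  # True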
|
pytorch-master
|
caffe2/python/layers/adaptive_weight.py
|
## @package sampling_train
# Module caffe2.python.layers.sampling_train
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer, get_layer_class
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
class SamplingTrain(ModelLayer):
def __init__(
self,
model,
input_record,
prediction_layer,
output_dims,
subtract_log_odd=True,
name='sampling_train',
**kwargs
):
super(SamplingTrain, self).__init__(
model, name, input_record, **kwargs
)
layer_class = get_layer_class(prediction_layer)
assert issubclass(layer_class, SamplingTrainableMixin)
assert 'indices' in input_record
assert isinstance(input_record.indices, schema.Scalar),\
"input_record.indices is expected to be a schema.Scalar"
assert 'input' in input_record
self.subtract_log_odd = subtract_log_odd
if self.subtract_log_odd:
assert 'sampling_prob' in input_record
self._prediction_layer = layer_class(
model,
input_record.input,
output_dims=output_dims,
**kwargs
)
self._prediction_layer.train_param_blobs = [
model.net.NextBlob(str(blob) + '_sampled')
for blob in self._prediction_layer.param_blobs
]
self.params = self._prediction_layer.params
self.output_schema = self._prediction_layer.output_schema
def add_ops(self, net):
self._prediction_layer.add_ops(net)
def add_train_ops(self, net):
for full_blob, sampled_blob in zip(
self._prediction_layer.param_blobs,
self._prediction_layer.train_param_blobs
):
net.Gather([full_blob, self.input_record.indices()], sampled_blob)
self._prediction_layer.add_train_ops(net)
if not self.subtract_log_odd:
return
log_q = net.Log(self.input_record.sampling_prob(),
net.NextScopedBlob("log_q"))
net.Sub([self.output_schema(), log_q], self.output_schema(),
broadcast=1, use_grad_hack=1)
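
# Illustrative sketch (not part of the layer): the subtract_log_odd step above
# is the usual sampled-softmax correction -- subtracting log q(candidate) from
# each sampled score so the sampled objective remains a consistent estimate of
# the full objective. Plain numpy version:
def _log_odds_demo():
    import numpy as np
    scores = np.array([2.0, 0.5], dtype=np.float32)          # sampled logits
    sampling_prob = np.array([0.5, 0.1], dtype=np.float32)   # q per candidate
    return scores - np.log(sampling_prob)                    # corrected logits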
|
pytorch-master
|
caffe2/python/layers/sampling_train.py
|
## @package tags
# Module caffe2.python.layers.tags
import functools
from caffe2.python import context
class TagContext(context.DefaultManaged):
"""
Scope driven way to provide tags to the layers.
"""
def __init__(self, tags=None):
# Tags is expected to be list to keep order of adding/removing things
self.tags = tags or []
def add_tags(self, tags):
self.tags.extend(tags)
def remove_tags(self, tags):
assert self.tags[-len(tags):] == tags
self.tags = self.tags[:-len(tags)]
class Tags(object):
# TODO(amalevich): Tags might need to live in their own contexts, add this
# split later
EXCLUDE_FROM_TRAIN = 'exclude_from_train'
EXCLUDE_FROM_EVAL = 'exclude_from_eval'
EXCLUDE_FROM_PREDICTION = 'exclude_from_prediction'
EXCLUDE_FROM_ACCUMULATE_PRED = 'exclude_from_accumulate_pred'
PREPROCESSING = 'preprocessing'
HANDLE_AS_SPARSE_LAYER = 'handle_as_sparse_layer'
PREFER_GPU = 'prefer_gpu'
CPU_ONLY = 'cpu_only'
LOCAL = 'local'
    # The following tags are hints to the **distributed training framework**.
"""
    Indicates a layer contains a sparse shardable parameter. The parameter
    should be sharded, and operators on those parameters should be done on
    distributed parameter servers.
"""
SPARSE_SHARDED = 'sparse_sharded'
"""
Indicates a layer contains a sparse parameters among others, and that the
parameters should not be sharded (i.e. should be placed together on a node).
"""
SPARSE_DONT_SHARD = 'sparse_dont_shard'
"""
Used to manually indicate a component for an operator. Parameters for
all operators with the same component should be colocated on the same
parameter server.
"""
COMPONENT = 'component:'
PIPELINE = 'pipeline:'
"""
    Indicates a dense layer or dense param init where hogwild is used
    across multiple trainers
"""
HOGWILD_DENSE = "hogwild_dense"
"""
Valid tag prefixes for distributed training framework.
"""
"""
Used to pass on info to the 'extra_info' field in the net
Proto. Typically to provide info for distributed training.
"""
EXTRA_INFO = 'extra_info:'
"""
    An empty tag, used to make conditional statements in a with(Tags) block
    more concise
"""
EMPTY_TAG = 'empty_tag'
DT_TAGS = (SPARSE_SHARDED, SPARSE_DONT_SHARD, COMPONENT, HOGWILD_DENSE)
# In certain cases we want to have different schema for training and
# prediction, as an example in prediction we might need to have only
# subset of ids present in the original schema. This tag is one of the ways
# to mark operators that will be removed from prediction and should
# override schema for predictors.
PREDICTION_SCHEMA = 'prediction_schema'
# This is to mark layers in the feature transform process.
FEATURE_TRANSFORM = 'feature_transform'
# This is to mark the output layers in the feature transform process
FEATURE_TRANSFORM_SCHEMA = 'feature_transform_schema'
def __init__(self, tags):
if not isinstance(tags, list):
tags = [tags]
self.tags = tags
def __enter__(self):
TagContext.current().add_tags(self.tags)
return self
def __exit__(self, type, value, traceback):
TagContext.current().remove_tags(self.tags)
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
# pyre-fixme[16]: Tags has no attribute `TRAIN_ONLY`
Tags.TRAIN_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_EVAL,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
# pyre-fixme[16]: Tags has no attribute `EVAL_ONLY`
Tags.EVAL_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_TRAIN,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
# pyre-fixme[16]: Tags has no attribute `PREDICTION_ONLY`
Tags.PREDICTION_ONLY = [Tags.EXCLUDE_FROM_TRAIN, Tags.EXCLUDE_FROM_EVAL,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
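
# Illustrative sketch (not part of the module): Tags works both as a context
# manager and as a decorator; anything built inside the scope sees the active
# tags via TagContext.current(). `build_layer` is a hypothetical callable.
#
#     with Tags(Tags.EXCLUDE_FROM_PREDICTION):
#         build_layer()      # built with 'exclude_from_prediction' active
#
#     @Tags(Tags.TRAIN_ONLY)
#     def build_train_part():
#         build_layer()      # built with the whole TRAIN_ONLY list active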
|
pytorch-master
|
caffe2/python/layers/tags.py
|
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class MapToRange(ModelLayer):
"""
This layer aims to build a mapping from raw keys to indices within [0, max_index).
The mapping is continuously built during training. The mapping will be frozen during
evaluation and prediction. Unseen keys will be assigned to index 0.
"""
def __init__(
self, model,
input_record,
max_index,
name='map_to_range',
**kwargs
):
super(MapToRange, self).__init__(model, name, input_record, **kwargs)
assert max_index > 0
assert isinstance(input_record, schema.Scalar)
self.max_index = max_index
self.handler = self.create_param(
param_name='handler',
shape=[],
initializer=('LongIndexCreate', {'max_elements': self.max_index}),
optimizer=model.NoOptim
)
self.output_schema = schema.Struct(
('indices', schema.Scalar(
np.int64, self.get_next_blob_reference("indices")
)),
('handler', schema.Scalar(
np.void, self.handler
)),
)
def add_train_ops(self, net):
if self.input_record.field_type().base != np.int64:
keys = net.Cast(
self.input_record(),
net.NextScopedBlob("indices_before_mapping"),
to=core.DataType.INT64
)
else:
keys = self.input_record()
# Load keys into indices
indices = net.IndexGet([self.handler, keys],
self.output_schema.indices())
net.StopGradient(indices, indices)
def add_eval_ops(self, net):
net.IndexFreeze(self.handler, self.handler)
self.add_train_ops(net)
def add_ops(self, net):
self.add_eval_ops(net)
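
# Illustrative sketch (not part of the layer): MapToRange semantics, mocked
# with a plain dict. In train mode unseen keys get fresh indices; once frozen
# (eval/predict) unseen keys map to 0, which is assumed reserved for unknowns
# per the class docstring.
def _map_to_range_demo(keys, mapping, max_index, frozen):
    out = []
    for k in keys:
        if k not in mapping and not frozen and len(mapping) + 1 < max_index:
            mapping[k] = len(mapping) + 1   # 0 stays reserved for unseen keys
        out.append(mapping.get(k, 0))
    return out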
|
pytorch-master
|
caffe2/python/layers/build_index.py
|
## @package bucket_weighted
# Module caffe2.python.layers.bucket_weighted
import logging
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
)
from caffe2.python.layers.tags import Tags
logger = logging.getLogger(__name__)
class BucketWeighted(ModelLayer):
def __init__(self, model, input_record, max_score=0, bucket_boundaries=None,
hash_buckets=True, weight_optim=None, name="bucket_weighted"):
super(BucketWeighted, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.List), "Incorrect input type"
self.bucket_boundaries = bucket_boundaries
self.hash_buckets = hash_buckets
if bucket_boundaries is not None:
self.shape = len(bucket_boundaries) + 1
elif max_score > 0:
self.shape = max_score
else:
self.shape = get_categorical_limit(input_record)
self.bucket_w = self.create_param(param_name='bucket_w',
shape=[self.shape, ],
initializer=('ConstantFill', {'value': 1.0}),
optimizer=weight_optim)
self.output_schema = schema.Struct(
('bucket_weights',
schema.Scalar((np.float32, self.shape),
self.get_next_blob_reference("bucket_w_gather")))
)
self.tags.update({Tags.HANDLE_AS_SPARSE_LAYER})
def get_memory_usage(self):
return self.shape
def add_ops(self, net):
if self.bucket_boundaries is not None:
buckets_int = net.Bucketize(
self.input_record.values(),
"buckets_int",
boundaries=self.bucket_boundaries
)
else:
buckets = self.input_record.values()
buckets_int = net.Cast(
buckets,
"buckets_int",
to=core.DataType.INT32
)
if self.hash_buckets:
buckets_int = net.IndexHash(
buckets_int, "hashed_buckets_int", seed=0, modulo=self.shape
)
net.Gather(
[self.bucket_w, buckets_int],
self.output_schema.bucket_weights.field_blobs())
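
# Illustrative sketch (not part of the layer): the Bucketize + Gather pair
# above, expressed in numpy. np.digitize stands in for Bucketize and fancy
# indexing stands in for Gather; the optional IndexHash step is omitted.
def _bucket_weighted_demo():
    import numpy as np
    boundaries = [0.0, 1.0]                    # 2 boundaries -> 3 buckets
    bucket_w = np.array([0.1, 1.0, 10.0], dtype=np.float32)
    values = np.array([-0.5, 0.5, 1.5])
    buckets = np.digitize(values, boundaries)  # [0, 1, 2]
    return bucket_w[buckets]                   # [0.1, 1.0, 10.0]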
|
pytorch-master
|
caffe2/python/layers/bucket_weighted.py
|
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class BatchNormalization(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_normalization',
scale_optim=None,
bias_optim=None,
momentum=0.9,
order='NCHW',
scale_init_value=1.0,
**kwargs
):
super(BatchNormalization, self).__init__(
model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.input_shape = input_record.field_type().shape
if len(self.input_shape) == 3:
if order == "NCHW":
input_dims = self.input_shape[0]
elif order == "NHWC":
input_dims = self.input_shape[2]
else:
raise ValueError("Please specify a correct order")
else:
assert len(self.input_shape) == 1, (
"This layer supports only 4D or 2D tensors")
input_dims = self.input_shape[0]
self.output_schema = schema.Scalar(
(np.float32, self.input_shape),
self.get_next_blob_reference('output')
)
self.momentum = momentum
self.order = order
self.scale = self.create_param(param_name='scale',
shape=[input_dims],
initializer=('ConstantFill', {'value': scale_init_value}),
optimizer=scale_optim)
self.bias = self.create_param(param_name='bias',
shape=[input_dims],
initializer=('ConstantFill', {'value': 0.0}),
optimizer=bias_optim)
self.rm = self.create_param(param_name='running_mean',
shape=[input_dims],
initializer=('ConstantFill', {'value': 0.0}),
optimizer=model.NoOptim)
self.riv = self.create_param(param_name='running_inv_var',
shape=[input_dims],
initializer=('ConstantFill', {'value': 1.0}),
optimizer=model.NoOptim)
def _add_ops(self, net, is_test, out_blob=None):
original_input_blob = self.input_record.field_blobs()
input_blob = net.NextScopedBlob('expand_input')
if len(self.input_shape) == 1:
input_blob = net.ExpandDims(original_input_blob,
dims=[2, 3])
else:
input_blob = original_input_blob[0]
if out_blob is None:
bn_output = self.output_schema.field_blobs()
else:
bn_output = out_blob
if is_test:
output_blobs = bn_output
else:
output_blobs = bn_output + [self.rm, self.riv,
net.NextScopedBlob('bn_saved_mean'),
net.NextScopedBlob('bn_saved_iv')]
net.SpatialBN([input_blob, self.scale,
self.bias, self.rm, self.riv],
output_blobs,
momentum=self.momentum,
is_test=is_test,
order=self.order)
if len(self.input_shape) == 1:
net.Squeeze(bn_output,
bn_output,
dims=[2, 3])
def add_train_ops(self, net):
self._add_ops(net, is_test=False)
def add_eval_ops(self, net):
self._add_ops(net, is_test=True)
def add_ops(self, net):
self.add_eval_ops(net)
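
# Illustrative sketch (not part of the layer): why 2D inputs are expanded.
# SpatialBN normalizes per channel over NCHW, so an (N, C) batch is viewed as
# (N, C, 1, 1) and squeezed back afterwards -- a pure shape round trip.
def _expand_squeeze_demo():
    import numpy as np
    x = np.random.rand(8, 16).astype(np.float32)      # (N, C)
    x4d = x[:, :, np.newaxis, np.newaxis]             # (N, C, 1, 1), like ExpandDims
    assert x4d.squeeze(axis=(2, 3)).shape == x.shape  # like the final Squeeze
    return x4d.shape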
|
pytorch-master
|
caffe2/python/layers/batch_normalization.py
|
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
IdList
)
import numpy as np
class MergeIdLists(ModelLayer):
"""Merge multiple ID_LISTs into a single ID_LIST
Args:
model: A layer model instance
input_record: Tuple (Struct) of ID_LIST features to be
merged
Returns:
the merged ID_LIST feature
"""
def __init__(self, model, input_record, name='merged'):
super(MergeIdLists, self).__init__(model, name, input_record)
assert all(schema.equal_schemas(x, IdList) for x in input_record), \
"Inputs to MergeIdLists should all be IdLists."
assert all(record.items.metadata is not None
for record in self.input_record), \
"Features without metadata are not supported"
merge_dim = max(get_categorical_limit(record)
for record in self.input_record)
assert merge_dim is not None, "Unbounded features are not supported"
self.output_schema = schema.NewRecord(
model.net, schema.List(
schema.Scalar(
np.int64,
blob=model.net.NextBlob(name),
metadata=schema.Metadata(categorical_limit=merge_dim)
)))
def add_ops(self, net):
return net.MergeIdLists(self.input_record.field_blobs(),
self.output_schema.field_blobs())
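
# Illustrative sketch (not part of the layer): merging two ID_LISTs in their
# values/lengths representation with plain Python. The per-example
# interleaving shown here is one plausible ordering; the operator's exact
# output order is not asserted.
def _merge_id_lists_demo():
    vals_a, lens_a = [1, 2, 3], [1, 2]   # 2 examples: [1] and [2, 3]
    vals_b, lens_b = [4, 5], [2, 0]      # 2 examples: [4, 5] and []
    out_vals, out_lens, oa, ob = [], [], 0, 0
    for la, lb in zip(lens_a, lens_b):
        out_vals += vals_a[oa:oa + la] + vals_b[ob:ob + lb]
        out_lens.append(la + lb)
        oa, ob = oa + la, ob + lb
    return out_vals, out_lens  # ([1, 4, 5, 2, 3], [3, 2])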
|
pytorch-master
|
caffe2/python/layers/merge_id_lists.py
|
# @package homotopy_weight
# Module caffe2.fb.python.layers.homotopy_weight
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
import logging
logger = logging.getLogger(__name__)
'''
Homotopy Weighting between two weights x, y by doing:
alpha x + beta y
where alpha is a scalar that decays from max to min over training (default
range [0, 1]) and alpha + beta = max + min, so beta correspondingly grows from
min to max;
Homotopy methods first solve an "easy" problem (one whose solution is well
known), which is gradually transformed into the target problem
'''
class HomotopyWeight(ModelLayer):
def __init__(
self,
model,
input_record,
name='homotopy_weight',
min_weight=0.,
max_weight=1.,
half_life=1e6,
quad_life=3e6,
atomic_iter=None,
**kwargs
):
super(HomotopyWeight,
self).__init__(model, name, input_record, **kwargs)
self.output_schema = schema.Scalar(
np.float32, self.get_next_blob_reference('homotopy_weight')
)
data = self.input_record.field_blobs()
assert len(data) == 2
self.x = data[0]
self.y = data[1]
# TODO: currently model building does not have access to iter counter or
# learning rate; it's added at optimization time;
self.use_external_iter = (atomic_iter is not None)
self.atomic_iter = (
atomic_iter if self.use_external_iter else self.create_atomic_iter()
)
# to map lr to [min, max]; alpha = scale * lr + offset
assert max_weight > min_weight
self.scale = float(max_weight - min_weight)
self.offset = self.model.add_global_constant(
'%s_offset_1dfloat' % self.name, float(min_weight)
)
self.gamma, self.power = self.solve_inv_lr_params(half_life, quad_life)
def solve_inv_lr_params(self, half_life, quad_life):
        # ensure that gamma and power are solvable
assert half_life > 0
# convex monotonically decreasing
assert quad_life > 2 * half_life
t = float(quad_life) / float(half_life)
x = t * (1.0 + np.sqrt(2.0)) / 2.0 - np.sqrt(2.0)
gamma = (x - 1.0) / float(half_life)
power = np.log(2.0) / np.log(x)
logger.info(
'homotopy_weighting: found lr param: gamma=%g, power=%g' %
(gamma, power)
)
return gamma, power
def create_atomic_iter(self):
self.mutex = self.create_param(
param_name=('%s_mutex' % self.name),
shape=None,
initializer=('CreateMutex', ),
optimizer=self.model.NoOptim,
)
self.atomic_iter = self.create_param(
param_name=('%s_atomic_iter' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64
}
),
optimizer=self.model.NoOptim,
)
return self.atomic_iter
def update_weight(self, net):
alpha = net.NextScopedBlob('alpha')
beta = net.NextScopedBlob('beta')
lr = net.NextScopedBlob('lr')
comp_lr = net.NextScopedBlob('complementary_lr')
scaled_lr = net.NextScopedBlob('scaled_lr')
scaled_comp_lr = net.NextScopedBlob('scaled_complementary_lr')
if not self.use_external_iter:
net.AtomicIter([self.mutex, self.atomic_iter], [self.atomic_iter])
net.LearningRate(
[self.atomic_iter],
[lr],
policy='inv',
gamma=self.gamma,
power=self.power,
base_lr=1.0,
)
net.Sub([self.model.global_constants['ONE'], lr], [comp_lr])
net.Scale([lr], [scaled_lr], scale=self.scale)
net.Scale([comp_lr], [scaled_comp_lr], scale=self.scale)
net.Add([scaled_lr, self.offset], [alpha])
net.Add([scaled_comp_lr, self.offset], [beta])
return alpha, beta
def add_ops(self, net):
alpha, beta = self.update_weight(net)
# alpha x + beta y
net.WeightedSum([self.x, alpha, self.y, beta], self.output_schema())
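
# Illustrative sketch (not part of the layer): a numpy check of the 'inv'
# learning-rate policy lr(t) = (1 + gamma * t) ** (-power) with the params
# from solve_inv_lr_params. By construction lr(half_life) is exactly 0.5;
# lr(quad_life) comes out close to, but not exactly, 0.25.
def _inv_lr_check(half_life=1e6, quad_life=3e6):
    import numpy as np
    t = float(quad_life) / float(half_life)
    x = t * (1.0 + np.sqrt(2.0)) / 2.0 - np.sqrt(2.0)
    gamma = (x - 1.0) / float(half_life)
    power = np.log(2.0) / np.log(x)
    lr = lambda step: (1.0 + gamma * step) ** (-power)
    return lr(half_life), lr(quad_life)  # (0.5, ~0.26)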
|
pytorch-master
|
caffe2/python/layers/homotopy_weight.py
|
## @package sampling_trainable_mixin
# Module caffe2.python.layers.sampling_trainable_mixin
import abc
class SamplingTrainableMixin(metaclass=abc.ABCMeta):
def __init__(self, *args, **kwargs):
super(SamplingTrainableMixin, self).__init__(*args, **kwargs)
self._train_param_blobs = None
self._train_param_blobs_frozen = False
@property
@abc.abstractmethod
def param_blobs(self):
"""
List of parameter blobs for prediction net
"""
pass
@property
def train_param_blobs(self):
"""
        If train_param_blobs is not set before use, default to param_blobs
"""
if self._train_param_blobs is None:
self.train_param_blobs = self.param_blobs
return self._train_param_blobs
@train_param_blobs.setter
def train_param_blobs(self, blobs):
assert not self._train_param_blobs_frozen
assert blobs is not None
self._train_param_blobs_frozen = True
self._train_param_blobs = blobs
@abc.abstractmethod
def _add_ops(self, net, param_blobs):
"""
Add ops to the given net, using the given param_blobs
"""
pass
def add_ops(self, net):
self._add_ops(net, self.param_blobs)
def add_train_ops(self, net):
self._add_ops(net, self.train_param_blobs)
|
pytorch-master
|
caffe2/python/layers/sampling_trainable_mixin.py
|
## @package last_n_window_collector
# Module caffe2.python.layers.last_n_window_collector
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class LastNWindowCollector(ModelLayer):
"""
Collect last-N samples from input record. If you have complex data,
use PackRecords to pack it before using this layer.
This layer is not thread safe.
"""
def __init__(self, model, input_record, num_to_collect,
name='last_n_window_collector', **kwargs):
super(LastNWindowCollector, self).__init__(
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
assert isinstance(input_record, schema.Scalar), \
"Got {!r}".format(input_record)
self.last_n = self.create_param(param_name='last_n',
shape=[0],
initializer=('ConstantFill', {}),
optimizer=model.NoOptim)
self.next_blob = self.create_param(
param_name='next',
shape=[],
initializer=('ConstantFill',
{'value': 0, 'dtype': core.DataType.INT32}),
optimizer=model.NoOptim
)
self.mutex = self.create_param(
param_name='mutex',
shape=[],
initializer=('CreateMutex',),
optimizer=model.NoOptim,
)
self.num_visited_blob = self.create_param(
param_name='num_visited',
shape=[],
initializer=('ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64,
}),
optimizer=model.NoOptim,
)
self.output_schema = schema.Struct(
(
'last_n',
schema.from_blob_list(input_record, [self.last_n])
),
('num_visited', schema.Scalar(blob=self.num_visited_blob)),
('mutex', schema.Scalar(blob=self.mutex)),
)
def add_ops(self, net):
net.LastNWindowCollector(
[self.last_n, self.next_blob, self.input_record(), self.mutex,
self.num_visited_blob],
[self.last_n, self.next_blob, self.num_visited_blob],
num_to_collect=self.num_to_collect,
)
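
# Illustrative sketch (not part of the layer): LastNWindowCollector keeps a
# ring buffer of the most recent N samples; `next` is the write cursor and
# `num_visited` counts everything ever seen. Plain-Python equivalent (the
# operator's exact buffer layout may differ):
def _last_n_demo(samples, n):
    buf, nxt, num_visited = [], 0, 0
    for s in samples:
        if len(buf) < n:
            buf.append(s)
        else:
            buf[nxt] = s
        nxt = (nxt + 1) % n
        num_visited += 1
    return buf, num_visited  # _last_n_demo(range(5), 3) -> ([3, 4, 2], 5)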
|
pytorch-master
|
caffe2/python/layers/last_n_window_collector.py
|
from caffe2.python import schema
from caffe2.python.layers.layers import (
IdList,
ModelLayer,
)
# Model layer for implementing probabilistic replacement of individual elements in
# IdLists. Takes probabilities for train, eval and predict nets as input, as
# well as the replacement value to use when dropout happens. For features that
# are available in the train net but not in the predict net, set the dropout
# probability for the predict net to 1.0 and the feature to the replacement
# value given here. This way, the value is tied to the particular model and not
# to any specific feature-processing logic in serving.
# Consider the following example where X is the values in the IdList and Lengths
# is the number of values corresponding to each example.
# X: [1, 2, 3, 4, 5]
# Lengths: [2, 3]
# This IdList contains 2 IdList features of lengths 2 and 3. Assume we used a
# ratio of 0.5, ended up dropping the 2nd item of the 2nd IdList feature, and
# used a replacement value of -1. We end up with the following IdList.
# Y: [1, 2, 3, -1, 5]
# OutputLengths: [2, 3]
# where the 2nd item in 2nd IdList feature [4] was replaced with [-1].
class SparseItemwiseDropoutWithReplacement(ModelLayer):
def __init__(
self,
model,
input_record,
dropout_prob_train,
dropout_prob_eval,
dropout_prob_predict,
replacement_value,
name='sparse_itemwise_dropout',
**kwargs):
super(SparseItemwiseDropoutWithReplacement, self).__init__(model, name, input_record, **kwargs)
assert schema.equal_schemas(input_record, IdList), "Incorrect input type"
self.dropout_prob_train = float(dropout_prob_train)
self.dropout_prob_eval = float(dropout_prob_eval)
self.dropout_prob_predict = float(dropout_prob_predict)
self.replacement_value = int(replacement_value)
assert (self.dropout_prob_train >= 0 and
self.dropout_prob_train <= 1.0), \
"Expected 0 <= dropout_prob_train <= 1, but got %s" \
% self.dropout_prob_train
assert (self.dropout_prob_eval >= 0 and
self.dropout_prob_eval <= 1.0), \
"Expected 0 <= dropout_prob_eval <= 1, but got %s" \
% dropout_prob_eval
assert (self.dropout_prob_predict >= 0 and
self.dropout_prob_predict <= 1.0), \
"Expected 0 <= dropout_prob_predict <= 1, but got %s" \
% dropout_prob_predict
assert(self.dropout_prob_train > 0 or
self.dropout_prob_eval > 0 or
self.dropout_prob_predict > 0), \
"Ratios all set to 0.0 for train, eval and predict"
self.output_schema = schema.NewRecord(model.net, IdList)
if input_record.lengths.metadata:
self.output_schema.lengths.set_metadata(
input_record.lengths.metadata)
if input_record.items.metadata:
self.output_schema.items.set_metadata(
input_record.items.metadata)
def _add_ops(self, net, ratio):
input_values_blob = self.input_record.items()
input_lengths_blob = self.input_record.lengths()
output_lengths_blob = self.output_schema.lengths()
output_values_blob = self.output_schema.items()
net.SparseItemwiseDropoutWithReplacement(
[
input_values_blob,
input_lengths_blob
],
[
output_values_blob,
output_lengths_blob
],
ratio=ratio,
replacement_value=self.replacement_value
)
def add_train_ops(self, net):
self._add_ops(net, self.dropout_prob_train)
def add_eval_ops(self, net):
self._add_ops(net, self.dropout_prob_eval)
def add_ops(self, net):
self._add_ops(net, self.dropout_prob_predict)
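
# Illustrative sketch (not part of the layer): itemwise dropout with
# replacement over the items of an IdList, in numpy. Lengths are unchanged;
# only individual items are swapped for the replacement value, matching the
# X / Y example in the comments above.
def _itemwise_dropout_demo(values, ratio, replacement_value, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    values = np.asarray(values)
    drop = rng.uniform(size=values.shape) < ratio
    return np.where(drop, replacement_value, values)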
|
pytorch-master
|
caffe2/python/layers/sparse_itemwise_dropout_with_replacement.py
|
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class RandomFourierFeatures(ModelLayer):
"""
Implementation of random fourier feature map for feature processing.
Applies sqrt(2 / output_dims) * cos(wx+b), where:
output_dims is the output feature dimensions, and
wx + b applies FC using randomized, fixed weight and bias parameters
For more information, see the original paper:
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf
Inputs:
output_dims -- output feature dimensions
sigma -- bandwidth for the Gaussian kernel estimator
w_init -- initialization options for weight parameter
b_init -- initialization options for bias parameter
"""
def __init__(
self,
model,
input_record,
output_dims,
sigma, # bandwidth
w_init=None,
b_init=None,
name='random_fourier_features',
**kwargs):
super(RandomFourierFeatures, self).__init__(model, name, input_record,
**kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
input_dims = input_record.field_type().shape[0]
assert input_dims >= 1, "Expected input dimensions >= 1, got %s" \
% input_dims
self.output_dims = output_dims
assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
% self.output_dims
self.output_schema = schema.Scalar(
(np.float32, (self.output_dims, )),
self.get_next_blob_reference('output')
)
assert sigma > 0.0, "Expected bandwidth > 0, got %s" % sigma
# Initialize train_init_net parameters
w_init = w_init if w_init else (
'GaussianFill', {'mean': 0.0, 'std': 1.0 / sigma}
)
b_init = b_init if b_init else (
'UniformFill', {'min': 0.0, 'max': 2 * np.pi}
)
self.w = self.create_param(param_name='w',
shape=[self.output_dims, input_dims],
initializer=w_init,
optimizer=model.NoOptim)
self.b = self.create_param(param_name='b',
shape=[self.output_dims],
initializer=b_init,
optimizer=model.NoOptim)
def add_ops(self, net):
# Random features: wx + b
cosine_arg = net.FC(self.input_record.field_blobs() + [self.w, self.b],
net.NextScopedBlob("cosine_arg"))
# Apply cosine to new vectors
new_feature_vec = net.Cos([cosine_arg],
net.NextScopedBlob('new_feature_vec'))
# Multiply each element in vector by sqrt(2/D)
scale = np.sqrt(2.0 / self.output_dims)
net.Scale([new_feature_vec],
self.output_schema.field_blobs(),
scale=scale)
|
pytorch-master
|
caffe2/python/layers/random_fourier_features.py
|
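A minimal NumPy sketch of the random Fourier feature map that RandomFourierFeatures computes, with freshly sampled fixed parameters; the dimensions and seed are illustrative:

import numpy as np

def random_fourier_features(x, output_dims, sigma, rng=None):
    if rng is None:
        rng = np.random.default_rng(0)
    input_dims = x.shape[1]
    # w ~ N(0, 1/sigma), b ~ U[0, 2*pi]; both stay fixed after sampling,
    # mirroring GaussianFill / UniformFill with NoOptim in the layer.
    w = rng.normal(0.0, 1.0 / sigma, size=(output_dims, input_dims))
    b = rng.uniform(0.0, 2 * np.pi, size=output_dims)
    # sqrt(2 / D) * cos(x w^T + b), matching the FC -> Cos -> Scale ops.
    return np.sqrt(2.0 / output_dims) * np.cos(x @ w.T + b)

x = np.random.randn(4, 16).astype(np.float32)
phi = random_fourier_features(x, output_dims=32, sigma=1.0)
assert phi.shape == (4, 32)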
## @package fc
# Module caffe2.python.layers.fc
from caffe2.python.helpers.arg_scope import get_current_scope
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
def get_fc_predictor_version(fc_version):
assert fc_version in ["fp32", "fp16"], (
        "Only fp32 and fp16 are supported for the fully connected layer "
        "in the predictor net; the provided FC precision is {}".format(fc_version)
)
return fc_version
class FC(SamplingTrainableMixin, ModelLayer):
def __init__(self, model, input_record, output_dims, weight_init=None,
bias_init=None, weight_optim=None, bias_optim=None, name='fc',
weight_reg=None, bias_reg=None, clip_param=None,
max_fc_size=None, axis=1, transposed=False,
uniform_weight_init_scale_numerator=1.0,
**kwargs):
super(FC, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), (
"Incorrect input type {}".format(input_record))
assert len(input_record.field_types()[0].shape) > 0, (
"FC expects limited dimensions of the input tensor")
        assert axis >= 1, "axis {} should be >= 1.".format(axis)
self.axis = axis
input_dims = np.prod(input_record.field_types()[0].shape[axis - 1:])
assert input_dims > 0, (
"FC expects input dimensions > 0, got {}".format(input_dims))
self.clip_args = None
if (clip_param is not None):
assert len(clip_param) == 2, (
'clip_param must be a tuple / list '
                'of length 2 and in the form of (clip_min, clip_max)'
)
clip_min, clip_max = clip_param
assert clip_min is not None or clip_max is not None, (
'clip_min, and clip_max in clip_param cannot both be None'
)
assert (
(clip_min is None or clip_max is None) or clip_min < clip_max
), (
'clip_param = [clip_min, clip_max] must have clip_min < clip_max'
)
self.clip_args = {}
if clip_min is not None:
self.clip_args['min'] = clip_min
if clip_max is not None:
self.clip_args['max'] = clip_max
if uniform_weight_init_scale_numerator is None:
uniform_weight_init_scale_numerator = 1.0
scale = math.sqrt(uniform_weight_init_scale_numerator / input_dims)
weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale, 'max': scale})
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.output_dim_vec = FC.calculate_fc_output_dims(
max_fc_size, input_dims, output_dims)
self.transposed = transposed
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
weight_shape = [input_dims, output_dims] if transposed else [output_dims, input_dims]
self.w = self.create_param(param_name='w',
shape=weight_shape,
initializer=weight_init,
optimizer=weight_optim,
regularizer=weight_reg)
self.b = self.create_param(param_name='b',
shape=[output_dims, ],
initializer=bias_init,
optimizer=bias_optim,
regularizer=bias_reg)
else:
self.w_vec = []
self.b_vec = []
for idx, output_dim in enumerate(self.output_dim_vec):
weight_shape = [input_dims, output_dim] if transposed else [output_dim, input_dims]
self.w_vec.append(self.create_param(param_name='w_sub_{}'.format(idx),
shape=weight_shape,
initializer=weight_init,
optimizer=weight_optim,
regularizer=weight_reg))
self.b_vec.append(self.create_param(param_name='b_sub_{}'.format(idx),
shape=[output_dim, ],
initializer=weight_init,
optimizer=weight_optim,
regularizer=weight_reg))
if axis == 1:
output_shape = (output_dims, )
else:
output_shape = list(input_record.field_types()[0].shape)[0: axis - 1]
output_shape = tuple(output_shape + [output_dims])
self.output_schema = schema.Scalar(
(np.float32, output_shape),
self.get_next_blob_reference('output')
)
@staticmethod
def calculate_fc_output_dims(max_fc_size, input_dim, output_dim):
if not max_fc_size or max_fc_size < 0:
return None
assert max_fc_size >= input_dim, "Currently we split along the output " \
"dimension. So we need max_fc_size >= input_dim. But, max_fc_size: " \
"{}, input_dim: {}".format(max_fc_size, input_dim)
output_dim_allowed = int(np.floor(max_fc_size / input_dim))
num_fc = int(np.floor((output_dim - 1) / output_dim_allowed) + 1)
output_dim_vec = [output_dim_allowed] * (num_fc - 1)
output_dim_vec.append(output_dim - sum(output_dim_vec))
return output_dim_vec
def _insert_fc_ops(self, net, params, outputs, version):
"""
Args:
net: the caffe2 net to insert operator
params: weight and bias for FC
outputs: the output blobs
version: support fp32 and fp16 for now.
"""
if version == "fp32":
if self.transposed:
return net.FCTransposed(
self.input_record.field_blobs() + params,
outputs,
axis=self.axis,
**self.kwargs
)
else:
return net.FC(
self.input_record.field_blobs() + params,
outputs,
axis=self.axis,
**self.kwargs
)
elif version == "fp16":
return net.FbFCPacked(
self.input_record.field_blobs() + params,
outputs,
axis=self.axis,
**self.kwargs
)
else:
raise Exception("unsupported FC type version {}".format(version))
def _add_ops(self, net, params, version):
"""
Args:
params : the weight and bias,
passed by either add_ops or add_train_ops function
            version : fp16 or fp32; might support int8 in the future.
"""
if self.clip_args is not None:
clipped_params = [net.NextScopedBlob(
'clipped_%s' % str(p)) for p in params]
for p, cp in zip(params, clipped_params):
net.Clip([p], [cp], **self.clip_args)
params = clipped_params
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
self._insert_fc_ops(net, params, self.output_schema.field_blobs(), version)
else:
w_vec = params[:int(len(params) / 2)]
b_vec = params[int(len(params) / 2):]
assert len(w_vec) == len(b_vec)
output_blob_vec = []
for i in range(len(self.output_dim_vec)):
output_blob = net.NextScopedBlob(
'output_sub_{}'.format(i))
insert_ret = self._insert_fc_ops(
net, [w_vec[i], b_vec[i]], [output_blob], version
)
output_blob_vec.append(insert_ret)
net.Concat(output_blob_vec,
self.output_schema.field_blobs() +
[self.output_schema.field_blobs()[0] + "_concat_dims"])
def add_ops(self, net):
"""Both the predict net and the eval net will call this function
"""
version_info = get_current_scope().get(
get_fc_predictor_version.__name__, {'fc_version': 'fp32'}
)
predictor_fc_fp_version = version_info['fc_version']
self._add_ops(net, self.param_blobs, predictor_fc_fp_version)
def add_train_ops(self, net):
# use the train_param_blobs to be consistent with the SamplingTrain unittest
self._add_ops(net, self.train_param_blobs, "fp32")
def get_fp16_compatible_parameters(self):
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
return [self.w]
else:
return self.w_vec
@property
def param_blobs(self):
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
return [self.w, self.b]
else:
return self.w_vec + self.b_vec
|
pytorch-master
|
caffe2/python/layers/fc.py
|
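The splitting logic in FC.calculate_fc_output_dims can be exercised standalone; a sketch with illustrative sizes:

import numpy as np

def calculate_fc_output_dims(max_fc_size, input_dim, output_dim):
    # Split the FC along the output dimension so each sub-FC weight matrix
    # holds at most max_fc_size elements (input_dim * sub_output_dim).
    if not max_fc_size or max_fc_size < 0:
        return None
    assert max_fc_size >= input_dim
    output_dim_allowed = int(np.floor(max_fc_size / input_dim))
    num_fc = int(np.floor((output_dim - 1) / output_dim_allowed) + 1)
    output_dim_vec = [output_dim_allowed] * (num_fc - 1)
    output_dim_vec.append(output_dim - sum(output_dim_vec))
    return output_dim_vec

# input_dim=128, output_dim=1000, at most 40960 weights per sub-FC:
# each sub-FC may emit up to 320 outputs, giving [320, 320, 320, 40].
print(calculate_fc_output_dims(40960, 128, 1000))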
## @package concat
# Module caffe2.python.layers.concat
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from future.utils import viewitems
import numpy as np
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
def get_concatenated_feature_to_index(blobs_to_concat):
concat_feature_to_index = defaultdict(list)
start_pos = 0
for scalar in blobs_to_concat:
num_dims = scalar.dtype.shape[0]
if hasattr(scalar, 'metadata') \
and hasattr(scalar.metadata, 'feature_specs') \
and hasattr(scalar.metadata.feature_specs, 'feature_to_index') \
and isinstance(scalar.metadata.feature_specs.feature_to_index, dict): # noqa B950
for k, v in scalar.metadata.feature_specs.feature_to_index.items():
concat_feature_to_index[k].extend([start_pos + vi for vi in v])
start_pos += num_dims
return dict(concat_feature_to_index) if concat_feature_to_index.keys() else None
class Concat(ModelLayer):
"""
Construct Concat layer
Assume that first dimension is batch,
Example:
embedding_dim = 64
input_record = self.new_record(schema.Struct(
('input1', schema.Scalar((np.float32, (embedding_dim, )))),
('input2', schema.Scalar((np.float32, (embedding_dim, )))),
('input3', schema.Scalar((np.float32, (embedding_dim, )))),
))
output = self.model.Concat(input_record)
self.assertEqual(
schema.Scalar((np.float32, ((len(input_record.fields) * embedding_dim, )))),
output
)
# Note that in Concat layer we assume first dimension is batch.
# so input is B * embedding_dim
# add_axis=1 make it B * 1 * embedding_dim
# Concat on axis=1 make it B * N * embedding_dim
output = self.model.Concat(input_record, axis=1, add_axis=1)
self.assertEqual(
schema.Scalar((np.float32, ((len(input_record.fields), embedding_dim)))),
output
)
"""
def __init__(self, model, input_record, axis=1, add_axis=0,
name='concat', **kwargs):
super(Concat, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
self.add_axis = add_axis
assert not (axis == 0 and add_axis == 1), \
"It's not allowed to add axis=0"
assert isinstance(input_record, schema.Struct),\
"Incorrect input type. Expected Struct, but received: {0}".\
format(input_record)
shapes = []
for field_name, field_type in viewitems(input_record.fields):
assert isinstance(field_type, schema.Scalar),\
"Incorrect input type for {}. Expected Scalar, but got: {}".\
format(field_name, field_type)
# Assume that first dimension is batch, so actual axis in shape is
# axis - 1
shape = list(field_type.field_type().shape)
if add_axis:
shape.insert(axis - 1, 1)
assert len(shape) >= axis,\
                "Concat expects limited dimensions of the input tensor"
shapes.append(shape)
logger.info('Concat Layer input shapes: ' + str(shapes))
if axis == 0:
self.output_schema = schema.from_blob_list(
input_record[0],
[self.get_next_blob_reference('output')]
)
return
concat_dim = 0
for shape in shapes:
concat_dim += shape[axis - 1]
shape[axis - 1] = 0
assert shape == shapes[0],\
"Shapes {0} and {1} are not compatible for Concat".\
format(shape, shapes[0])
output_dims = shapes[0]
output_dims[axis - 1] = concat_dim
logger.info('Concat Layer output_dims: ' + str(output_dims))
self.output_schema = schema.Scalar(
(np.float32, output_dims),
self.get_next_blob_reference('output'))
record_to_concat = input_record.fields.values()
concated_feature_to_index = get_concatenated_feature_to_index(
record_to_concat
)
if concated_feature_to_index:
metadata = schema.Metadata(
feature_specs=schema.FeatureSpec(
feature_to_index=concated_feature_to_index
)
)
self.output_schema.set_metadata(metadata)
def add_ops(self, net):
net.Concat(
self.input_record.field_blobs(),
[
self.output_schema.field_blobs()[0],
self.output_schema.field_blobs()[0] + "_concat_dims"
],
axis=self.axis,
add_axis=self.add_axis,
)
|
pytorch-master
|
caffe2/python/layers/concat.py
|
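A NumPy sketch of the two Concat modes from the docstring, assuming batch-first inputs; the shapes are illustrative:

import numpy as np

B, D = 4, 64
inputs = [np.random.randn(B, D).astype(np.float32) for _ in range(3)]

# axis=1, add_axis=0: concatenate feature vectors into (B, 3 * D)
flat = np.concatenate(inputs, axis=1)
assert flat.shape == (B, 3 * D)

# axis=1, add_axis=1: insert a new axis first, giving (B, 3, D)
stacked = np.concatenate([x[:, None, :] for x in inputs], axis=1)
assert stacked.shape == (B, 3, D)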
## @package position_weighted
# Module caffe2.python.layers.position_weighted
import logging
import numpy as np
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
)
from caffe2.python.layers.tags import Tags
logger = logging.getLogger(__name__)
class PositionWeighted(ModelLayer):
def __init__(self, model, input_record, weight_optim=None,
name="position_weights"):
super(PositionWeighted, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.List), "Incorrect input type"
length_metadata = input_record.lengths.metadata
max_length = (length_metadata.categorical_limit if length_metadata is
not None else None)
if max_length is not None:
self.shape = max_length
else:
self.shape = get_categorical_limit(input_record)
logger.warning(
'{}: categorical_limit of lengths is not available, using '
'categorical_limit of the keys: {}'.format(
str(input_record.lengths()), self.shape))
self.pos_w = self.create_param(param_name='pos_w',
shape=[self.shape, ],
initializer=('ConstantFill', {'value': 1.0}),
optimizer=weight_optim)
self.output_schema = schema.Struct(
('position_weights',
schema.Scalar((np.float32, self.shape),
self.get_next_blob_reference("pos_w_gather")))
)
self.tags.update({Tags.HANDLE_AS_SPARSE_LAYER})
def get_memory_usage(self):
return self.shape
def add_ops(self, net):
inc_seq = net.LengthsRangeFill(
[self.input_record.lengths()],
self.input_record.lengths() + '_pos_w_seq'
)
net.Gather(
[self.pos_w, inc_seq],
self.output_schema.position_weights.field_blobs())
|
pytorch-master
|
caffe2/python/layers/position_weighted.py
|
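A NumPy sketch of what PositionWeighted.add_ops produces: LengthsRangeFill emits a 0-based position index within each list, and Gather looks up a learned weight per position; the weight values here are illustrative:

import numpy as np

lengths = np.array([2, 3])          # two lists: 2 ids and 3 ids
pos_w = np.array([1.0, 0.8, 0.5])   # learned weight per position

# LengthsRangeFill: [0, 1] ++ [0, 1, 2] -> [0, 1, 0, 1, 2]
inc_seq = np.concatenate([np.arange(n) for n in lengths])

# Gather: one weight per item, later consumed as IdScoreList scores
position_weights = pos_w[inc_seq]   # [1.0, 0.8, 1.0, 0.8, 0.5]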
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class LayerNormalization(ModelLayer):
def __init__(
self,
model,
input_record,
name='layer_normalization',
scale_optim=None,
bias_optim=None,
epsilon=1e-4,
axis=1,
use_layer_norm_op=True,
scale_init_value=1.0,
**kwargs
):
super(LayerNormalization, self).__init__(
model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), (
"Incorrect input type: {}".format(input_record))
self.input_shape = input_record.field_type().shape
self.axis = axis
assert len(self.input_shape) >= 1, (
"This layer supports only >= 2D tensors")
input_dims = self.input_shape[0]
self.output_schema = schema.Scalar(
(np.float32, self.input_shape),
self.get_next_blob_reference('output')
)
self.scale = self.create_param(param_name='scale',
shape=[input_dims],
initializer=('ConstantFill', {'value': scale_init_value}),
optimizer=scale_optim)
self.bias = self.create_param(param_name='bias',
shape=[input_dims],
initializer=('ConstantFill', {'value': 0.0}),
optimizer=bias_optim)
self.use_layer_norm_op = use_layer_norm_op
if self.use_layer_norm_op:
self.epsilon = epsilon
else:
assert len(self.input_shape) == 1, (
"When using alternative implementation, "
"input data can only be 2D"
)
self.epsilon = model.maybe_add_global_constant(
"%s_epsilon" % self.name, float(epsilon)
)
def add_ops_with_layer_norm_op(self, net):
input_blob = self.input_record.field_blobs()
ln_output = self.output_schema.field_blobs()
output_blobs = [net.NextScopedBlob('ln_output'), net.NextScopedBlob('ln_mean'),
net.NextScopedBlob('ln_stdev')]
normalized, mean, stdev = net.LayerNorm(input_blob,
output_blobs,
axis=self.axis,
epsilon=self.epsilon)
scaled = net.Mul(
[normalized, self.scale],
[net.NextScopedBlob('ln_scaled')],
broadcast=1,
axis=self.axis,
)
net.Add(
[scaled, self.bias],
ln_output,
broadcast=1,
axis=self.axis,
)
def add_ops_without_layer_norm_op(self, net):
        # This alternative implementation differs in two ways:
        # 1. it uses multiple ops to replicate the LayerNorm op
        # 2. it does not use legacy broadcast
ln_output = net.NextScopedBlob("ln_output")
ln_mean = net.NextScopedBlob("ln_mean")
ln_stdev = net.NextScopedBlob("ln_stdev")
ln_mean_arr = net.NextScopedBlob("ln_mean_arr")
net.ReduceBackMean(self.input_record.field_blobs(), [ln_mean_arr])
net.ExpandDims([ln_mean_arr], [ln_mean], dims=[1])
ln_centered = net.NextScopedBlob("ln_centered")
net.Sub(self.input_record.field_blobs() + [ln_mean], [ln_centered])
ln_sqr = net.NextScopedBlob("ln_sqr")
net.Sqr([ln_centered], [ln_sqr])
ln_sqr_mean = net.NextScopedBlob("ln_sqr_mean")
net.ReduceBackMean([ln_sqr], [ln_sqr_mean])
ln_var = net.NextScopedBlob("ln_var")
net.Add([ln_sqr_mean, self.epsilon], ln_var)
ln_std_arr = net.NextScopedBlob("ln_std_arr")
net.Pow([ln_var], [ln_std_arr], exponent=0.5)
net.ExpandDims([ln_std_arr], [ln_stdev], dims=[1])
net.Div([ln_centered, ln_stdev], [ln_output])
ln_scaled = net.NextScopedBlob("ln_scaled")
net.Mul([ln_output, self.scale], [ln_scaled])
net.Add([ln_scaled, self.bias], self.output_schema.field_blobs())
def add_ops(self, net):
if self.use_layer_norm_op:
self.add_ops_with_layer_norm_op(net)
else:
self.add_ops_without_layer_norm_op(net)
|
pytorch-master
|
caffe2/python/layers/layer_normalization.py
|
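A NumPy sketch of the alternative path in add_ops_without_layer_norm_op for a 2D (batch x features) input; it should agree with the LayerNorm op:

import numpy as np

def layer_norm_manual(x, scale, bias, epsilon=1e-4):
    # Mirrors ReduceBackMean / Sub / Sqr / Add(eps) / Pow(0.5) / Div / Mul / Add
    mean = x.mean(axis=1, keepdims=True)
    centered = x - mean
    var = (centered ** 2).mean(axis=1, keepdims=True)
    stdev = np.sqrt(var + epsilon)      # epsilon is added to the variance
    return (centered / stdev) * scale + bias

x = np.random.randn(4, 8).astype(np.float32)
out = layer_norm_manual(x, scale=np.ones(8), bias=np.zeros(8))
assert out.shape == x.shape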
## @package margin_rank_loss
# Module caffe2.python.layers.margin_rank_loss
from caffe2.python import schema, core
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class MarginRankLoss(ModelLayer):
def __init__(self, model, input_record, name='margin_rank_loss',
margin=0.1, average_loss=False, **kwargs):
super(MarginRankLoss, self).__init__(model, name, input_record, **kwargs)
assert margin >= 0, ('For hinge loss, margin should be no less than 0')
self._margin = margin
self._average_loss = average_loss
assert schema.is_schema_subset(
schema.Struct(
('pos_prediction', schema.Scalar()),
('neg_prediction', schema.List(np.float32)),
),
input_record
)
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output'))
def add_ops(self, net):
neg_score = self.input_record.neg_prediction['values']()
pos_score = net.LengthsTile(
[
self.input_record.pos_prediction(),
self.input_record.neg_prediction['lengths']()
],
net.NextScopedBlob('pos_score_repeated')
)
const_1 = net.ConstantFill(
neg_score,
net.NextScopedBlob('const_1'),
value=1,
dtype=core.DataType.INT32
)
rank_loss = net.MarginRankingCriterion(
[pos_score, neg_score, const_1],
net.NextScopedBlob('rank_loss'),
margin=self._margin,
)
if self._average_loss:
net.AveragedLoss(rank_loss, self.output_schema.field_blobs())
else:
net.ReduceFrontSum(rank_loss, self.output_schema.field_blobs())
|
pytorch-master
|
caffe2/python/layers/margin_rank_loss.py
|
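A NumPy sketch of the composition in MarginRankLoss.add_ops; with label y = 1, MarginRankingCriterion reduces to max(0, margin - (pos - neg)), and LengthsTile repeats each positive score across its negatives. The values are illustrative:

import numpy as np

margin = 0.1
pos_prediction = np.array([0.9, 0.4])    # one positive score per example
neg_lengths = np.array([2, 1])           # number of negatives per example
neg_values = np.array([0.2, 0.8, 0.5])   # flattened negative scores

# LengthsTile: repeat each positive to line up with its negatives
pos_repeated = np.repeat(pos_prediction, neg_lengths)   # [0.9, 0.9, 0.4]

# MarginRankingCriterion with y = 1, then ReduceFrontSum
rank_loss = np.maximum(0.0, margin - (pos_repeated - neg_values))
total_loss = rank_loss.sum()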
## @package batch_softmax_loss
# Module caffe2.python.layers.batch_softmax_loss
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class BatchSoftmaxLoss(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_softmax_loss',
label_smoothing_matrix=None,
label_prob=False,
scale=1.0,
average_by_batch_size=False,
**kwargs
):
super(BatchSoftmaxLoss, self).__init__(
model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('prediction', schema.Scalar()),
),
input_record
)
self.label_prob = label_prob
self.scale = scale
self.average_by_batch_size = average_by_batch_size
# label smoothing matrix: a K * K matrix where K is the label
# cardinality; (i, j) element is the value of for label i
# treated/smoothed as label j
self.label_smoothing_matrix = label_smoothing_matrix
if self.label_smoothing_matrix is not None:
self.initialize_label_smoothing_constants()
self.output_schema = schema.Struct(
(
'softmax', schema.Scalar(
input_record.prediction.field_type(),
self.get_next_blob_reference('softmax')
)
),
(
'loss', schema.Scalar(
np.float32, self.get_next_blob_reference('loss')
)
),
)
def initialize_label_smoothing_constants(self):
assert self.label_smoothing_matrix is not None
self.label_smoothing_matrix = np.array(
self.label_smoothing_matrix).astype(np.float32)
assert len(self.label_smoothing_matrix.shape) == 2
label_dim = self.label_smoothing_matrix.shape[0]
assert label_dim == self.label_smoothing_matrix.shape[1]
self.label_smoothing_matrix = self.model.add_global_constant(
'%s_label_smoothing_matrix' % self.name,
array=self.label_smoothing_matrix,
dtype=np.dtype(np.float32),
)
self.label_dim = self.model.add_global_constant(
'%s_label_dim' % self.name,
array=label_dim,
dtype=np.dtype(np.int64),
)
# default case: label is given NOT as target distribution
# but when used in label smoothing, the label must be in probabilities
self.label_prob = True
def compute_smoothed_label(self, net):
assert self.label_smoothing_matrix is not None
label = self.input_record.label()
original_label_type = self.input_record.label.field_type()
if original_label_type.base != np.int64:
int64_label = net.NextScopedBlob('int64_label')
net.Cast([label], [int64_label], to=core.DataType.INT64)
else:
int64_label = label
one_hot_label = net.NextScopedBlob('one_hot_label')
smoothed_label = net.NextScopedBlob('smoothed_label')
net.OneHot([int64_label, self.label_dim], [one_hot_label])
net.MatMul([one_hot_label, self.label_smoothing_matrix], smoothed_label)
return smoothed_label
def add_ops(self, net):
label = self.input_record.label.field_blobs()
if self.label_smoothing_matrix is not None:
label = [self.compute_smoothed_label(net)]
elif not self.label_prob:
if self.input_record.label.field_types()[0].base != np.int32:
label = [
net.Cast(label,
net.NextScopedBlob('int32_label'),
to=core.DataType.INT32)
]
softmax_input = self.input_record.prediction.field_blobs() + label
if 'weight' in self.input_record:
weight_blob = self.input_record.weight()
if self.input_record.weight.field_type().base != np.float32:
weight_blob = net.Cast(
weight_blob,
weight_blob + '_float32',
to=core.DataType.FLOAT
)
softmax_input += [weight_blob]
net.SoftmaxWithLoss(
softmax_input,
self.output_schema.field_blobs(),
label_prob=self.label_prob,
scale=self.scale,
average_by_batch_size=self.average_by_batch_size,
)
|
pytorch-master
|
caffe2/python/layers/batch_softmax_loss.py
|
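A NumPy sketch of compute_smoothed_label: with a K x K smoothing matrix, each integer label is one-hot encoded and mapped to the matching probability row; the matrix values are illustrative:

import numpy as np

# Row i is the target distribution used when the true label is i
label_smoothing_matrix = np.array([
    [0.90, 0.05, 0.05],
    [0.05, 0.90, 0.05],
    [0.05, 0.05, 0.90],
], dtype=np.float32)

labels = np.array([0, 2, 1])                      # integer labels
one_hot = np.eye(3, dtype=np.float32)[labels]     # OneHot
smoothed = one_hot @ label_smoothing_matrix       # MatMul
# smoothed is then fed to SoftmaxWithLoss with label_prob=True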
## @package add_bias
# Module caffe2.python.layers.add_bias
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import math
class AddBias(ModelLayer):
def __init__(self, model, input_record, bias_init=None,
bias_optim=None, name='add_bias'):
super(AddBias, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_type().shape) > 0, (
"AddBias expects limited dimensions of the input tensor")
input_dims = input_record.field_type().shape[0]
assert input_dims > 0, (
"AddBias expects input dimensions > 0, got {}".format(input_dims))
scale = math.sqrt(1.0 / input_dims)
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.b = self.create_param(
param_name='b',
shape=[input_dims, ],
initializer=bias_init,
optimizer=bias_optim,
)
self.output_schema = schema.Scalar(
(input_record.field_type().base, (input_dims, )),
self.get_next_blob_reference('output')
)
def add_ops(self, net):
net.Add(self.input_record.field_blobs() + [self.b],
self.output_schema.field_blobs(), broadcast=1)
|
pytorch-master
|
caffe2/python/layers/add_bias.py
|
## @package fc_without_bias
# Module caffe2.python.layers.fc_without_bias
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
class FCWithoutBias(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
weight_init=None,
weight_optim=None,
name='fc_without_bias',
uniform_weight_init_scale_numerator=1.0,
**kwargs
):
super(FCWithoutBias, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_types()[0].shape) > 0, (
"FCWithoutBias expects limited dimensions of the input tensor"
)
input_dims = input_record.field_types()[0].shape[0]
assert input_dims > 0, (
"FCWithoutBias expects input dimensions > 0, got {}".format(input_dims)
)
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
self.get_next_blob_reference('output')
)
scale = math.sqrt(uniform_weight_init_scale_numerator / input_dims)
weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale,
'max': scale}
)
self.w = self.create_param(param_name='w',
shape=[output_dims, input_dims],
initializer=weight_init,
optimizer=weight_optim)
def _add_ops(self, net, params):
net.MatMul(
self.input_record.field_blobs() + params,
self.output_schema.field_blobs(), trans_b=1, **self.kwargs
)
@property
def param_blobs(self):
return [self.w]
|
pytorch-master
|
caffe2/python/layers/fc_without_bias.py
|
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class ArcCosineFeatureMap(ModelLayer):
"""
A general version of the arc-cosine kernel feature map (s = 1 restores
the original arc-cosine kernel feature map).
Applies H(x) * x^s, where H is the Heaviside step function and x is the
input after applying FC (such that x = w * x_orig + b).
For more information, see the original paper:
http://cseweb.ucsd.edu/~saul/papers/nips09_kernel.pdf
Inputs :
output_dims -- dimensions of the output vector
s -- degree to raise transformed features
scale -- amount to scale the standard deviation
weight_init -- initialization distribution for weight parameter
        bias_init -- initialization distribution for bias parameter
weight_optim -- optimizer for weight params; None for random features
bias_optim -- optimizer for bias param; None for random features
set_weight_as_global_constant -- if True, initialized random parameters
will be constant across all distributed
instances of the layer
initialize_output_schema -- if True, initialize output schema as Scalar
from Arc Cosine; else output schema is None
"""
def __init__(
self,
model,
input_record,
output_dims,
s=1,
scale=1.0,
weight_init=None,
bias_init=None,
weight_optim=None,
bias_optim=None,
set_weight_as_global_constant=False,
initialize_output_schema=True,
name='arc_cosine_feature_map',
**kwargs):
super(ArcCosineFeatureMap, self).__init__(model, name, input_record,
**kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.params = []
self.model = model
self.set_weight_as_global_constant = set_weight_as_global_constant
self.input_dims = input_record.field_type().shape[0]
assert self.input_dims >= 1, "Expected input dimensions >= 1, got %s" \
% self.input_dims
if initialize_output_schema:
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
model.net.NextScopedBlob(name + '_output')
)
self.output_dims = output_dims
assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
% self.output_dims
self.s = s
assert (self.s >= 0), "Expected s >= 0, got %s" % self.s
assert isinstance(self.s, int), "Expected s to be type int, got type %s" \
% type(self.s)
assert (scale > 0.0), "Expected scale > 0, got %s" % scale
self.stddev = scale * np.sqrt(1.0 / self.input_dims)
# Initialize train_init_net parameters
# Random Parameters
if set_weight_as_global_constant:
w_init = np.random.normal(scale=self.stddev,
size=(self.output_dims, self.input_dims))
b_init = np.random.uniform(low=-0.5 * self.stddev,
high=0.5 * self.stddev,
size=self.output_dims)
self.random_w = self.model.add_global_constant(
name=self.name + "_fixed_rand_W",
array=w_init
)
self.random_b = self.model.add_global_constant(
name=self.name + "_fixed_rand_b",
array=b_init
)
else:
(self.random_w, self.random_b) = self._initialize_params(
'random_w',
'random_b',
w_init=weight_init,
b_init=bias_init,
w_optim=weight_optim,
b_optim=bias_optim
)
def _initialize_params(self, w_name, b_name, w_init=None, b_init=None,
w_optim=None, b_optim=None):
"""
Initializes the Layer Parameters for weight and bias terms for features
        Inputs:
            w_name -- param name for the weight blob
            b_name -- param name for the bias blob
            w_init -- initialization distribution for weight parameter
            b_init -- initialization distribution for bias parameter
            w_optim -- optimizer to use for w; if None, then will use no optimizer
            b_optim -- optimizer to use for b; if None, then will use no optimizer
"""
w_init = w_init if w_init else (
'GaussianFill', {'mean': 0.0, 'std': self.stddev}
)
w_optim = w_optim if w_optim else self.model.NoOptim
b_init = b_init if b_init else (
'UniformFill', {'min': -0.5 * self.stddev, 'max': 0.5 * self.stddev}
)
b_optim = b_optim if b_optim else self.model.NoOptim
w_param = self.create_param(param_name=w_name,
shape=(self.output_dims, self.input_dims),
initializer=w_init,
optimizer=w_optim)
b_param = self.create_param(param_name=b_name,
shape=[self.output_dims],
initializer=b_init,
optimizer=b_optim)
return [w_param, b_param]
def _heaviside_with_power(self, net, input_features, output_blob, s):
"""
Applies Heaviside step function and Relu / exponentiation to features
depending on the value of s.
Inputs:
net -- net with operators
            input_features -- features to process
output_blob -- output blob reference
s -- degree to raise the transformed features
"""
if s == 0:
softsign_features = net.Softsign([input_features],
net.NextScopedBlob('softsign'))
return net.Relu(softsign_features, output_blob)
elif s == 1:
return net.Relu([input_features],
output_blob)
else:
relu_features = net.Relu([input_features],
net.NextScopedBlob('relu_rand'))
pow_features = net.Pow([input_features],
net.NextScopedBlob('pow_rand'),
exponent=float(s - 1))
return net.Mul([relu_features, pow_features],
output_blob)
def add_ops(self, net):
input_blob = self.input_record.field_blobs()
# Random features: wx + b
random_features = net.FC(input_blob + [self.random_w, self.random_b],
net.NextScopedBlob('random_features'))
# Process random features
self._heaviside_with_power(net,
random_features,
self.output_schema.field_blobs(),
self.s)
|
pytorch-master
|
caffe2/python/layers/arc_cosine_feature_map.py
|
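A NumPy sketch of _heaviside_with_power; for integer s >= 2 the Relu / Pow / Mul composition computes relu(x) * x^(s-1), which equals H(x) * x^s:

import numpy as np

def heaviside_with_power(x, s):
    if s == 0:
        # Relu(Softsign(x)): a bounded, step-like response
        return np.maximum(x / (1.0 + np.abs(x)), 0.0)
    elif s == 1:
        return np.maximum(x, 0.0)                       # plain Relu
    else:
        # Relu(x) * x^(s-1) == H(x) * x^s for integer s >= 2
        return np.maximum(x, 0.0) * np.power(x, s - 1)

x = np.array([-2.0, -0.5, 0.5, 2.0])
print(heaviside_with_power(x, 2))   # [0.  0.  0.25 4. ]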
## @package bpr_loss
# Module caffe2.python.layers.bpr_loss
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
# ref: https://arxiv.org/pdf/1205.2618.pdf
class BPRLoss(ModelLayer):
def __init__(self, model, input_record, name='bpr_loss', **kwargs):
super(BPRLoss, self).__init__(model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('pos_prediction', schema.Scalar()),
('neg_prediction', schema.List(np.float32)),
),
input_record
)
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output'))
def add_ops(self, net):
# formula:
        # loss = - SUM(Ln(Sigmoid(Similarity(u, pos) - Similarity(u, neg))))
neg_score = self.input_record.neg_prediction['values']()
pos_score = net.LengthsTile(
[
self.input_record.pos_prediction(),
self.input_record.neg_prediction['lengths']()
],
net.NextScopedBlob('pos_score_repeated')
)
# https://www.tensorflow.org/api_docs/python/tf/math/log_sigmoid
softplus = net.Softplus([net.Sub([neg_score, pos_score])])
net.ReduceFrontSum(softplus, self.output_schema.field_blobs())
|
pytorch-master
|
caffe2/python/layers/bpr_loss.py
|
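A NumPy sketch of the BPR loss as composed in add_ops; -log(sigmoid(pos - neg)) is evaluated as softplus(neg - pos), matching the Sub followed by Softplus:

import numpy as np

def softplus(x):
    return np.logaddexp(0.0, x)     # log(1 + exp(x)), numerically stable

pos_prediction = np.array([0.9, 0.4])
neg_lengths = np.array([2, 1])
neg_values = np.array([0.2, 0.8, 0.5])

pos_repeated = np.repeat(pos_prediction, neg_lengths)   # LengthsTile
# loss = -sum(log(sigmoid(pos - neg))) == sum(softplus(neg - pos))
loss = softplus(neg_values - pos_repeated).sum()        # ReduceFrontSum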
from importlib import import_module
import pkgutil
import sys
from . import layers
def import_recursive(package):
"""
Takes a package and imports all modules underneath it
"""
pkg_dir = package.__path__
module_location = package.__name__
for (_module_loader, name, ispkg) in pkgutil.iter_modules(pkg_dir):
module_name = "{}.{}".format(module_location, name) # Module/package
module = import_module(module_name)
if ispkg:
import_recursive(module)
def find_subclasses_recursively(base_cls, sub_cls):
cur_sub_cls = base_cls.__subclasses__()
sub_cls.update(cur_sub_cls)
for cls in cur_sub_cls:
find_subclasses_recursively(cls, sub_cls)
import_recursive(sys.modules[__name__])
model_layer_subcls = set()
find_subclasses_recursively(layers.ModelLayer, model_layer_subcls)
for cls in list(model_layer_subcls):
layers.register_layer(cls.__name__, cls)
|
pytorch-master
|
caffe2/python/layers/__init__.py
|
# @package batch_huber_loss
# Module caffe2.python.layers.batch_huber_loss
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchHuberLoss(ModelLayer):
def __init__(self, model, input_record, name='batch_huber_loss', delta=1.0, **kwargs):
super(BatchHuberLoss, self).__init__(model, name, input_record, **kwargs)
assert delta > 0
self._delta = delta
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('prediction', schema.Scalar())
),
input_record
)
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output'))
def add_ops(self, net):
prediction = net.Squeeze(
self.input_record.prediction(),
net.NextScopedBlob('squeezed_prediction'),
dims=[1]
)
label = self.input_record.label.field_blobs()
if self.input_record.label.field_type().base != (
self.input_record.prediction.field_type().base):
label = net.Cast(
label,
net.NextScopedBlob('cast_label'),
to=schema.data_type_for_dtype(
self.input_record.prediction.field_type()
)
)
const_delta = net.ConstantFill(
label,
net.NextScopedBlob("delta"),
value=self._delta,
dtype=core.DataType.FLOAT,
)
label = net.StopGradient(
label,
net.NextScopedBlob('stopped_label')
)
const_delta = net.StopGradient(
const_delta,
net.NextScopedBlob('stopped_delta')
)
# abs_error = np.abs(true - pred)
abs_error = net.L1Distance(
[label, prediction], net.NextScopedBlob("abs_error")
)
# quadratic = 0.5*min(abs_error, delta)^2, linear = delta*max(abs_error-delta, 0)
min_error = net.Min(
[abs_error, const_delta], net.NextScopedBlob("min_error_delta")
)
quadratic_term = net.Scale(
net.Sqr(min_error), scale=float(0.5)
)
linear_term = net.Mul(
[
net.Sub([abs_error, min_error]),
const_delta,
],
net.NextScopedBlob("huber_linear_term")
)
# huber = 0.5 * min(abs_error, delta)^2 + delta * max(abs_error-delta, 0)
huber_dist = net.Add(
[quadratic_term, linear_term], net.NextScopedBlob("huber_dist")
)
if 'weight' in self.input_record.fields:
weight_blob = self.input_record.weight()
if self.input_record.weight.field_type().base != np.float32:
weight_blob = net.Cast(
weight_blob,
weight_blob + '_float32',
to=core.DataType.FLOAT
)
weight_blob = net.StopGradient(
[weight_blob],
[net.NextScopedBlob('weight_stop_gradient')],
)
huber_dist = net.Mul(
[huber_dist, weight_blob],
net.NextScopedBlob("weighted_huber_distance"),
)
net.AveragedLoss(huber_dist, self.output_schema.field_blobs())
|
pytorch-master
|
caffe2/python/layers/batch_huber_loss.py
|
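A NumPy sketch of the Huber decomposition in BatchHuberLoss.add_ops; the quadratic plus linear terms reproduce the classic piecewise definition:

import numpy as np

def huber(label, prediction, delta=1.0):
    abs_error = np.abs(label - prediction)       # L1Distance
    min_error = np.minimum(abs_error, delta)     # Min with constant delta
    quadratic = 0.5 * min_error ** 2             # 0.5 * min(|e|, d)^2
    linear = (abs_error - min_error) * delta     # d * max(|e| - d, 0)
    return (quadratic + linear).mean()           # AveragedLoss

label = np.array([1.0, 2.0, 3.0])
prediction = np.array([1.2, 0.0, 3.1])
print(huber(label, prediction, delta=1.0))       # ~0.5083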
## @package sparse_lookup
# Module caffe2.python.layers.sparse_lookup
from caffe2.python.optimizer import FP16_ENGINES, Optimizer
from caffe2.python.helpers.arg_scope import get_current_scope
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
get_key,
IdList,
IdScoreList,
IdListWithEvicted,
IdScoreListWithEvicted,
LayerPsParam,
ModelLayer,
almost_equal_schemas,
)
import collections
import functools
import logging
import math
import numpy as np
import operator
logger = logging.getLogger(__name__)
def get_trainer_version_based_on_optim(optim_def):
if isinstance(optim_def, Optimizer) and hasattr(optim_def, "engine"):
logger.info(
"Attempting to set trainer version for engine {}".format(optim_def.engine)
)
if optim_def.engine in FP16_ENGINES:
logger.info("Setting FP16 trainer for engine {}".format(optim_def.engine))
return "fp16"
else:
logger.info("Setting FP32 trainer for engine {}".format(optim_def.engine))
return "fp32"
else:
return "fp32"
def get_sparse_lookup_predictor_version(
version,
blob_size=None,
min_blob_size_4bits=None,
embedding_dim=None,
sparse_feature_name=None,
):
assert version in {
'fp32', 'fp16', 'uint8rowwise', 'fused_uint8rowwise', 'fused_uint4rowwise'
}, "Unexpected version of sparse_lookup layer {0}".format(version)
if version == 'fused_uint4rowwise':
if (
blob_size is not None
and min_blob_size_4bits is not None
and embedding_dim is not None
):
if blob_size < min_blob_size_4bits:
logger.info(
"{} fall back to uint8 because lookup table size {} < min_blob_size_4bits {}".format(
sparse_feature_name,
blob_size,
min_blob_size_4bits,
)
)
version = 'fused_uint8rowwise'
if embedding_dim % 2 == 1:
logger.info(
"{} fall back to uint8 because lookup table dimension {} is not divisible by 2".format(
sparse_feature_name, embedding_dim
)
)
version = 'fused_uint8rowwise'
else:
raise ValueError(
(
"When 4 bit quantization is enabled for {}, "
"(i.e., Sparse lookup predictor version:{}), "
"requires arguments blob_size:{}, "
"min_blob_size_4bits:{}, embedding_dim:{}"
).format(
sparse_feature_name,
version,
blob_size,
min_blob_size_4bits,
embedding_dim
)
)
return version
def get_sparse_lookup_trainer_version(version):
assert version in {'fp32', 'fp16'},\
"Unexpected version of sparse_lookup layer {0}".format(version)
return version
def _is_id_list(input_record):
return almost_equal_schemas(input_record, IdList)
def _is_id_score_list(input_record):
return almost_equal_schemas(input_record,
IdScoreList,
check_field_types=False)
class SparseLookup(ModelLayer):
_id_list_supported_reducers = [
'LogMeanExp', 'LogSumExp', 'Max', 'Mean', 'Sum',
'WeightedSum', 'WeightedMean', 'Sqrt', 'None']
_id_score_list_supported_reducers = [
'PositionWeighted', 'RecencyWeighted', 'Mean', 'Sum', 'WeightedSum',
'WeightedMean', 'None'
]
_fp16_compatible_init_op_types = [
'Float16UniformFill'
]
_fp16_compatible_reducers = [
'Sum', 'Mean', 'Sqrt', 'PositionWeighted', 'RecencyWeighted',
]
def __init__(self, model, input_record, inner_shape, reducer,
weight_init=None, weight_optim=None,
name='sparse_lookup', regularizer=None, use_external_weights=False,
uniform_weight_init_scale_numerator=1.0, **kwargs):
super(SparseLookup, self).__init__(model, name, input_record, **kwargs)
self.sparse_key = get_key(self.input_record)()
logger.info("Setup the sparse lookup layer for " + self.sparse_key)
# TODO Add some asserts about input type
if isinstance(inner_shape, int):
inner_shape = [inner_shape]
assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
"Unexpected type for inner_shape, expected list or tuple, got {0} for {1}".\
format(type(inner_shape), self.sparse_key)
if reducer == "PositionWeighted":
assert _is_id_score_list(self.input_record), (
                "PositionWeighted only supports IdScoreList, but got {} for {}. "
                "Please use the PositionWeighted layer to convert IdList "
                "to IdScoreList."
            ).format(repr(self.input_record), self.sparse_key)
self.external_weights = self.input_record.values()
elif reducer == "RecencyWeighted":
assert _is_id_score_list(self.input_record), (
"RecencyWeighted only supports IdScoreList, "
"while the sparse feature {} is not.".format(self.sparse_key)
)
self.external_weights = self.input_record.values()
# TODO: create a new type of reducer with external weights to wrap
# this and the above two cases since essentially their input formats
# are the same.
elif use_external_weights:
assert _is_id_score_list(self.input_record), (
"Use_external_weights only supports IdScoreList, "
"while the sparse feature {} is not.".format(self.sparse_key)
)
assert reducer in ["Sum", "WeightedSum"], (
                "use_external_weights only supports the Sum and WeightedSum "
                "reducers, while the reducer is {}.".format(reducer)
)
self.external_weights = self.input_record.values()
self.reducer = reducer
self.use_external_weights = use_external_weights
input_dim = get_categorical_limit(self.input_record)
assert input_dim > 0, "{} should have categorical limit > 0, but got {}".format(
self.sparse_key, input_dim
)
self.input_dim = input_dim
self.shape = [input_dim] + inner_shape
self.trainer_version = get_trainer_version_based_on_optim(
weight_optim
)
self.uniform_weight_init_scale_numerator = uniform_weight_init_scale_numerator
default_init_op = self._get_default_init_op()
self.weight_init = weight_init or default_init_op
self.evicted_values = None
if schema.equal_schemas(
self.input_record, IdListWithEvicted
) or schema.equal_schemas(
self.input_record, IdScoreListWithEvicted, check_field_types=False
):
self.evicted_values = self.input_record._evicted_values
# If fp16 is used, make sure fp16 init op is used
if self.trainer_version == "fp16":
assert self.reducer in self._fp16_compatible_reducers or use_external_weights, (
"Fp16 training is enabled. The reducer specified is not supported. "
"Got {}. Supported reducers: {}. Right now, in general, sum, mean, "
"positional pooling are supported. Attention is not. Please check "
"if there is fp16 trained sparse features using advanced pooling.".format(
self.reducer, self._fp16_compatible_reducers)
)
# if init op is UniformFill, we replace it directly
if self.weight_init[0] == "UniformFill":
self.weight_init = ("Float16UniformFill", self.weight_init[1])
assert self.weight_init[0] in self._fp16_compatible_init_op_types, (
"Fp16 training is enabled. Init op for weight parameter must be fp16 "
                "compatible. Got {}. Supported ops: {}".format(
self.weight_init[0],
self._fp16_compatible_init_op_types)
)
assert regularizer is None, "Regularizer is not compatible with fp16"
if self.input_record.lengths.metadata:
avg_length = self.input_record.lengths.metadata.expected_value
else:
avg_length = None
self.w = self.create_param(
param_name='w',
shape=self.shape,
initializer=self.weight_init,
optimizer=weight_optim,
ps_param=LayerPsParam(
sparse_key=self.sparse_key,
average_length=avg_length),
regularizer=regularizer
)
if self.evicted_values:
self.reinit_vec = self.create_param(
param_name="reinit_vec",
shape=inner_shape,
initializer=self.weight_init,
optimizer=model.NoOptim,
regularizer=None,
)
self.scale_bias_init = ('ConstantFill', {'value': 0.0})
self.scale_bias = self.create_param(
param_name='scale_bias',
shape=[],
initializer=self.scale_bias_init,
optimizer=model.NoOptim,
)
self.output_schema = schema.Scalar(
(np.float32, inner_shape),
self.get_next_blob_reference('output'),
)
def get_memory_usage(self):
return functools.reduce(operator.mul, self.shape) * 4
def get_fp16_compatible_parameters(self):
return [self.w]
def support_8bit(self):
        # Rowwise quantization makes sense only if the shape is a 2D matrix
        # with second dimension >= 8
if len(self.shape) != 2 or self.shape[1] < 8:
return False
return True
def get_8bits_compatible_parameters(self, fused=True):
if not self.support_8bit():
return []
if fused:
RowwiseQuantized8BitsWeight = collections.namedtuple(
'RowwiseQuantized8BitsWeight', 'w'
)
return [RowwiseQuantized8BitsWeight(self.w)]
else:
RowwiseQuantized8BitsWeight = collections.namedtuple(
'RowwiseQuantized8BitsWeight', 'w, scale_bias'
)
return [RowwiseQuantized8BitsWeight(self.w, self.scale_bias)]
def _get_default_init_op(self):
scale = math.sqrt(self.uniform_weight_init_scale_numerator / self.input_dim)
if self.trainer_version == 'fp32':
default_weight_init = ('UniformFill', {'min': -scale, 'max': scale})
elif self.trainer_version == 'fp16':
default_weight_init = ("Float16UniformFill", {'min': -scale, 'max': scale})
else:
raise NotImplementedError(
                "Trainer version {} is not currently supported for sparse feature {}".format(
                    self.trainer_version, self.sparse_key
                )
)
return default_weight_init
def _gather_wrapper(self, net, version, in_indices, out):
# Gather can work on all kinds of input data types, and output
# data with the same type. Convert the output of Gather to float,
# because the follow-up Ops expect fp32.
if version == 'fp32':
return net.Gather([self.w, in_indices], out)
elif version == 'fp16':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
return net.HalfToFloat(gathered_w, out)
elif version == 'uint8rowwise':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
gathered_scale_bias = net.Gather(
[self.scale_bias, in_indices],
'gathered_scale_bias'
)
return net.Rowwise8BitQuantizedToFloat(
[gathered_w, gathered_scale_bias], out)
elif version == 'fused_uint8rowwise':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
return net.Fused8BitRowwiseQuantizedToFloat(gathered_w, out)
elif version == 'fused_uint4rowwise':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
return net.Fused4BitRowwiseQuantizedToFloat(gathered_w, out)
else:
raise "Unsupported version of operators in SparseLookup " +\
"layer: {0} for sparse feature {1}".format(
version, self.sparse_key
)
def _sparse_lengths_weighted_reducer(
self,
in_indices,
weights,
reducer,
net,
version,
grad_on_weights=0,
):
op_input = [
self.w,
weights,
in_indices,
self.input_record.lengths(),
]
layer_name = 'SparseLengths' + reducer
if version in ['fp32', 'fp16']:
# SparseLengths* Ops will accept either fp16 or fp32 embedding
# matrix and output fp32 pooled embedding
# A special case here is that we need FP16 engine for
# SparseLengthsWeightedSum when FP16 embeedings are used for
# correct backward updates
if reducer == "WeightedSum" and version == "fp16":
net.SparseLengthsWeightedSum(
op_input,
self.output_schema.field_blobs(),
grad_on_weights=grad_on_weights,
engine='FP16',
)
else:
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
grad_on_weights=grad_on_weights,
)
elif version == 'uint8rowwise':
op_input.insert(len(op_input), self.scale_bias)
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint4rowwise':
net.__getattr__(layer_name + 'Fused4BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0} for sparse feature {1}".format(
version, self.sparse_key
)
# deal with sparse features of id_list type
def _add_ops_id_list(self, net, version):
assert self.reducer in self._id_list_supported_reducers, (
"Unsupported reducer: {} for ID_LIST {}".format(
self.reducer, self.sparse_key
)
)
if self.reducer in ['Sum', 'Mean', 'WeightedSum', 'WeightedMean']:
op_input = [self.w,
self.input_record.items(),
self.input_record.lengths()]
# For id list features, the behaviors of 'Sum' and
# 'WeightedSum' are identical, since we can regard the weight on each
# id as 1. Similarly, for 'Mean' and 'WeightedMean'.
if self.reducer == 'WeightedSum':
self.reducer = 'Sum'
elif self.reducer == 'WeightedMean':
self.reducer = 'Mean'
layer_name = 'SparseLengths' + self.reducer
if version in ['fp32', 'fp16']:
# SparseLengths* Ops will accept either fp16 or fp32 embedding
# matrix and output fp32 pooled embedding
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
)
elif version == 'uint8rowwise':
op_input.insert(len(op_input), self.scale_bias)
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint4rowwise':
net.__getattr__(layer_name + 'Fused4BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0} for sparse feature {1}".format(
version, self.sparse_key
)
elif self.reducer == 'Sqrt':
sqrt_weight = net.LengthsToWeights(
[self.input_record.lengths()],
[net.NextScopedBlob('lengths_sqrt')],
power=0.5,
)
self._sparse_lengths_weighted_reducer(
self.input_record.items(),
sqrt_weight,
'WeightedSum', net, version)
elif self.reducer == 'None':
# Gather operator will gather the embedding for each id of
# each IdList.
self._gather_wrapper(net, version, self.input_record.items(),
self.output_schema.field_blobs())
else:
table_rows = self._gather_wrapper(
net, version, self.input_record.items(), 'table_rows')
segment_ids = net.LengthsToSegmentIds(
self.input_record.lengths(),
net.NextScopedBlob(self.input_record.lengths() + '_sid'))
net.__getattr__('SortedSegmentRange' + self.reducer)(
[table_rows, segment_ids],
self.output_schema.field_blobs(),
)
# deal with sparse features of id_score_list type
def _add_ops_id_score_list(self, net, version):
assert self.reducer in self._id_score_list_supported_reducers, (
"Unsupported reducer: {} for ID_SCORE_LIST {}".format(
self.reducer, self.sparse_key
)
)
if self.reducer in ['WeightedSum', 'WeightedMean']:
self._sparse_lengths_weighted_reducer(
self.input_record.keys(),
self.input_record.values(),
self.reducer, net, version)
elif self.reducer in ['PositionWeighted', 'RecencyWeighted'] or self.use_external_weights:
self._sparse_lengths_weighted_reducer(
self.input_record.keys(),
self.external_weights,
'WeightedSum', net, version, grad_on_weights=1)
elif self.reducer in ['Sum', 'Mean']:
op_input = [self.w,
self.input_record.keys(),
self.input_record.lengths()]
layer_name = 'SparseLengths' + self.reducer
if version in ['fp32', 'fp16']:
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
)
            elif version == 'uint8rowwise':
                op_input.insert(len(op_input), self.scale_bias)
                net.__getattr__(layer_name + '8BitsRowwise')(
                    op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint4rowwise':
net.__getattr__(layer_name + 'Fused4BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0} for sparse feature {1}".format(
version, self.sparse_key
)
elif self.reducer == 'None':
# Gather operator will gather the embedding for each id of
# each IdList.
self._gather_wrapper(net, version, self.input_record.keys(),
self.output_schema.field_blobs())
else:
raise "Only Sum, Mean, None are supported for IdScoreList input." +\
"Trying to create with {} for sparse feature {}".format(
self.reducer, self.sparse_key
)
def _add_ops(self, net, version='fp32', is_train=True):
if self.evicted_values and is_train:
net.CopyRowsToTensor(
[self.w, self.evicted_values.get(), self.reinit_vec], [self.w])
if _is_id_list(self.input_record):
self._add_ops_id_list(net, version=version)
elif _is_id_score_list(self.input_record):
self._add_ops_id_score_list(net, version=version)
else:
raise "Unsupported input type {0}".format(self.input_record)
def add_train_ops(self, net):
self._add_ops(net, self.trainer_version, is_train=True)
def add_ops(self, net):
version_info = get_current_scope().get(
get_sparse_lookup_predictor_version.__name__, {'version': 'fp32'}
)
lookup_table_blob_size = self.shape[0] * self.shape[1]
version = get_sparse_lookup_predictor_version(
version_info['version'],
blob_size=lookup_table_blob_size,
min_blob_size_4bits=(
version_info['min_blob_size_4bits']
if 'min_blob_size_4bits' in version_info
else None
),
embedding_dim=self.shape[1],
sparse_feature_name=self.sparse_key,
)
# TODO(amalevich): Layer should not be responsible for decision about
# quantization.
if not self.support_8bit() and version in {'uint8rowwise',
'fused_uint8rowwise',
'fused_uint4rowwise'}:
version = 'fp16'
self._add_ops(net, version, is_train=False)
|
pytorch-master
|
caffe2/python/layers/sparse_lookup.py
|
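A NumPy sketch of the core fp32 pooling in SparseLookup: SparseLengthsSum gathers embedding rows by id and sums them per segment, and the weighted variant scales each row first. Table sizes are illustrative:

import numpy as np

def sparse_lengths_sum(w, ids, lengths, weights=None):
    rows = w[ids]                                # Gather embedding rows
    if weights is not None:                      # SparseLengthsWeightedSum
        rows = rows * weights[:, None]
    offsets = np.concatenate([[0], np.cumsum(lengths)])
    return np.stack([
        rows[offsets[i]:offsets[i + 1]].sum(axis=0)
        for i in range(len(lengths))
    ])

w = np.random.randn(10, 4).astype(np.float32)    # 10 ids, dim-4 embeddings
ids = np.array([1, 3, 3, 7, 0])
lengths = np.array([2, 3])                       # two pooled examples
assert sparse_lengths_sum(w, ids, lengths).shape == (2, 4)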
## @package batch_sigmoid_cross_entropy_loss
# Module caffe2.python.layers.batch_sigmoid_cross_entropy_loss
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.tags import Tags
import numpy as np
class BatchSigmoidCrossEntropyLoss(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_sigmoid_cross_entropy_loss',
**kwargs
):
super(BatchSigmoidCrossEntropyLoss, self).__init__(
model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar(np.float32)),
('prediction', schema.Scalar(np.float32)),
),
input_record
)
assert input_record.prediction.field_type().shape == \
input_record.label.field_type().shape, \
"prediction and label must have the same shape"
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
(np.float32, tuple()), self.get_next_blob_reference('loss')
)
def add_ops(self, net):
sigmoid_cross_entropy = net.SigmoidCrossEntropyWithLogits(
[self.input_record.prediction(), self.input_record.label()],
net.NextScopedBlob('sigmoid_cross_entropy')
)
net.AveragedLoss(
sigmoid_cross_entropy, self.output_schema.field_blobs())
|
pytorch-master
|
caffe2/python/layers/batch_sigmoid_cross_entropy_loss.py
|
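A NumPy sketch of SigmoidCrossEntropyWithLogits followed by AveragedLoss, using the standard numerically stable form:

import numpy as np

def sigmoid_cross_entropy_with_logits(logits, labels):
    # -(y * log(sigmoid(x)) + (1 - y) * log(1 - sigmoid(x))), computed stably:
    return (np.maximum(logits, 0) - logits * labels
            + np.log1p(np.exp(-np.abs(logits))))

logits = np.array([[2.0, -1.0], [0.5, 3.0]], dtype=np.float32)
labels = np.array([[1.0, 0.0], [1.0, 1.0]], dtype=np.float32)
loss = sigmoid_cross_entropy_with_logits(logits, labels).mean()  # AveragedLoss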
from caffe2.python import schema
from caffe2.python.layers.arc_cosine_feature_map import ArcCosineFeatureMap
import numpy as np
class SemiRandomFeatures(ArcCosineFeatureMap):
"""
Implementation of the semi-random kernel feature map.
Applies H(x_rand) * x_rand^s * x_learned, where
H is the Heaviside step function,
x_rand is the input after applying FC with randomized parameters,
and x_learned is the input after applying FC with learnable parameters.
If using multilayer model with semi-random layers, then input and output records
should have a 'full' and 'random' Scalar. The random Scalar will be passed as
input to process the random features.
For more information, see the original paper:
https://arxiv.org/pdf/1702.08882.pdf
Inputs :
output_dims -- dimensions of the output vector
s -- if s == 0, will obtain linear semi-random features;
else if s == 1, will obtain squared semi-random features;
else s >= 2, will obtain higher order semi-random features
scale_random -- amount to scale the standard deviation
(for random parameter initialization when weight_init or
bias_init hasn't been specified)
scale_learned -- amount to scale the standard deviation
(for learned parameter initialization when weight_init or
bias_init hasn't been specified)
weight_init_random -- initialization distribution for random weight parameter
(if None, will use Gaussian distribution)
        bias_init_random -- initialization distribution for random bias parameter
(if None, will use Uniform distribution)
weight_init_learned -- initialization distribution for learned weight parameter
(if None, will use Gaussian distribution)
        bias_init_learned -- initialization distribution for learned bias parameter
(if None, will use Uniform distribution)
weight_optim -- optimizer for weight params for learned features
bias_optim -- optimizer for bias param for learned features
set_weight_as_global_constant -- if True, initialized random parameters
will be constant across all distributed
instances of the layer
"""
def __init__(
self,
model,
input_record,
output_dims,
s=1,
scale_random=1.0,
scale_learned=1.0,
weight_init_random=None,
bias_init_random=None,
weight_init_learned=None,
bias_init_learned=None,
weight_optim=None,
bias_optim=None,
set_weight_as_global_constant=False,
name='semi_random_features',
**kwargs):
if isinstance(input_record, schema.Struct):
schema.is_schema_subset(
schema.Struct(
('full', schema.Scalar()),
('random', schema.Scalar()),
),
input_record
)
self.input_record_full = input_record.full
self.input_record_random = input_record.random
elif isinstance(input_record, schema.Scalar):
self.input_record_full = input_record
self.input_record_random = input_record
super(SemiRandomFeatures, self).__init__(
model,
self.input_record_full,
output_dims,
s=s,
scale=scale_random, # To initialize the random parameters
weight_init=weight_init_random,
bias_init=bias_init_random,
weight_optim=None,
bias_optim=None,
set_weight_as_global_constant=set_weight_as_global_constant,
initialize_output_schema=False,
name=name,
**kwargs)
self.output_schema = schema.Struct(
('full', schema.Scalar(
(np.float32, output_dims),
model.net.NextScopedBlob(name + '_full_output')
),),
('random', schema.Scalar(
(np.float32, output_dims),
model.net.NextScopedBlob(name + '_random_output')
),),
)
# To initialize the learnable parameters
assert (scale_learned > 0.0), \
"Expected scale (learned) > 0, got %s" % scale_learned
self.stddev = scale_learned * np.sqrt(1.0 / self.input_dims)
# Learned Parameters
(self.learned_w, self.learned_b) = self._initialize_params(
'learned_w',
'learned_b',
w_init=weight_init_learned,
b_init=bias_init_learned,
w_optim=weight_optim,
b_optim=bias_optim
)
def add_ops(self, net):
# Learned features: wx + b
learned_features = net.FC(self.input_record_full.field_blobs() +
[self.learned_w, self.learned_b],
net.NextScopedBlob('learned_features'))
# Random features: wx + b
random_features = net.FC(self.input_record_random.field_blobs() +
[self.random_w, self.random_b],
net.NextScopedBlob('random_features'))
processed_random_features = self._heaviside_with_power(
net,
random_features,
self.output_schema.random.field_blobs(),
self.s
)
net.Mul([processed_random_features, learned_features],
self.output_schema.full.field_blobs())
|
pytorch-master
|
caffe2/python/layers/semi_random_features.py
|
## @package dot_product
# Module caffe2.python.layers.dot_product
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
class PairwiseSimilarity(ModelLayer):
def __init__(self, model, input_record, output_dim, pairwise_similarity_func='dot',
name='pairwise_similarity', **kwargs):
super(PairwiseSimilarity, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Struct), (
"Incorrect input type. Expected Struct, but received: {0}".
format(input_record))
assert (
('all_embeddings' in input_record) ^
('x_embeddings' in input_record and 'y_embeddings' in input_record)
), (
"either (all_embeddings) xor (x_embeddings and y_embeddings) " +
"should be given."
)
self.pairwise_similarity_func = pairwise_similarity_func
if 'all_embeddings' in input_record:
x_embeddings = input_record['all_embeddings']
y_embeddings = input_record['all_embeddings']
else:
x_embeddings = input_record['x_embeddings']
y_embeddings = input_record['y_embeddings']
assert isinstance(x_embeddings, schema.Scalar), (
"Incorrect input type for x. Expected Scalar, " +
"but received: {0}".format(x_embeddings))
assert isinstance(y_embeddings, schema.Scalar), (
"Incorrect input type for y. Expected Scalar, " +
"but received: {0}".format(y_embeddings)
)
if 'indices_to_gather' in input_record:
indices_to_gather = input_record['indices_to_gather']
assert isinstance(indices_to_gather, schema.Scalar), (
"Incorrect type of indices_to_gather. "
"Expected Scalar, but received: {0}".format(indices_to_gather)
)
self.indices_to_gather = indices_to_gather
else:
self.indices_to_gather = None
self.x_embeddings = x_embeddings
self.y_embeddings = y_embeddings
dtype = x_embeddings.field_types()[0].base
self.output_schema = schema.Scalar(
(dtype, (output_dim,)),
self.get_next_blob_reference('output')
)
def add_ops(self, net):
if self.pairwise_similarity_func == "cosine_similarity":
x_embeddings_norm = net.Normalize(self.x_embeddings(), axis=1)
y_embeddings_norm = net.Normalize(self.y_embeddings(), axis=1)
Y = net.BatchMatMul(
[x_embeddings_norm, y_embeddings_norm],
[self.get_next_blob_reference(x_embeddings_norm + '_matmul')],
trans_b=1,
)
elif self.pairwise_similarity_func == "dot":
Y = net.BatchMatMul(
[self.x_embeddings(), self.y_embeddings()],
[self.get_next_blob_reference(self.x_embeddings() + '_matmul')],
trans_b=1,
)
else:
raise NotImplementedError(
"pairwise_similarity_func={} is not valid".format(
self.pairwise_similarity_func
)
)
if self.indices_to_gather:
flattened = net.Flatten(
Y, Y + '_flatten',
)
net.BatchGather(
[flattened, self.indices_to_gather()],
self.output_schema(),
)
else:
net.Flatten(Y, self.output_schema())
|
pytorch-master
|
caffe2/python/layers/pairwise_similarity.py
|
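A NumPy sketch of the dot and cosine branches of PairwiseSimilarity.add_ops, assuming a (batch, N, dim) embedding layout; BatchMatMul with trans_b=1, then Flatten:

import numpy as np

def pairwise_similarity(x, y, func="dot"):
    if func == "cosine_similarity":
        x = x / np.linalg.norm(x, axis=2, keepdims=True)    # Normalize
        y = y / np.linalg.norm(y, axis=2, keepdims=True)
    sim = np.matmul(x, np.transpose(y, (0, 2, 1)))  # BatchMatMul, trans_b=1
    return sim.reshape(sim.shape[0], -1)            # Flatten to (B, N * N)

x = np.random.randn(2, 3, 4).astype(np.float32)
y = np.random.randn(2, 3, 4).astype(np.float32)
assert pairwise_similarity(x, y).shape == (2, 9)
assert pairwise_similarity(x, y, "cosine_similarity").shape == (2, 9)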