# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session's ClusterSpec Propagation.
These tests exercise the ClusterSpec Propagation capabilities of distributed
Sessions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
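# A minimal sketch of the ClusterSpec propagation pattern exercised by the
# tests below (illustrative only; the two-server, single-'worker'-job layout
# mirrors the tests and is not the only possible topology):
#
#   server1 = server_lib.Server.create_local_server()
#   cluster_def = cluster_pb2.ClusterDef()
#   job = cluster_def.job.add()
#   job.name = 'worker'
#   job.tasks[0] = server1.target[len('grpc://'):]
#   config = config_pb2.ConfigProto(cluster_def=cluster_def)
#   with session.Session(server1.target, config=config) as sess:
#     print(sess.run(constant_op.constant(17)))  # runs on the propagated cluster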
class SessionClusterSpecPropagationTest(test_util.TensorFlowTestCase):
def testClusterSpecPropagationSimple(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
const = constant_op.constant(17)
sess = session.Session(server1.target, config=config)
output = sess.run(const)
self.assertEqual(17, output)
def testClusterSpecPropagationWorker2Placement(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g, ops.device('/job:worker/task:1'):
with ops.device('/cpu:0'):
const = constant_op.constant(17)
sess = session.Session(server1.target, config=config, graph=g)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
output = sess.run(const, options=run_options, run_metadata=run_metadata)
self.assertEqual(17, output)
self.assertEqual(1,
len([
node_stats
for dev_stats in run_metadata.step_stats.dev_stats
for node_stats in dev_stats.node_stats
if '/job:worker/replica:0/task:1/device:CPU:0' ==
dev_stats.device and 'Const' == node_stats.node_name
]))
def testClusterSpecPropagationWorker1Placement(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g, ops.device('/job:worker/task:0'):
const = constant_op.constant(17)
with session.Session(server1.target, config=config, graph=g):
output = self.evaluate(const)
self.assertEqual(17, output)
def testCanonicalDeviceNames(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g, ops.device(
'/job:worker/task:1/device:CPU:0'):
const = constant_op.constant(17)
sess = session.Session(server1.target, config=config, graph=g)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
output = sess.run(const, options=run_options, run_metadata=run_metadata)
self.assertEqual(17, output)
self.assertEqual(1,
len([
node_stats
for dev_stats in run_metadata.step_stats.dev_stats
for node_stats in dev_stats.node_stats
if '/job:worker/replica:0/task:1/device:CPU:0' ==
dev_stats.device and 'Const' == node_stats.node_name
]))
def testFullDeviceNames(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'renamed_worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g, ops.device(
'/job:renamed_worker/replica:0/task:1/device:CPU:0'):
const = constant_op.constant(17)
sess = session.Session(server1.target, config=config, graph=g)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
output = sess.run(const, options=run_options, run_metadata=run_metadata)
self.assertEqual(17, output)
self.assertEqual(1,
len([
node_stats
for dev_stats in run_metadata.step_stats.dev_stats
for node_stats in dev_stats.node_stats
if '/job:renamed_worker/replica:0/task:1/device:CPU:0'
== dev_stats.device and 'Const' == node_stats.node_name
]))
def testMultipleLocalDevices(self):
# Note: CPU->CPU transfers have a fast-path in
# BaseRemoteRendezvous::SameWorkerRecvDone that means the test doesn't
# actually capture the motivating bug unless run on a GPU machine.
#
# Example error message (before bugfix -- line breaks added because lint):
#
# W0718 17:14:41.521534 190121 device_mgr.cc:107] Unknown device:
# /job:worker/replica:0/task:0/device:CPU:0 all devices:
# /job:local/replica:0/task:0/device:GPU:0,
# /job:local/replica:0/task:0/device:GPU:0,
# /job:local/replica:0/task:0/cpu:1, CPU:0, GPU:0,
# /job:local/replica:0/task:0/device:CPU:1,
# /job:local/replica:0/task:0/device:CPU:0, CPU:1,
# /job:local/replica:0/task:0/cpu:0
server_config = config_pb2.ConfigProto(device_count={'CPU': 2})
server1 = server_lib.Server.create_local_server(config=server_config)
server2 = server_lib.Server.create_local_server(config=server_config)
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g:
with ops.device('/job:worker/task:1/cpu:1'):
input1 = constant_op.constant(17, dtypes.float32)
with ops.device('/job:worker/task:0/cpu:1'):
input2 = constant_op.constant(3, dtypes.float32)
with ops.device('/job:worker/task:1/cpu:0'):
sum1 = input1 + input2
if test.is_gpu_available():
device_str = '/job:worker/task:0/device:GPU:0'
else:
device_str = '/job:worker/task:0/cpu:1'
with ops.device(device_str):
sum2 = input2 + input1
with ops.device('/job:worker/task:0/cpu:0'):
sum3 = sum1 + sum2
with session.Session(server1.target, config=config, graph=g):
output = self.evaluate(sum3)
self.assertEqual(40, output)
def testLegacyDeviceNames(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g, ops.device('/job:worker/task:1/cpu:0'):
const = constant_op.constant(17)
sess = session.Session(server1.target, config=config, graph=g)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
output = sess.run(const, options=run_options, run_metadata=run_metadata)
self.assertEqual(17, output)
self.assertEqual(1,
len([
node_stats
for dev_stats in run_metadata.step_stats.dev_stats
for node_stats in dev_stats.node_stats
if '/job:worker/replica:0/task:1/device:CPU:0' ==
dev_stats.device and 'Const' == node_stats.node_name
]))
def testClusterSpecPropagationThreeServers2Graphs(self):
"""Boots 3 servers, creates 2 sessions, ensures appropriate operations.
We create 2 clusterspecs:
1. server2 as the master, server1 as a worker
2. server2 as the master, server3 as a worker
We ensure that variables on the workers are independent.
"""
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
server3 = server_lib.Server.create_local_server()
cluster_def1 = cluster_pb2.ClusterDef()
job1 = cluster_def1.job.add()
job1.name = 'worker1'
job1.tasks[0] = server2.target[len('grpc://'):]
job1.tasks[1] = server1.target[len('grpc://'):]
cluster_def2 = cluster_pb2.ClusterDef()
job2 = cluster_def2.job.add()
job2.name = 'worker2'
job2.tasks[0] = server2.target[len('grpc://'):]
job2.tasks[1] = server3.target[len('grpc://'):]
config1 = config_pb2.ConfigProto(cluster_def=cluster_def1)
config2 = config_pb2.ConfigProto(cluster_def=cluster_def2)
with ops.Graph().as_default() as g1:
with ops.device('/job:worker1/task:1'):
var1 = variables.Variable(array_ops.zeros([2]), name='var1')
update_op1 = state_ops.assign_add(
var1, array_ops.ones([2]), name='var1_assign_add')
init1 = variables.global_variables_initializer()
with ops.Graph().as_default() as g2:
with ops.device('/job:worker2/task:1'):
var2 = variables.Variable(array_ops.zeros([2]), name='var2')
update_op2 = state_ops.assign_add(
var2, array_ops.ones([2]), name='var2_assign_add')
init2 = variables.global_variables_initializer()
sess1 = session.Session(server2.target, graph=g1, config=config1)
sess2 = session.Session(server2.target, graph=g2, config=config2)
init1.run(session=sess1)
init2.run(session=sess2)
expected_zeros = np.zeros([2])
expected_ones = np.ones([2])
self.assertAllEqual(expected_zeros, sess1.run(var1))
self.assertAllEqual(expected_zeros, sess2.run(var2))
self.assertAllEqual(expected_ones, sess1.run(update_op1))
self.assertAllEqual(expected_ones, sess1.run(var1))
self.assertAllEqual(expected_zeros, sess2.run(var2))
self.assertAllEqual(expected_ones, sess2.run(update_op2))
self.assertAllEqual(expected_ones + expected_ones, sess1.run(update_op1))
self.assertAllEqual(expected_ones, sess2.run(var2))
self.assertAllEqual(expected_ones + expected_ones, sess1.run(var1))
def testClusterSpecPropagationThreeServers(self):
"""Boots 3 servers, creates 2 sessions, ensures appropriate operations.
We create 2 clusterspecs:
1. server2 as the master, server1 as a worker
2. server2 as the master, server3 as a worker
We ensure that variables on the workers are independent.
"""
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
server3 = server_lib.Server.create_local_server()
cluster_def1 = cluster_pb2.ClusterDef()
job1 = cluster_def1.job.add()
job1.name = 'worker'
job1.tasks[0] = server2.target[len('grpc://'):]
job1.tasks[1] = server1.target[len('grpc://'):]
cluster_def2 = cluster_pb2.ClusterDef()
job2 = cluster_def2.job.add()
job2.name = 'worker'
job2.tasks[0] = server2.target[len('grpc://'):]
job2.tasks[1] = server3.target[len('grpc://'):]
config1 = config_pb2.ConfigProto(cluster_def=cluster_def1)
config2 = config_pb2.ConfigProto(cluster_def=cluster_def2)
with ops.device('/job:worker/task:1'):
var = variables.Variable(array_ops.zeros([2]), name='var')
feed = array_ops.placeholder(dtypes.float32, shape=(2))
update_op = var.assign_add(feed)
sess1 = session.Session(server2.target, config=config1)
sess2 = session.Session(server2.target, config=config2)
variables.global_variables_initializer().run(session=sess1)
variables.global_variables_initializer().run(session=sess2)
expected_zeros = np.zeros([2])
expected_ones = np.ones([2])
self.assertAllEqual(expected_zeros, sess1.run(var))
self.assertAllEqual(expected_zeros, sess2.run(var))
self.assertAllEqual(expected_ones,
sess1.run(update_op, feed_dict={feed: expected_ones}))
self.assertAllEqual(expected_ones, sess1.run(var))
self.assertAllEqual(expected_zeros, sess2.run(var))
self.assertAllEqual(expected_ones,
sess2.run(update_op, feed_dict={feed: expected_ones}))
self.assertAllEqual(expected_ones + expected_ones,
sess1.run(update_op, feed_dict={feed: expected_ones}))
self.assertAllEqual(expected_ones, sess2.run(var))
self.assertAllEqual(expected_ones + expected_ones, sess1.run(var))
def testClusterSpecPropagationThreeServersOneCluster(self):
"""Boots 3 servers, ensures appropriate communication across workers.
Additionally, in this cluster, we ensure the master is not the 0-th worker.
Note: this test only uses one session.
"""
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
server3 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server3.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
job.tasks[2] = server1.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
# Add ops to the devices in non-linear order.
with ops.device('/job:worker/task:1'):
feed1 = array_ops.placeholder(dtypes.float32, shape=(2))
const1 = constant_op.constant(2.0)
mul1 = const1 * feed1
with ops.device('/job:worker/task:2'):
feed2 = array_ops.placeholder(dtypes.float32, shape=(2))
const2 = constant_op.constant(2.0)
mul2 = const2 * feed2
with ops.device('/job:worker/task:0'):
feed0 = array_ops.placeholder(dtypes.float32, shape=(2))
const0 = constant_op.constant(2.0)
mul0 = const0 * feed0
sum_op = mul0 + mul1 + mul2
ones = np.ones([2])
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
# Run!
with session.Session(server1.target, config=config) as sess:
output = sess.run(
sum_op,
options=run_options,
run_metadata=run_metadata,
feed_dict={feed1: ones,
feed2: ones,
feed0: ones})
self.assertAllEqual(6 * ones, output)
self.assertEqual(
3,
len([
dev_stats.device
for dev_stats in run_metadata.step_stats.dev_stats
for node_stats in dev_stats.node_stats
if '/job:worker/replica:0/task:' in dev_stats.device and
node_stats.node_name.startswith('Const')
]), run_metadata)
def testClusterSpecPropagationIsolation(self):
"""Test that two sessions using ClusterSpec propagation are isolated."""
server = server_lib.Server.create_local_server()
init_value = array_ops.placeholder(dtypes.int32, shape=[])
v = variables.Variable(init_value)
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
sess1 = session.Session(server.target, config=config)
sess2 = session.Session(server.target, config=config)
# Initially, the variable is uninitialized in both sessions.
with self.assertRaises(errors.FailedPreconditionError):
sess1.run(v)
with self.assertRaises(errors.FailedPreconditionError):
sess2.run(v)
# An update in sess1 should be visible in sess1 only.
sess1.run(v.initializer, feed_dict={init_value: 37})
self.assertEqual(37, sess1.run(v))
with self.assertRaises(errors.FailedPreconditionError):
sess2.run(v)
# An update in sess2 should be visible in sess2 only.
sess2.run(v.initializer, feed_dict={init_value: 86})
self.assertEqual(37, sess1.run(v))
self.assertEqual(86, sess2.run(v))
# Closing sess2 has no effect on the state of sess1.
sess2.close()
self.assertEqual(37, sess1.run(v))
# Subsequent sessions will not see the state of existing sessions.
sess3 = session.Session(server.target, config=config)
self.assertEqual(37, sess1.run(v))
with self.assertRaises(errors.FailedPreconditionError):
sess3.run(v)
def testClusterSpecPropagationNonIsolation(self):
"""Test that two sessions using ClusterSpec propagation shares state.
For example, the updated Variable value are visible among all worker
sessions registered in the same server.
"""
server = server_lib.Server.create_local_server()
init_value = array_ops.placeholder(dtypes.int32, shape=[])
v = variables.Variable(init_value)
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
config.experimental.share_session_state_in_clusterspec_propagation = True
sess1 = session.Session(server.target, config=config)
sess2 = session.Session(server.target, config=config)
# Initially, the variable is uninitialized in both sessions.
with self.assertRaises(errors.FailedPreconditionError):
sess1.run(v)
with self.assertRaises(errors.FailedPreconditionError):
sess2.run(v)
# An update in sess1 should be visible in sess2.
sess1.run(v.initializer, feed_dict={init_value: 37})
self.assertEqual(37, sess1.run(v))
self.assertEqual(37, sess2.run(v))
# Closing sess2 has no effect on the state of sess1.
sess2.close()
self.assertEqual(37, sess1.run(v))
# Subsequent sessions should see the state of existing sessions.
sess3 = session.Session(server.target, config=config)
self.assertEqual(37, sess1.run(v))
self.assertEqual(37, sess3.run(v))
def testClusterSpecPropagationNonIsolation2Graphs(self):
"""Creates 2 sessions with each own graph, ensures appropriate operations.
We ensure that variables on the workers shares state.
"""
server = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
config.experimental.share_session_state_in_clusterspec_propagation = True
with ops.Graph().as_default() as g1:
var1 = variables.Variable(array_ops.zeros([2]), name='var')
update_op1 = state_ops.assign_add(
var1, array_ops.ones([2]), name='var1_assign_add')
init1 = variables.global_variables_initializer()
with ops.Graph().as_default() as g2:
var2 = variables.Variable(array_ops.zeros([2]), name='var')
update_op2 = state_ops.assign_add(
var2, array_ops.ones([2]), name='var2_assign_add')
sess1 = session.Session(server.target, graph=g1, config=config)
sess2 = session.Session(server.target, graph=g2, config=config)
expected_zeros = np.zeros([2])
expected_ones = np.ones([2])
init1.run(session=sess1)
self.assertAllEqual(expected_zeros, sess1.run(var1))
self.assertAllEqual(expected_zeros, sess2.run(var2))
self.assertAllEqual(expected_ones, sess1.run(update_op1))
self.assertAllEqual(expected_ones, sess1.run(var1))
self.assertAllEqual(expected_ones, sess2.run(var2))
self.assertAllEqual(expected_ones + expected_ones, sess2.run(update_op2))
self.assertAllEqual(expected_ones + expected_ones, sess2.run(var2))
self.assertAllEqual(expected_ones + expected_ones, sess1.run(var1))
def testClusterSpecPropagationPartialRun(self):
"""Test successful partial run with ClusterSpec propagation."""
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.device('/job:worker/task:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
with ops.device('/job:worker/task:1'):
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
with ops.device('/job:worker/task:0'):
r2 = math_ops.multiply(r1, c)
with session.Session(server1.target, config=config) as sess:
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
res = sess.partial_run(h, r2, feed_dict={c: 3})
self.assertEqual(9, res)
if __name__ == '__main__':
googletest.main()
# ==============================================================================
# End of file: tensorflow/python/client/session_clusterspec_prop_test.py
# Repo: tensorflow-master
# ==============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SWIG-wrapped device lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class DeviceLibTest(test_util.TensorFlowTestCase):
def testListLocalDevices(self):
devices = device_lib.list_local_devices()
self.assertGreater(len(devices), 0)
self.assertEqual(devices[0].device_type, "CPU")
devices = device_lib.list_local_devices(config_pb2.ConfigProto())
self.assertGreater(len(devices), 0)
self.assertEqual(devices[0].device_type, "CPU")
# GPU test
if test.is_gpu_available():
self.assertGreater(len(devices), 1)
self.assertTrue("GPU" in [d.device_type for d in devices] or
"SYCL" in [d.device_type for d in devices])
if __name__ == "__main__":
googletest.main()
# ==============================================================================
# End of file: tensorflow/python/client/device_lib_test.py
# Repo: tensorflow-master
# ==============================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session's partial run APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
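# The partial-run flow these tests exercise, in brief (a sketch, not part of
# the test cases; `sess` is assumed to be an open session):
#
#   a = array_ops.placeholder(dtypes.float32, shape=[])
#   b = array_ops.placeholder(dtypes.float32, shape=[])
#   r = math_ops.add(a, b)
#   h = sess.partial_run_setup(fetches=[r], feeds=[a, b])  # declare all up front
#   res = sess.partial_run(h, r, feed_dict={a: 1, b: 2})   # then feed incrementally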
class PartialRunTest(test_util.TensorFlowTestCase):
def RunTestPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def RunTestPartialRunIncomplete(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def RunTestConcurrentPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def RunTestManyPartialRun(self, sess):
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.multiply(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def RunTestRunAndPartialRun(self, sess):
a = constant_op.constant(2.0, dtypes.float32)
b = a * 2
c = b * 3
r1 = sess.run([b, c])
h = sess.partial_run_setup([b, c], [])
r2 = sess.partial_run(h, [b, c])
self.assertEqual(r1, r2)
def RunTestPartialRunMissingPlaceholderFeedException(self, sess):
x = array_ops.placeholder(dtypes.float32, shape=())
fetches = [x * 2, x * 3]
handle = sess.partial_run_setup(fetches=fetches, feeds=[])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'You must feed a value for placeholder'):
sess.partial_run(handle, fetches[0])
def RunTestPartialRunUnspecifiedFeed(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
h = sess.partial_run_setup([r1], [a, b])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'was not specified in partial_run_setup.$'):
sess.partial_run(h, r1, feed_dict={a: 1, b: 2, c: 3})
def RunTestPartialRunUnspecifiedFetch(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(a, c)
h = sess.partial_run_setup([r1], [a, b, c])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'was not specified in partial_run_setup.$'):
sess.partial_run(h, r2, feed_dict={a: 1, c: 3})
def RunTestPartialRunAlreadyFed(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(a, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'has already been fed.$'):
sess.partial_run(h, r2, feed_dict={a: 1, c: 3})
def RunTestPartialRunAlreadyFetched(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(a, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'has already been fetched.$'):
sess.partial_run(h, r1, feed_dict={c: 3})
def RunTestPartialRunEmptyFetches(self, sess):
a = array_ops.placeholder(dtypes.float32)
b = a * 2.0
h = sess.partial_run_setup(fetches=[b], feeds=[a])
sess.partial_run(h, [], {a: 3.0})
r = sess.partial_run(h, [b], {})
self.assertEqual([6.0], r)
@test_util.run_deprecated_v1
def testInvalidPartialRunSetup(self):
sess = session.Session()
x = array_ops.placeholder(dtypes.float32, shape=[])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
'specify at least one target to fetch or execute.'):
sess.partial_run_setup(fetches=[], feeds=[x])
@test_util.run_deprecated_v1
def testPartialRunSetupNoFeedsPassed(self):
sess = session.Session()
r1 = constant_op.constant([6.0])
h = sess.partial_run_setup([r1])
result1 = sess.partial_run(h, r1)
self.assertEqual([6.0], result1)
@test_util.run_deprecated_v1
def testPartialRunDirect(self):
self.RunTestPartialRun(session.Session())
@test_util.run_deprecated_v1
def testPartialRunIncompleteDirect(self):
self.RunTestPartialRunIncomplete(session.Session())
@test_util.run_deprecated_v1
def testConcurrentPartialRunDirect(self):
self.RunTestConcurrentPartialRun(session.Session())
@test_util.run_deprecated_v1
def testManyPartialRunDirect(self):
self.RunTestManyPartialRun(session.Session())
@test_util.run_deprecated_v1
def testRunAndPartialRunDirect(self):
self.RunTestRunAndPartialRun(session.Session())
@test_util.run_deprecated_v1
def testPartialRunMissingPlaceholderFeedExceptionDirect(self):
self.RunTestPartialRunMissingPlaceholderFeedException(session.Session())
@test_util.run_deprecated_v1
def testPartialRunUnspecifiedFeedDirect(self):
self.RunTestPartialRunUnspecifiedFeed(session.Session())
@test_util.run_deprecated_v1
def testPartialRunUnspecifiedFetchDirect(self):
self.RunTestPartialRunUnspecifiedFetch(session.Session())
@test_util.run_deprecated_v1
def testPartialRunAlreadyFedDirect(self):
self.RunTestPartialRunAlreadyFed(session.Session())
@test_util.run_deprecated_v1
def testPartialRunAlreadyFetchedDirect(self):
self.RunTestPartialRunAlreadyFetched(session.Session())
@test_util.run_deprecated_v1
def testPartialRunEmptyFetchesDirect(self):
self.RunTestPartialRunEmptyFetches(session.Session())
@test_util.run_deprecated_v1
def testPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRun(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunIncompleteDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunIncomplete(session.Session(server.target))
@test_util.run_deprecated_v1
def testConcurrentPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.RunTestConcurrentPartialRun(session.Session(server.target))
@test_util.run_deprecated_v1
def testManyPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.RunTestManyPartialRun(session.Session(server.target))
@test_util.run_deprecated_v1
def testRunAndPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.RunTestRunAndPartialRun(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunMissingPlaceholderFeedExceptionDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunMissingPlaceholderFeedException(
session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunUnspecifiedFeedDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunUnspecifiedFeed(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunUnspecifiedFetchDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunUnspecifiedFetch(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunAlreadyFedDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunAlreadyFed(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunAlreadyFetchedDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunAlreadyFetched(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunEmptyFetchesDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunEmptyFetches(session.Session(server.target))
if __name__ == '__main__':
googletest.main()
# ==============================================================================
# End of file: tensorflow/python/client/session_partial_run_test.py
# Repo: tensorflow-master
# ==============================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for launching graphs and executing operations.
See the [Client](https://www.tensorflow.org/guide/graphs) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.client.session import InteractiveSession
from tensorflow.python.client.session import Session
from tensorflow.python.framework import errors
from tensorflow.python.framework.errors import OpError
from tensorflow.python.framework.ops import get_default_session
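# A minimal usage sketch for the re-exported symbols (illustrative only;
# `some_tensor` is a placeholder name, not part of this module):
#
#   with Session() as sess:
#     value = sess.run(some_tensor)
#   # Inside a `with sess.as_default():` block, get_default_session() is sess.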
# ==============================================================================
# End of file: tensorflow/python/client/client_lib.py
# Repo: tensorflow-master
# ==============================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
import threading
import warnings
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.eager import context
from tensorflow.python.framework import device
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `BaseSession.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(
fetched_vals[0], fetched_vals[1],
fetched_vals[2] if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(
zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(sparse_tensor.SparseTensor, lambda fetch: ([
fetch.indices, fetch.values, fetch.dense_shape
], lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(
zip([feed.indices, feed.values, feed.dense_shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.dense_shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: ([fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape
], _get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)], lambda feed: [feed])
]
# pylint: enable=g-long-lambda
def _convert_to_numpy_obj(numpy_dtype, obj):
"""Explicitly convert obj based on numpy type except for string type."""
return numpy_dtype(obj) if numpy_dtype is not object else str(obj)
def register_session_run_conversion_functions(
tensor_type,
fetch_function,
feed_function=None,
feed_function_for_partial_run=None):
"""Register fetch and feed conversion functions for `tf.Session.run()`.
This function registers a triple of conversion functions for fetching and/or
feeding values of user-defined types in a call to tf.Session.run().
An example
```python
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = tf.square(tensor)
#you can define conversion functions as follows:
fetch_function = lambda squared_tensor:([squared_tensor.sq],
lambda val: val[0])
feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_function_for_partial_run = lambda feed: [feed.sq]
#then after invoking this register function, you can use as follows:
session.run(squared_tensor1,
feed_dict = {squared_tensor2 : some_numpy_array})
```
Args:
tensor_type: The type for which you want to register a conversion function.
fetch_function: A callable that takes an object of type `tensor_type` and
returns a tuple, where the first element is a list of `tf.Tensor` objects,
and the second element is a callable that takes a list of ndarrays and
returns an object of some value type that corresponds to `tensor_type`.
fetch_function describes how to expand fetch into its component Tensors
and how to contract the fetched results back into a single return value.
feed_function: A callable that takes feed_key and feed_value as input, and
returns a list of tuples (feed_tensor, feed_val), feed_key must have type
`tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
function describes how to unpack a single fed value and map it to feeds of
one or more tensors and their corresponding values.
feed_function_for_partial_run: A callable for specifying tensor values to
feed when setting up a partial run, which takes a `tensor_type` type
object as input, and returns a list of Tensors.
Raises:
ValueError: If `tensor_type` has already been registered.
"""
for conversion_function in _REGISTERED_EXPANSIONS:
if issubclass(conversion_function[0], tensor_type):
raise ValueError('%s has already been registered so ignore it.' %
tensor_type)
_REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function,
feed_function_for_partial_run))
def _is_attrs_instance(obj):
"""Returns True if the given obj is an instance of attrs-decorated class."""
return getattr(obj.__class__, '__attrs_attrs__', None) is not None
def _get_attrs_values(obj):
"""Returns the list of values from an attrs instance."""
attrs = getattr(obj.__class__, '__attrs_attrs__')
return [getattr(obj, a.name) for a in attrs]
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond exactly to
the list of tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, collections.Mapping):
return _DictFetchMapper(fetch)
elif _is_attrs_instance(fetch):
return _AttrsFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined in
_REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)' %
(fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(f)
if j is None:
j = len(seen_fetches)
seen_fetches[f] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
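# Example (follows from the contract above): if mapper 0 fetches [t0, t1] and
# mapper 1 fetches [t1, t2], then unique_fetches == [t0, t1, t2] and
# value_indices == [[0, 1], [1, 2]].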
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if issubclass(self._fetch_type, list):
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._fetch_type = type(fetches)
self._keys = fetches.keys()
self._mappers = [
_FetchMapper.for_fetch(fetch) for fetch in fetches.values()
]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = self._fetch_type()
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
results[k] = m.build_results([values[j] for j in vi])
return results
class _AttrsFetchMapper(_FetchMapper):
"""Fetch mapper for attrs decorated classes."""
def __init__(self, fetches):
"""Creates a _AttrsFetchMapper.
Args:
fetches: An instance of an attrs decorated class.
"""
values = _get_attrs_values(fetches)
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
return self._fetch_type(*results)
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds, feed_handles=None):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability and to
convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
feeds: A feed dict where keys are Tensors.
feed_handles: A dict from feed Tensors to TensorHandle objects used as
direct feeds.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._feed_handles = feed_handles or {}
self._ops = []
self._fetch_handles = {}
for fetch in self._fetch_mapper.unique_fetches():
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if (isinstance(fetch, ops.Tensor) and
(fetch.op.type == 'GetSessionHandle' or
fetch.op.type == 'GetSessionHandleV2')):
self._fetch_handles[fetch] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise ValueError('Operation %r has been marked as not fetchable.' %
op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
if self._fetches[i] in self._feed_handles:
# A fetch had a corresponding direct TensorHandle feed. Call eval()
# to obtain the Tensor value from the TensorHandle.
value = self._feed_handles[self._fetches[i]].eval()
else:
value = self._feeds.get(self._fetches[i])
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i])
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
"""Utility function for transitioning to the new session API.
Args:
tensor_list: a list of `Tensor`s.
Returns:
A list of each `Tensor`s name (as byte arrays).
"""
return [compat.as_bytes(t.name) for t in tensor_list]
class _DeviceAttributes(object):
"""Struct-like object describing a device's attributes.
Each device has 3 key properties:
- name: the fully-qualified TensorFlow path to the device. For
example: /job:worker/replica:0/task:3/device:CPU:0
- device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
- memory_limit_bytes: the maximum amount of memory available on the device
(in bytes).
"""
def __init__(self, name, device_type, memory_limit_bytes, incarnation):
self._name = device.canonical_name(name)
self._device_type = device_type
self._memory_limit_bytes = memory_limit_bytes
self._incarnation = incarnation
@property
def name(self):
return self._name
@property
def device_type(self):
return self._device_type
@property
def memory_limit_bytes(self):
return self._memory_limit_bytes
@property
def incarnation(self):
return self._incarnation
def __repr__(self):
return '_DeviceAttributes(%s, %s, %d, %d)' % (
self.name,
self.device_type,
self.memory_limit_bytes,
self.incarnation,
)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None, the
default graph will be used.
config: (Optional) ConfigProto proto used to configure the session. If no
config is specified, the global default will be used. The global default
can be configured via the tf.config APIs.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._closed = False
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is None:
config = context.context().config
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('config must be a tf.ConfigProto, but got %s' %
type(config))
if (mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled
and config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.OFF):
new_config = config_pb2.ConfigProto()
new_config.CopyFrom(config)
new_config.graph_options.rewrite_options.auto_mixed_precision = (
rewriter_config_pb2.RewriterConfig.ON)
config = new_config
elif (config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.ON):
mixed_precision_global_state.non_mixed_precision_session_created = True
self._config = config
self._add_shapes = config.graph_options.infer_shapes
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
# pylint: disable=protected-access
self._session = tf_session.TF_NewSessionRef(self._graph._c_graph, opts)
# pylint: enable=protected-access
finally:
tf_session.TF_DeleteSessionOptions(opts)
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
    Each element in the returned list has the following properties:
      name: A string with the full name of the device. For example:
        `/job:worker/replica:0/task:3/device:CPU:0`.
      device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`).
      memory_limit: The maximum amount of memory available on the device.
        Note: depending on the device, the usable memory could be
        substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
raw_device_list = tf_session.TF_SessionListDevices(self._session)
device_list = []
size = tf_session.TF_DeviceListCount(raw_device_list)
for i in range(size):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
device_list.append(
_DeviceAttributes(name, device_type, memory, incarnation))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
if self._session and not self._closed:
self._closed = True
tf_session.TF_CloseSession(self._session)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
tf_session.TF_DeleteSession(self._session)
except (AttributeError, TypeError):
# At shutdown, `c_api_util`, `tf_session`, or
# `tf_session.TF_DeleteSession` may have been garbage collected, causing
# the above method calls to fail. In this case, silently leak since the
# program is about to terminate anyway.
pass
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
`tf.Operation.run` or `tf.Tensor.eval` should be executed in
this session.
```python
    c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
assert tf.compat.v1.get_default_session() is sess
print(c.eval())
```
To get the current default session, use `tf.compat.v1.get_default_session`.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.compat.v1.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
`sess.graph` is different from the value of
`tf.compat.v1.get_default_graph`, you must explicitly enter a
`with sess.graph.as_default():` block to make `sess.graph` the default
graph.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* A `tf.Operation`.
The corresponding fetched value will be `None`.
* A `tf.Tensor`.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A `tf.SparseTensor`.
The corresponding fetched value will be a
`tf.compat.v1.SparseTensorValue`
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
# 'b' (the numpy array [1.0, 2.0])
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a `tf.Tensor`, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
`tf.compat.v1.placeholder`, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
`tf.SparseTensor`,
the value should be a
`tf.compat.v1.SparseTensorValue`.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
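    For example, a minimal sketch of feeding a placeholder (assuming `sess` is
    an open session):
    ```python
    x = tf.compat.v1.placeholder(tf.float32, shape=[2])
    y = x * 2.0
    v = sess.run(y, feed_dict={x: [1.0, 2.0]})
    # v is the numpy array [2.0, 4.0]
    ```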
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (described
above).
feed_dict: A dictionary that maps graph elements to values (described
above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
      The order in which the `fetches` operations are evaluated inside the
      call is undefined.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (see
documentation for `run`).
feed_dict: A dictionary that maps graph elements to values (described
above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
    Note that in contrast to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
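    A minimal sketch (assuming the placeholders `a`, `b`, `c` and the ops
    `r1`, `r2` from the `partial_run` example above):
    ```python
    h = sess.partial_run_setup(fetches=[r1, r2], feeds=[a, b, c])
    ```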
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
if feeds is None:
feeds = []
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
# pylint: disable=protected-access
feed_list.append(subfeed_t._as_tf_output())
# pylint: enable=protected-access
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: ' + e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list,
fetch_list, target_list)
# pylint: disable=protected-access
final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
final_targets = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
"""Perform either run or partial_run, depending the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_tensor = {}
feed_map = {}
# Validate and process feed_dict.
feed_handles = {}
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: ' +
e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, numpy ndarrays, or TensorHandles. '
'For reference, the tensor object was ' +
str(feed_val) + ' which was passed to the '
'feed with key ' + str(feed) + '.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
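          # Reject Python ints that would overflow or be truncated when cast
          # to the tensor's numpy dtype (e.g. feeding 2**31 to an int32).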
if isinstance(subfeed_val, int) and _convert_to_numpy_obj(
subfeed_dtype, subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' with type ' +
str(type(subfeed_val)) +
' is not compatible with Tensor type ' + str(subfeed_dtype) +
'. Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
is_tensor_handle_feed = isinstance(subfeed_val,
session_ops.TensorHandle)
if is_tensor_handle_feed:
np_val = subfeed_val.to_numpy_array()
feed_handles[subfeed_t] = subfeed_val
else:
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if (not is_tensor_handle_feed and
not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r' %
(np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
feed_dict_tensor[subfeed_t] = np_val
feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(
self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
# Run request and get response.
# We need to keep the returned movers alive for the following _do_run().
# These movers are no longer needed when _do_run() completes, and
# are deleted when `movers` goes out of scope when this _run() ends.
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
_ = self._update_with_movers(feed_dict_tensor, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
# We only want to really perform the run if fetches or targets are provided,
# or if the call is a partial run that specifies feeds.
if final_fetches or final_targets or (handle and feed_dict_tensor):
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_tensor, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
def make_callable(self, fetches, feed_list=None, accept_options=False):
"""Returns a Python callable that runs a particular step.
    The returned callable will take `len(feed_list)` arguments, which must be
    compatible feed values for the respective elements of `feed_list`.
For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
argument to the returned callable must be a numpy ndarray (or something
convertible to an ndarray) with matching element type and shape. See
`tf.Session.run` for details of the allowable feed key and value types.
The returned callable will have the same return type as
`tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
it will return `None`.
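    For example, a minimal sketch (assuming `x` is a `tf.compat.v1.placeholder`
    feeding a tensor `y` in this session's graph, and `np` is NumPy):
    ```python
    fetch_y = sess.make_callable(y, feed_list=[x])
    result = fetch_y(np.ones([2, 2]))  # Like sess.run(y, {x: np.ones([2, 2])})
    ```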
Args:
fetches: A value or list of values to fetch. See `tf.Session.run` for
details of the allowable fetch types.
feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run`
for details of the allowable feed key types.
accept_options: (Optional.) If `True`, the returned `Callable` will be
able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata`
as optional keyword arguments `options` and `run_metadata`,
respectively, with the same syntax and semantics as `tf.Session.run`,
        which is useful for certain use cases (profiling and debugging) but
        will result in a measurable slowdown of the `Callable`'s performance.
        Default: `False`.
Returns:
A function that when called will execute the step defined by
`feed_list` and `fetches` in this session.
Raises:
TypeError: If `fetches` or `feed_list` cannot be interpreted
as arguments to `tf.Session.run`.
"""
if feed_list is not None:
if not isinstance(feed_list, (list, tuple)):
raise TypeError('`feed_list` must be a list or tuple.')
# Delegate any non-empty feed lists to the existing `run()` logic.
# TODO(mrry): Refactor the feed handling logic from
# `Session._run()` so that we can convert the feeds to a list of
# strings here.
def _generic_run(*feed_args, **kwargs):
feed_dict = {
feed: feed_val for feed, feed_val in zip(feed_list, feed_args)
}
return self.run(fetches, feed_dict=feed_dict, **kwargs)
return _generic_run
# Ensure any changes to the graph are reflected in the runtime.
# Note that we don't need to do this on subsequent calls to the
# returned object, because the arguments to `fetches` must already be
# in the graph.
self._extend_graph()
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# pylint: disable=protected-access
fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()]
target_list = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
def _callable_template_with_options_and_metadata(fetch_list,
target_list,
fetch_handler,
options=None,
run_metadata=None):
"""Template callable that accepts RunOptions and RunMetadata."""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
results = self._call_tf_sessionrun(options_ptr, {}, fetch_list,
target_list, run_metadata_ptr)
if fetch_handler:
results = fetch_handler.build_results(self, results)
else:
results = results[0] if results else None
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return results
if accept_options:
return functools.partial(_callable_template_with_options_and_metadata,
fetch_list, target_list, fetch_handler)
elif isinstance(fetches, ops.Operation):
# Special case for fetching a single operation, because the
# function will have no return value.
assert not fetch_list
assert len(target_list) == 1
def _single_operation_run():
self._call_tf_sessionrun(None, {}, [], target_list, None)
return _single_operation_run
elif isinstance(fetches, ops.Tensor):
# Special case for fetching a single tensor, because the
# function can return the result of `TF_Run()` directly.
assert len(fetch_list) == 1
assert not target_list
def _single_tensor_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, [], None)
return results[0]
return _single_tensor_run
else:
# In all other cases, we must use `fetch_handler` to build the
# results for us.
def _fetch_handler_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, target_list,
None)
return fetch_handler.build_results(self, results)
return _fetch_handler_run
# Captures the name of a node in an error status. The regex below matches
# both the old and the new formats:
# Old format: [[Node: <node_name> = ...]]
# New format: [[{{node <node_name>}} = ...]]
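  # For example, '[[{{node conv1/Conv2D}} = ...]]' yields the node name
  # 'conv1/Conv2D' in match group 3.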
_NODEDEF_NAME_RE = re.compile(
r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*')
def _do_run(self, handle, target_list, fetch_list, feed_dict, options,
run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
target_list: A list of operations to be run, but not fetched.
fetch_list: A list of tensors to be fetched.
feed_dict: A dictionary that maps tensors to numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# pylint: disable=protected-access
feeds = dict((t._as_tf_output(), v) for t, v in feed_dict.items())
fetches = [t._as_tf_output() for t in fetch_list]
targets = [op._c_op for op in target_list]
# pylint: enable=protected-access
def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
target_list, run_metadata)
def _prun_fn(handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
return self._call_tf_sessionprun(handle, feed_dict, fetch_list)
if handle is None:
return self._do_call(_run_fn, feeds, fetches, targets, options,
run_metadata)
else:
return self._do_call(_prun_fn, handle, feeds, fetches)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(3)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
message = error_interpolation.interpolate(message, self._graph)
raise type(e)(node_def, op, message)
def _extend_graph(self):
with self._graph._session_run_lock(): # pylint: disable=protected-access
tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
    # Register a dead handle in the session. Delete the dead tensors when
    # the number of dead tensors exceeds a certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
if tensors_to_delete:
feeds = {}
fetches = []
for deleter_key, tensor_handle in enumerate(tensors_to_delete):
holder, deleter = session_ops._get_handle_deleter(
self.graph, deleter_key, tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
    # If a tensor handle is fed to a placeholder on an incompatible device,
    # we move the tensor to the right device, generate a new tensor handle,
    # and update `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
        np_val = np.array(handle.handle, dtype=object)
feed_name = handle_mover[0]
feed_tensor = feed_map[feed_name][0]
feed_dict[feed_tensor] = np_val
return handles
def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
run_metadata):
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
fetch_list, target_list,
run_metadata)
def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict,
fetch_list)
# pylint: disable=protected-access
class _Callable(object):
"""Experimental wrapper for the C++ `Session::MakeCallable()` API."""
def __init__(self, session, callable_options):
self._session = session
self._handle = None
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(callable_options.SerializeToString()))
try:
self._handle = tf_session.TF_SessionMakeCallable(
session._session, options_ptr)
finally:
tf_session.TF_DeleteBuffer(options_ptr)
def __call__(self, *args, **kwargs):
# TODO(b/74355905): Support argument and return value nested structures,
# and tensor-like objects such as SparseTensors.
run_metadata = kwargs.get('run_metadata', None)
try:
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
ret = tf_session.TF_SessionRunCallable(self._session._session,
self._handle, args,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
return ret
def __del__(self):
# NOTE(mrry): It is possible that `self._session.__del__()` could be
# called before this destructor, in which case `self._session._session`
# will be `None`.
if (self._handle is not None and self._session._session is not None and
not self._session._closed):
tf_session.TF_SessionReleaseCallable(self._session._session,
self._handle)
# pylint: enable=protected-access
# TODO(b/74355905): Reimplement `Session.make_callable()` using this method
# where possible.
def _make_callable_from_options(self, callable_options):
"""Returns a handle to a "callable" with the given options.
Args:
callable_options: A `CallableOptions` protocol buffer message describing
the computation that will be performed by the callable.
Returns:
A handle to the new callable.
"""
self._extend_graph()
return BaseSession._Callable(self, callable_options)
@tf_export(v1=['Session'])
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
`tf.Variable`, `tf.queue.QueueBase`,
and `tf.compat.v1.ReaderBase`. It is important to release
these resources when they are no longer required. To do this, either
invoke the `tf.Session.close` method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.compat.v1.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.compat.v1.Session() as sess:
sess.run(...)
```
The
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True))
```
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine. See
[Distributed TensorFlow](https://tensorflow.org/deploy/distributed) for
more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
self._default_graph_context_manager = None
self._default_session_context_manager = None
def __enter__(self):
if self._default_graph_context_manager is None:
self._default_graph_context_manager = self.graph.as_default()
else:
raise RuntimeError('Session context managers are not re-entrant. '
'Use `Session.as_default()` if you want to enter '
'a session multiple times.')
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
self._default_graph_context_manager.__enter__()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
try:
self._default_session_context_manager.__exit__(exec_type, exec_value,
exec_tb)
except RuntimeError as error:
if error == exec_value:
# NOTE(skyewm): for some reason, in Python3,
# _default_session_context_manager.__exit__ will re-raise the "not
# re-entrant" exception raised in __enter__ above (note that if we're
# here, we're in the outer session context manager, since __exit__ is
# not called when __enter__ raises an exception). We still want to
# continue cleaning up this context manager before the exception is
# further propagated, so we ignore it here (note that it'll continue
# being propagated after this method completes).
pass
else:
raise
self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
self._default_session_context_manager = None
self._default_graph_context_manager = None
# If we are closing due to an exception, set a time limit on our Close() to
# avoid blocking forever.
# TODO(b/120204635) remove this when deadlock is fixed.
if exec_type:
close_thread = threading.Thread(
name='SessionCloseThread', target=self.close)
close_thread.daemon = True
close_thread.start()
close_thread.join(30.0)
if close_thread.is_alive():
logging.error(
'Session failed to close after 30 seconds. Continuing after this '
'point may leave your program in an undefined state.')
else:
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
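    For example, a sketch (assuming `server` is a `tf.distribute.Server`):
    ```python
    # Close any sessions on `server` and clear its resource containers.
    tf.compat.v1.Session.reset(server.target)
    ```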
Args:
target: The execution engine to connect to.
      containers: A list of resource container name strings, or `None` if all
        the containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
@tf_export(v1=['InteractiveSession'])
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods `tf.Tensor.eval`
and `tf.Operation.run`
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.compat.v1.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.compat.v1.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
"""
_count_lock = threading.Lock()
_active_session_count = 0 # GUARDED_BY(_count_lock)
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
# If config is not provided, choose some reasonable defaults for
# interactive use:
#
# - Grow GPU memory as needed at the cost of fragmentation.
gpu_options = config_pb2.GPUOptions(allow_growth=True)
config = config_pb2.ConfigProto(gpu_options=gpu_options)
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
with InteractiveSession._count_lock:
if InteractiveSession._active_session_count > 0:
warnings.warn('An interactive session is already active. This can '
'cause out-of-memory errors in some cases. You must '
'explicitly call `InteractiveSession.close()` to release '
'resources held by the other session(s).')
InteractiveSession._active_session_count += 1
# NOTE(mrry): We do not use `Session._closed` here because it has unhelpful
# semantics (in particular, it is not set to true if `Session.close()` is
# called on a session that has not been "opened" by running a step) and we
# cannot change those semantics without breaking existing code.
self._explicitly_closed = False
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
with InteractiveSession._count_lock:
if not self._explicitly_closed:
InteractiveSession._active_session_count -= 1
self._explicitly_closed = True
else:
return
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_graph = None
self._default_session.__exit__(None, None, None)
self._default_session = None
|
tensorflow-master
|
tensorflow/python/client/session.py
|
tensorflow-master
|
tensorflow/python/client/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.Timeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.client import timeline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class TimelineTest(test.TestCase):
def _validateTrace(self, chrome_trace_format):
# Check that the supplied string is valid JSON.
trace = json.loads(chrome_trace_format)
# It should have a top-level key containing events.
self.assertTrue('traceEvents' in trace)
# Every event in the list should have a 'ph' field.
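    # In the Chrome trace event format, 'ph' is the event phase, e.g.
    # {'ph': 'X', 'name': 'MatMul', 'ts': 12345, 'dur': 10, ...}.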
for event in trace['traceEvents']:
self.assertTrue('ph' in event)
def testSimpleTimeline(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
@test_util.deprecated_graph_mode_only
def testTimelineCpu(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.session(use_gpu=False) as sess:
const1 = constant_op.constant(1.0, name='const1')
const2 = constant_op.constant(2.0, name='const2')
result = math_ops.add(const1, const2) + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(
show_memory=False, show_dataflow=False)
self._validateTrace(ctf)
@test_util.deprecated_graph_mode_only
def testTimelineGpu(self):
if not test.is_gpu_available(cuda_only=True):
return
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.session(force_gpu=True) as sess:
const1 = constant_op.constant(1.0, name='const1')
const2 = constant_op.constant(2.0, name='const2')
result = math_ops.add(const1, const2) + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:0' in devices)
self.assertTrue('/device:GPU:0/stream:all' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(
show_memory=False, show_dataflow=False)
self._validateTrace(ctf)
def testTimelineWithRPCs(self):
"""Tests that Timeline can handle RPC tracing."""
metadata = config_pb2.RunMetadata()
step_stats = metadata.step_stats
dev_stats = step_stats.dev_stats.add()
dev_stats.device = '/job:worker/replica:0/task:0/cpu:0'
node_stats = dev_stats.node_stats.add()
node_stats.node_name = 'RecvTensor'
node_stats.all_start_micros = 12345
node_stats.op_end_rel_micros = 42
node_stats.timeline_label = ('[1024B] edge_160_conv2/biases/read from '
'/job:ps/replica:0/task:3/cpu:0 to '
'/job:worker/replica:0/task:0/cpu:0')
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
def testAnalysisAndAllocations(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
config = config_pb2.ConfigProto(device_count={'CPU': 3})
with session.Session(config=config) as sess:
with ops.device('/cpu:0'):
num1 = variables.Variable(1.0, name='num1')
with ops.device('/cpu:1'):
num2 = variables.Variable(2.0, name='num2')
with ops.device('/cpu:2'):
result = num1 + num2 + num1 * num2
self.evaluate(variables.global_variables_initializer())
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
tl = timeline.Timeline(run_metadata.step_stats)
step_analysis = tl.analyze_step_stats()
ctf = step_analysis.chrome_trace.format_to_string()
self._validateTrace(ctf)
maximums = step_analysis.allocator_maximums
cpuname = 'mklcpu' if test_util.IsMklEnabled() else 'cpu'
self.assertTrue(cpuname in maximums)
cpu_max = maximums[
'cuda_host_bfc'] if 'cuda_host_bfc' in maximums else maximums[cpuname]
# At least num1 + num2, both float32s (4 bytes each)
self.assertGreaterEqual(cpu_max.num_bytes, 8)
self.assertGreater(cpu_max.timestamp, 0)
def testManyCPUs(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
config = config_pb2.ConfigProto(device_count={'CPU': 3})
with session.Session(config=config) as sess:
with ops.device('/cpu:0'):
num1 = variables.Variable(1.0, name='num1')
with ops.device('/cpu:1'):
num2 = variables.Variable(2.0, name='num2')
with ops.device('/cpu:2'):
result = num1 + num2 + num1 * num2
self.evaluate(variables.global_variables_initializer())
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:1' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:2' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(
show_memory=False, show_dataflow=False)
self._validateTrace(ctf)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/client/timeline_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session's list_devices API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
class SessionListDevicesTest(test_util.TensorFlowTestCase):
def testListDevices(self):
with session.Session() as sess:
devices = sess.list_devices()
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in set(
[d.name for d in devices]), devices)
# All valid device incarnations must be non-zero.
self.assertTrue(all(d.incarnation != 0 for d in devices))
def testInvalidDeviceNumber(self):
opts = tf_session.TF_NewSessionOptions()
c_session = tf_session.TF_NewSession(ops.get_default_graph()._c_graph, opts)
raw_device_list = tf_session.TF_SessionListDevices(c_session)
size = tf_session.TF_DeviceListCount(raw_device_list)
with self.assertRaises(errors.InvalidArgumentError):
tf_session.TF_DeviceListMemoryBytes(raw_device_list, size)
tf_session.TF_DeleteDeviceList(raw_device_list)
tf_session.TF_CloseSession(c_session)
def testListDevicesGrpcSession(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target) as sess:
devices = sess.list_devices()
self.assertTrue('/job:local/replica:0/task:0/device:CPU:0' in set(
[d.name for d in devices]), devices)
# All valid device incarnations must be non-zero.
self.assertTrue(all(d.incarnation != 0 for d in devices))
def testListDevicesClusterSpecPropagation(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with session.Session(server1.target, config=config) as sess:
devices = sess.list_devices()
device_names = set(d.name for d in devices)
self.assertTrue(
'/job:worker/replica:0/task:0/device:CPU:0' in device_names)
self.assertTrue(
'/job:worker/replica:0/task:1/device:CPU:0' in device_names)
# All valid device incarnations must be non-zero.
self.assertTrue(all(d.incarnation != 0 for d in devices))
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/client/session_list_devices_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for interacting with the `tf.compat.v1.Session`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SessionBenchmark(test.Benchmark):
"""Tests and benchmarks for interacting with the `tf.compat.v1.Session`."""
def _benchmarkFeed(self, name, target, size, iters):
"""Runs a microbenchmark to measure the cost of feeding a tensor.
Reports the median cost of feeding a tensor of `size` * `sizeof(float)`
bytes.
Args:
name: A human-readable name for logging the output.
target: The session target to use for the benchmark.
      size: The number of floating-point numbers to be fed.
iters: The number of iterations to perform.
"""
feed_val = np.random.rand(size).astype(np.float32)
times = []
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=[size])
# Fetch the operation rather than the tensor, to avoid measuring the time
# to fetch back the value.
no_op = array_ops.identity(p).op
with session.Session(target) as sess:
sess.run(no_op, feed_dict={p: feed_val}) # Warm-up run.
for _ in xrange(iters):
start_time = time.time()
sess.run(no_op, feed_dict={p: feed_val})
end_time = time.time()
times.append(end_time - start_time)
print("%s %d %f" % (name, size, np.median(times)))
self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
def _benchmarkFetch(self, name, target, size, iters):
"""Runs a microbenchmark to measure the cost of fetching a tensor.
Reports the median cost of fetching a tensor of `size` * `sizeof(float)`
bytes.
Args:
name: A human-readable name for logging the output.
target: The session target to use for the benchmark.
size: The number of floating-point numbers to be fetched.
iters: The number of iterations to perform.
"""
times = []
with ops.Graph().as_default():
# Define the tensor to be fetched as a variable, to avoid
# constant-folding.
v = variables.Variable(random_ops.random_normal([size]))
with session.Session(target) as sess:
sess.run(v.initializer)
sess.run(v) # Warm-up run.
for _ in xrange(iters):
start_time = time.time()
sess.run(v)
end_time = time.time()
times.append(end_time - start_time)
print("%s %d %f" % (name, size, np.median(times)))
self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
def _benchmarkFetchPrebuilt(self, name, target, size, iters):
"""Runs a microbenchmark to measure the cost of fetching a tensor.
Reports the median cost of fetching a tensor of `size` * `sizeof(float)`
bytes.
Args:
name: A human-readable name for logging the output.
target: The session target to use for the benchmark.
size: The number of floating-point numbers to be fetched.
iters: The number of iterations to perform.
"""
times = []
with ops.Graph().as_default():
# Define the tensor to be fetched as a variable, to avoid
# constant-folding.
v = variables.Variable(random_ops.random_normal([size]))
with session.Session(target) as sess:
sess.run(v.initializer)
runner = sess.make_callable(v)
runner() # Warm-up run.
for _ in xrange(iters):
start_time = time.time()
runner()
end_time = time.time()
times.append(end_time - start_time)
print("%s %d %f" % (name, size, np.median(times)))
self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
def _benchmarkRunOp(self, name, target, iters):
"""Runs a microbenchmark to measure the cost of running an op.
Reports the median cost of running a trivial (Variable) op.
Args:
name: A human-readable name for logging the output.
target: The session target to use for the benchmark.
iters: The number of iterations to perform.
"""
times = []
with ops.Graph().as_default():
# Define the op to be run as a variable, to avoid
# constant-folding.
v = variables.Variable(random_ops.random_normal([]))
with session.Session(target) as sess:
sess.run(v.initializer)
sess.run(v.op) # Warm-up run.
for _ in xrange(iters):
start_time = time.time()
sess.run(v.op)
end_time = time.time()
times.append(end_time - start_time)
print("%s %f" % (name, np.median(times)))
self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
def _benchmarkRunOpPrebuilt(self, name, target, iters):
"""Runs a microbenchmark to measure the cost of running an op.
Reports the median cost of running a trivial (Variable) op.
Args:
name: A human-readable name for logging the output.
target: The session target to use for the benchmark.
iters: The number of iterations to perform.
"""
times = []
with ops.Graph().as_default():
# Define the op to be run as a variable, to avoid
# constant-folding.
v = variables.Variable(random_ops.random_normal([]))
with session.Session(target) as sess:
sess.run(v.initializer)
runner = sess.make_callable(v.op)
runner() # Warm-up run.
for _ in xrange(iters):
start_time = time.time()
runner()
end_time = time.time()
times.append(end_time - start_time)
print("%s %f" % (name, np.median(times)))
self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
def benchmarkGrpcSession(self):
server = server_lib.Server.create_local_server()
self._benchmarkFeed("benchmark_session_feed_grpc_4B", server.target, 1,
30000)
session.Session.reset(server.target)
self._benchmarkFeed("benchmark_session_feed_grpc_4MB", server.target,
1 << 20, 25000)
session.Session.reset(server.target)
self._benchmarkFetch("benchmark_session_fetch_grpc_4B", server.target, 1,
40000)
session.Session.reset(server.target)
self._benchmarkFetch("benchmark_session_fetch_grpc_4MB", server.target,
1 << 20, 20000)
session.Session.reset(server.target)
self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_grpc_4B",
server.target, 1, 50000)
session.Session.reset(server.target)
self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_grpc_4MB",
server.target, 1 << 20, 50000)
session.Session.reset(server.target)
self._benchmarkRunOp("benchmark_session_runop_grpc", server.target, 50000)
session.Session.reset(server.target)
self._benchmarkRunOpPrebuilt("benchmark_session_runopprebuilt_grpc",
server.target, 100000)
session.Session.reset(server.target)
def benchmarkDirectSession(self):
self._benchmarkFeed("benchmark_session_feed_direct_4B", "", 1, 80000)
self._benchmarkFeed("benchmark_session_feed_direct_4MB", "", 1 << 20, 20000)
self._benchmarkFetch("benchmark_session_fetch_direct_4B", "", 1, 100000)
self._benchmarkFetch("benchmark_session_fetch_direct_4MB", "", 1 << 20,
20000)
self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_direct_4B",
"", 1, 200000)
self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_direct_4MB",
"", 1 << 20, 200000)
self._benchmarkRunOp("benchmark_session_runop_direct", "", 200000)
self._benchmarkRunOpPrebuilt("benchmark_session_runopprebuilt_direct", "",
200000)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/client/session_benchmark.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like the following, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
  # Password functionality adapted from quality/ranklab/main/tools/notebook.py.
  # Adds the option to run with a password.
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = (
[sys.argv[0]] + [x for x in sys.argv[1:] if x.startswith("--flagfile")])
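  # E.g. (sketch): a kernel subprocess launched as
  #   notebook kernel --flagfile=/tmp/flags
  # keeps only ['notebook', '--flagfile=/tmp/flags'] at this point; main()
  # later restores ORIG_ARGV and strips --flagfile before IPKernelApp parses
  # the remaining arguments.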
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/python/client/notebook.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multiple virtual GPU support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class VirtualGpuTestUtil(object):
def __init__(self,
dim=1000,
num_ops=100,
virtual_devices_per_gpu=None,
device_probabilities=None):
self._dim = dim
self._num_ops = num_ops
if virtual_devices_per_gpu is None:
self._virtual_devices_per_gpu = [3]
else:
self._virtual_devices_per_gpu = virtual_devices_per_gpu
self._visible_device_list = [
i for i in range(len(self._virtual_devices_per_gpu))
]
gpu_devices = [
('/gpu:' + str(i)) for i in range(sum(self._virtual_devices_per_gpu))
]
self.devices = ['/cpu:0'] + gpu_devices
self._num_devices = len(self.devices)
    # Each virtual device gets 2GB (2048MB) of memory.
self._mem_limits_mb = [
([1 << 11] * i) for i in self._virtual_devices_per_gpu
]
self.config = self._GetSessionConfig()
if device_probabilities is not None:
self._device_probabilities = list(device_probabilities) # Deep copy
for i in range(1, self._num_devices):
self._device_probabilities[i] += self._device_probabilities[i - 1]
else:
      # Each device gets the same probability of being assigned an operation.
step = 1.0 / self._num_devices
self._device_probabilities = [
(x + 1) * step for x in range(self._num_devices)
]
      # Set the last threshold above 1.0 so that rounding errors cannot make
      # the last device unreachable.
self._device_probabilities[self._num_devices - 1] = 1.1
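    # E.g. with four devices and uniform probabilities, step is 0.25 and the
    # cumulative thresholds become [0.25, 0.5, 0.75, 1.1]; each sampled
    # random.random() value maps to the first device whose threshold exceeds
    # it.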
logging.info('dim: %d', self._dim)
logging.info('num_ops: %d', self._num_ops)
logging.info('visible_device_list: %s', str(self._visible_device_list))
logging.info('virtual_devices_per_gpu: %s',
str(self._virtual_devices_per_gpu))
logging.info('mem_limits: %s', str(self._mem_limits_mb))
logging.info('devices: %s', str(self.devices))
logging.info('config: %s', text_format.MessageToString(self.config))
logging.info('device_probabilities: %s', str(self._device_probabilities))
  # Creates a session config that defines the virtual GPU devices.
def _GetSessionConfig(self):
virtual_device_gpu_options = config_pb2.GPUOptions(
visible_device_list=','.join(str(d) for d in self._visible_device_list),
experimental=config_pb2.GPUOptions.Experimental(virtual_devices=[
config_pb2.GPUOptions.Experimental.VirtualDevices(
memory_limit_mb=i) for i in self._mem_limits_mb
]))
return config_pb2.ConfigProto(gpu_options=virtual_device_gpu_options)
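  # With the defaults above (one physical GPU split into three virtual
  # devices), the returned proto is roughly (sketch):
  #
  #   gpu_options {
  #     visible_device_list: "0"
  #     experimental {
  #       virtual_devices { memory_limit_mb: [2048, 2048, 2048] }
  #     }
  #   }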
  # Generates a list of 3-tuples; each tuple contains the device indices for
  # the two sources and the destination of a binary operation like 'add':
  # (src_device_1, src_device_2, dst_device)
def _GenerateOperationPlacement(self):
result = []
for unused_i in range(self._num_ops):
op_device = ()
for unused_j in range(3):
random_num = random.random()
for device_index in range(self._num_devices):
if self._device_probabilities[device_index] > random_num:
op_device += (device_index,)
break
result.append(op_device)
return result
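  # A generated placement might look like [(0, 2, 1), (3, 1, 1), ...] (sketch
  # values): the first two entries of each tuple index the operands' devices
  # and the third indexes the device on which the 'add' runs.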
# Logs part of the matrix for debugging purposes.
def _LogMatrix(self, mat, dim):
logging.info('---- printing the first 10*10 submatrix ----')
for i in range(min(10, dim)):
row = ''
for j in range(min(10, dim)):
row += ' ' + str(mat[i][j])
logging.info(row)
# Runs a list of 'add' operations where each operation satisfies the device
# placement constraints in `op_placement`, and returns the result.
def _TestRandomGraphWithDevices(self,
sess,
seed,
op_placement,
devices,
debug_mode=False):
data = []
shape = (self._dim, self._dim)
feed_dict = {}
# Initialize the matrices
for i in range(len(devices)):
with ops.device(devices[i]):
var = array_ops.placeholder(dtypes.float32, shape=shape)
np.random.seed(seed + i)
feed_dict[var] = np.random.uniform(
low=0, high=0.1, size=shape).astype(np.float32)
data.append(var)
# Run the 'add' operations on those matrices
for op in op_placement:
with ops.device(devices[op[2]]):
data[op[2]] = math_ops.add(data[op[0]], data[op[1]])
with ops.device('/cpu:0'):
s = data[0]
for i in range(1, len(data)):
s = math_ops.add(s, data[i])
if debug_mode:
logging.info(ops.get_default_graph().as_graph_def())
result = sess.run(s, feed_dict=feed_dict)
self._LogMatrix(result, self._dim)
return result
  # Generates a random graph with `self._num_ops` 'add' operations, each
  # placed on a randomly chosen virtual device, and tests that the result is
  # identical to the result of running the same graph on the CPU only.
def TestRandomGraph(self, sess, op_placement=None, random_seed=None):
debug_mode = False
if op_placement is None:
op_placement = self._GenerateOperationPlacement()
else:
debug_mode = True
if random_seed is None:
random_seed = random.randint(0, 1 << 31)
else:
debug_mode = True
logging.info('Virtual gpu functional test for random graph...')
logging.info('operation placement: %s', str(op_placement))
logging.info('random seed: %d', random_seed)
# Run with multiple virtual gpus.
result_vgd = self._TestRandomGraphWithDevices(
sess, random_seed, op_placement, self.devices, debug_mode=debug_mode)
# Run with single cpu.
result_cpu = self._TestRandomGraphWithDevices(
sess,
random_seed,
op_placement, ['/cpu:0'] * self._num_devices,
debug_mode=debug_mode)
# Test the result
for i in range(self._dim):
for j in range(self._dim):
if result_vgd[i][j] != result_cpu[i][j]:
logging.error(
'Result mismatch at row %d column %d: expected %f, actual %f', i,
j, result_cpu[i][j], result_vgd[i][j])
logging.error('Devices: %s', self.devices)
logging.error('Memory limits (in MB): %s', self._mem_limits_mb)
return False
return True
class VirtualGpuTest(test_util.TensorFlowTestCase):
def __init__(self, method_name):
super(VirtualGpuTest, self).__init__(method_name)
self._util = VirtualGpuTestUtil()
@test_util.deprecated_graph_mode_only
def testStatsContainAllDeviceNames(self):
with self.session(config=self._util.config) as sess:
      # TODO(laigd): b/70811538. The is_gpu_available() call will invoke
      # DeviceFactory::AddDevices() with a default SessionOption, which
      # prevents virtual devices from being added later; it must therefore be
      # called within the context of a session in which the virtual devices
      # have been created. The same applies to the following test case.
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
mat_shape = [10, 10]
data = []
for d in self._util.devices:
with ops.device(d):
var = variables.Variable(random_ops.random_uniform(mat_shape))
self.evaluate(var.initializer)
data.append(var)
s = data[0]
for i in range(1, len(data)):
s = math_ops.add(s, data[i])
sess.run(s, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:1' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:2' in devices)
@test_util.deprecated_graph_mode_only
def testLargeRandomGraph(self):
with self.session(config=self._util.config) as sess:
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
for _ in range(5):
if not self._util.TestRandomGraph(sess):
return
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/client/virtual_gpu_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import sys
import threading
import time
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as framework_device_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
# Import gradients to resolve circular imports
from tensorflow.python.ops import gradients # pylint: disable=unused-import
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
def setUp(self):
super(SessionTest, self).setUp()
warnings.simplefilter('always')
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(
np.asarray(
[[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32),
copy_val)
def testManyCPUs(self):
with session.Session(
config=config_pb2.ConfigProto(device_count={
'CPU': 2, 'GPU': 0
})) as sess:
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
num_cpu_devices = 0
num_gpu_devices = 0
for device in sess.list_devices():
device_type = framework_device_lib.DeviceSpec.from_string(
device.name).device_type
if device_type == 'CPU':
num_cpu_devices += 1
elif device_type == 'GPU':
num_gpu_devices += 1
self.assertEqual(2, num_cpu_devices)
self.assertEqual(0, num_gpu_devices)
def testPerSessionThreads(self):
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config_pb = config_pb2.ConfigProto()
pool = config_pb.session_inter_op_thread_pool.add()
with session.Session(config=config_pb) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config_pb.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config_pb) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
pool = config_pb.session_inter_op_thread_pool.add()
pool.num_threads = 1
pool.global_name = 't1'
run_options = config_pb2.RunOptions()
run_options.inter_op_thread_pool = (
len(config_pb.session_inter_op_thread_pool) - 1)
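    # inter_op_thread_pool is an index into session_inter_op_thread_pool;
    # here it selects the pool added above with global_name 't1'.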
with session.Session(config=config_pb) as s:
inp = constant_op.constant(30.0, name='W2')
results = s.run([inp], options=run_options)
self.assertAllEqual([30.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
def exc_predicate(e):
return (e.op == c.op and e.op._original_op == b.op and
e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
@test_util.run_v1_only('b/120545219')
def testFetchAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
val1 = np.array([1.2, 3.4, 5.6])
val2 = np.array([[1, 2], [4, 3]])
val3 = np.array([10, 20, 30])
t1 = constant_op.constant(val1)
t2 = constant_op.constant(val2)
sample = SampleAttr(t1, t2)
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val1, result.field1)
self.assertAllEqual(val2, result.field2)
result = sess.run(sample, feed_dict={sample.field1: val3})
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val3, result.field1)
self.assertAllEqual(val2, result.field2)
@test_util.run_v1_only('b/120545219')
def testFetchNestedAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field0 = attr.ib()
field1 = attr.ib()
v1 = 10
v2 = 20
v3 = np.float32(1.2)
v4 = np.float32(3.4)
v5 = np.float64(100.001)
v6 = np.float64(-23.451)
arr1 = np.array([1.2, 6.7, 3.4])
arr2 = np.array([7, 11, 3])
sample = SampleAttr(
SampleAttr(
SampleAttr(constant_op.constant(v1), constant_op.constant(v2)),
SampleAttr(constant_op.constant(arr1), constant_op.constant(arr2))),
{'A': SampleAttr(constant_op.constant(v3), constant_op.constant(v4)),
'B': [SampleAttr(constant_op.constant(v5), constant_op.constant(v6))]})
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertIsInstance(result.field0, SampleAttr)
self.assertIsInstance(result.field0.field0, SampleAttr)
self.assertIsInstance(result.field0.field1, SampleAttr)
self.assertIsInstance(result.field0.field1.field0, np.ndarray)
self.assertAllEqual(arr1, result.field0.field1.field0)
self.assertIsInstance(result.field0.field1.field1, np.ndarray)
self.assertAllEqual(arr2, result.field0.field1.field1)
self.assertIsInstance(result.field1, dict)
self.assertIn('A', result.field1)
self.assertIn('B', result.field1)
self.assertIsInstance(result.field1['A'], SampleAttr)
self.assertAllEqual(
[v3, v4],
[result.field1['A'].field0, result.field1['A'].field1])
self.assertIsInstance(result.field1['B'], list)
self.assertEqual(1, len(result.field1['B']))
self.assertIsInstance(result.field1['B'][0], SampleAttr)
self.assertAllEqual(
[v5, v6],
[result.field1['B'][0].field0, result.field1['B'][0].field1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
      self.assertEqual(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
      self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c),
ABC(a=a, b=b, c=c), {
'a': a.name,
'c': c,
'b': b
}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c), {
'a': a,
'c': c,
'b': b
}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(
DEFG(
d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={
'a': a,
'c': c,
'b': b
}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({
'd': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {
'a': a.name,
'c': c,
'b': b
}
})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices), constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(sp, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
dtype=np.float32, shape=shape, name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: (values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: (values, indices)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
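      # (Session.run() detects the nodes added since the previous call and
      # extends the in-session graph with them before executing.)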
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
@test_util.run_v1_only('b/120545219')
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.VariableV1(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(
target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
@staticmethod
def _build_graph():
time.sleep(random.random() * 0.1)
# Do some graph construction. Try to exercise non-trivial paths.
graph = ops.get_default_graph()
gdef = None
for _ in range(10):
x = array_ops.placeholder(dtype=dtypes.float32)
with ops.colocate_with(x):
y = array_ops.placeholder(dtype=dtypes.float32)
with ops.device('/cpu:0'):
z = control_flow_ops.while_loop(
lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y])
with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}):
gradients_impl.gradients(z, [x, y])
if gdef is None:
gdef = graph.as_graph_def()
else:
importer.import_graph_def(gdef, name='import')
@test_util.run_v1_only('b/120545219')
def testParallelRunAndSingleBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in threads:
t.start()
SessionTest._build_graph()
stop.set()
for t in threads:
t.join()
@test_util.run_v1_only('b/120545219')
def testParallelRunAndParallelBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
run_threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in run_threads:
t.start()
build_threads = [self.checkedThread(target=SessionTest._build_graph)
for _ in range(10)]
for t in build_threads:
t.start()
for t in build_threads:
t.join()
# Let the run_threads run until the build threads are finished.
stop.set()
for t in run_threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals('versions { producer: %d min_consumer: %d }' %
(versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
      self.assertEqual(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
      self.assertEqual(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
      self.assertEqual(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
@test_util.run_v1_only('b/120545219')
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
@test_util.run_v1_only('b/120545219')
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
@test_util.run_v1_only('b/120545219')
def testMultipleInteractiveSessionsWarning(self):
# Reinitialize the global state to ensure that the expected warnings will
# be emitted.
session.InteractiveSession._active_session_count = 0 # pylint: disable=protected-access
sess = session.InteractiveSession()
sess.run(constant_op.constant(4.0)) # Run so that the session is "opened".
sess.close()
# Opening and closing interactive sessions serially should not warn.
with warnings.catch_warnings(record=True) as w:
sess = session.InteractiveSession()
sess.close()
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
sess = session.InteractiveSession()
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
sess2 = session.InteractiveSession()
self.assertEqual(1, len(w))
self.assertTrue('An interactive session is already active. This can cause '
'out-of-memory errors in some cases. You must explicitly '
'call `InteractiveSession.close()` to release resources '
'held by the other session(s).' in str(w[0].message))
sess2.close()
sess.close()
@test_util.run_v1_only('b/120545219')
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
    # This test currently does not link in any GPU kernels, which is why
    # placing ops on GPU is invalid. If GPU kernels are ever added to this
    # test, some other op / device combination should be chosen.
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
@test_util.run_v1_only('b/120545219')
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
    # This test currently does not link in any GPU kernels, which is why
    # placing ops on GPU is invalid. If GPU kernels are ever added to this
    # test, some other op / device combination should be chosen.
with ops.device('/device:GPU:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool,
dtypes.complex64, dtypes.complex128
]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={
feed_t: np_array
}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={
feed_t: np_array
}))
# Also check that we can get both back.
out_v, feed_v = sess.run(
[out_t, feed_t], feed_dict={
feed_t: np_array
})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testMakeCallableOnTensorWithRunOptions(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
tensor_runner = sess.make_callable(a, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
res = tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(42.0, res)
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableOnOperationWithRunOptions(self):
with session.Session() as sess:
a = variables.Variable(42.0)
b = state_ops.assign_add(a, 1.0)
sess.run(a.initializer)
tensor_runner = sess.make_callable(b.op, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(43.0, sess.run(a))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableWithFeedListAndRunOptions(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
tensor_runner = sess.make_callable(
a, feed_list=[ph.name], accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
self.assertAllClose(42.0,
tensor_runner(
41.0,
options=run_options,
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testOptimizedMakeCallable(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
for _ in range(3):
callable_fn = sess._make_callable_from_options(callable_opts)
for _ in range(5):
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32)))
def testOptimizedMakeCallableWithRunMetadata(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
callable_opts.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
callable_fn = sess._make_callable_from_options(callable_opts)
run_metadata = config_pb2.RunMetadata()
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32),
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array(
[compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array(
[compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(
sess.run(feed_t, feed_dict={
feed_t: c_list
}), c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [
u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode',
u'\U0001f60e deal with it'
]
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
        self.assertFalse(run_metadata.HasField('step_stats'))
        sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(
constant_op.constant(1.0), options=None, run_metadata=run_metadata)
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0), options=run_options, run_metadata=None)
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
'Input to reshape is a tensor with 4 values, '
'but the requested shape has 21'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config_pb = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config_pb)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config_pb = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config_pb) as sess:
with ops.device('/device:GPU:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(
d,
feed_dict={a: 1.0},
options=run_options,
run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def runTestOutputPartitionGraphs(self, sess):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
a = constant_op.constant(1)
run_metadata = config_pb2.RunMetadata()
sess.run(a, options=run_options, run_metadata=run_metadata)
self.assertGreater(len(run_metadata.partition_graphs), 0)
sess.run(a, run_metadata=run_metadata)
self.assertEqual(len(run_metadata.partition_graphs), 0)
@test_util.run_v1_only('b/120545219')
def testOutputPartitionGraphsDirect(self):
self.runTestOutputPartitionGraphs(session.Session())
@test_util.run_v1_only('b/120545219')
def testOutputPartitionGraphsDistributed(self):
server = server_lib.Server.create_local_server()
self.runTestOutputPartitionGraphs(session.Session(server.target))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
@test_util.run_v1_only('b/120545219')
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
@test_util.run_v1_only('b/120545219')
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config_pb) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
@test_util.run_v1_only('b/120545219')
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.cached_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(
squared_tensor, feed_dict={
squared_tensor: np1 * np1
})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
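  # For context (a summary, not part of the original file): the three
  # conversion functions registered above play distinct roles. fetch_fn maps a
  # fetched object to (tensors_to_run, contraction_fn); feed_fn1 maps a fed
  # object plus its value to a list of (tensor, value) pairs; feed_fn2 lists
  # the feedable tensors used for partial runs.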
def testDefaultLogDevicePlacement(self):
class CaptureStderr(str):
"""Class to capture stderr from C++ shared library."""
def __enter__(self):
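        # '\b' (backspace) serves as a sentinel: __exit__ writes it to stderr
        # so that read() knows when the captured output is fully drained.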
self._esc = compat.as_str('\b')
self._output = compat.as_str('')
self._stderr = sys.stderr
self._fd = self._stderr.fileno()
self._out_pipe, in_pipe = os.pipe()
# Save the original io stream.
self._dup_fd = os.dup(self._fd)
# Replace the original io stream with in pipe.
os.dup2(in_pipe, self._fd)
return self
def __exit__(self, *args):
self._stderr.write(self._esc)
self._stderr.flush()
self.read()
os.close(self._out_pipe)
# Restore the original io stream.
os.dup2(self._dup_fd, self._fd)
def read(self):
while True:
data = os.read(self._out_pipe, 1)
if not data or compat.as_str(data) == self._esc:
break
self._output += compat.as_str(data)
def __str__(self):
return self._output
if context.executing_eagerly():
context.set_log_device_placement(True)
with CaptureStderr() as log:
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
else:
      # Passing the config to the server, but not the session, should still
      # result in logging device placement.
config_pb = config_pb2.ConfigProto(log_device_placement=True)
server = server_lib.Server.create_local_server(config=config_pb)
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
with session.Session(server.target) as sess:
with CaptureStderr() as log:
sess.run(c)
# Ensure that we did log device placement.
self.assertTrue('/replica:0/task:0/device:CPU:0' in str(log), str(log))
@test_util.run_v1_only('b/120545219')
def testLocalMasterSessionTimeout(self):
# Test that the timeout passed in a config to the session works correctly.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server()
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target, config=config_pb) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
@test_util.run_v1_only('b/120545219')
def testDefaultServerTimeout(self):
# Test that the default server config timeout gets used when no Session
# config is provided.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server(config=config_pb)
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def runTestBuildGraphError(self, sess):
# Ensure that errors from building the graph get propagated.
data = array_ops.placeholder(dtypes.float32, shape=[])
# pylint: disable=protected-access
enter_1 = gen_control_flow_ops.enter(data, 'foo_1', False)
enter_2 = gen_control_flow_ops.enter(data, 'foo_2', False)
# pylint: enable=protected-access
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError('has inputs from different frames'):
sess.run(res, feed_dict={data: 1.0})
@test_util.run_v1_only('b/120545219')
def testBuildGraphErrorDirect(self):
self.runTestBuildGraphError(session.Session())
@test_util.run_v1_only('b/120545219')
def testBuildGraphErrorDist(self):
server = server_lib.Server.create_local_server()
self.runTestBuildGraphError(session.Session(server.target))
def testDeviceAttributes(self):
attrs = session._DeviceAttributes(
'/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def testDeviceAttributesCanonicalization(self):
attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def runTestAddFunctionToSession(self, target=''):
"""Add a function to a session after the graph has already been run."""
@function.Defun(dtypes.float32)
def foo(x):
return x + 1
x = constant_op.constant(1.0)
with session.Session(target=target) as sess:
sess.run(x)
f = foo(x)
result = sess.run(f)
self.assertEqual(result, 2.0)
@test_util.run_v1_only('b/120545219')
def testAddFunctionToSession(self):
self.runTestAddFunctionToSession()
@test_util.run_v1_only('b/120545219')
def testAddFunctionToGrpcSession(self):
server = server_lib.Server.create_local_server()
self.runTestAddFunctionToSession(server.target)
def testOpenAndCloseGrpcSession(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target):
pass
def testOpenAndCloseSession(self):
with session.Session():
pass
@test_util.run_v1_only('b/120545219')
def testAutoConvertAndCheckData(self):
with self.cached_session() as sess:
a = array_ops.placeholder(dtype=dtypes.string)
with self.assertRaisesRegexp(
TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
sess.run(a, feed_dict={a: 1})
@test_util.run_v1_only('b/120545219')
def testOptimizerOptions(self):
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
with ops.Graph().as_default():
sess = session.Session()
self.assertEqual(
sess._config.graph_options.rewrite_options.min_graph_nodes, -1)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/client/session_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for backpropagation using the tape utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.util import compat
VSpace = collections.namedtuple("VSpace", [
"aggregate_fn", "num_elements_fn", "zeros_fn", "ones_fn", "graph_shape_fn"
])
def imperative_grad(tape,
target,
sources,
output_gradients=None,
sources_raw=None,
unconnected_gradients=UnconnectedGradients.NONE):
"""Computes gradients from the imperatively defined tape on top of the stack.
  Works by filtering the tape, computing how many downstream usages there are
  of each tensor and entry, and repeatedly applying backward functions until
  we have gradients for all sources.
Args:
tape: the gradient tape which stores the trace.
target: either a Tensor or list of Tensors to be differentiated.
    sources: list of Tensors for which we want gradients.
    output_gradients: if not None, a list of gradients provided for each
      target, or None if we are to use each target's computed downstream
      gradient.
    sources_raw: if not None, a list of the source python objects from which
      the sources were generated. Should have the same length as sources, and
      only needs to be populated if unconnected_gradients is 'zero'.
    unconnected_gradients: determines the value returned if the target and
      sources are unconnected. When 'none', the value returned is None,
      whereas when 'zero', a zero tensor of the same shape as the source is
      returned.
Returns:
the gradient wrt each of the sources.
Raises:
ValueError: if the arguments are invalid.
RuntimeError: if something goes wrong.
"""
try:
unconnected_gradients = UnconnectedGradients(unconnected_gradients)
except ValueError:
raise ValueError(
"Unknown value for unconnected_gradients: %r" % unconnected_gradients)
return pywrap_tensorflow.TFE_Py_TapeGradient(
tape._tape, # pylint: disable=protected-access
target,
sources,
output_gradients,
sources_raw,
compat.as_str(unconnected_gradients.value))
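# A minimal usage sketch (not part of the original file): imperative_grad is
# the low-level machinery behind tf.GradientTape.gradient(). The
# unconnected_gradients behavior documented above looks like this through the
# public API (assumes eager execution and an installed `tensorflow` package):
def _unconnected_gradients_sketch():
  import tensorflow as tf  # Assumed available; hypothetical helper only.
  x = tf.constant(1.0)
  y = tf.constant(2.0)
  with tf.GradientTape() as tape:
    tape.watch(x)
    tape.watch(y)
    z = x * x  # z does not depend on y.
  # With the default 'none', the unconnected source y yields None.
  grads_none = tape.gradient(z, [x, y])  # [2.0, None]
  with tf.GradientTape() as tape:
    tape.watch(x)
    tape.watch(y)
    z = x * x
  # With 'zero', a zeros tensor of y's shape is returned instead.
  grads_zero = tape.gradient(
      z, [x, y], unconnected_gradients=tf.UnconnectedGradients.ZERO)
  return grads_none, grads_zero  # grads_zero == [2.0, 0.0]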
|
tensorflow-master
|
tensorflow/python/eager/imperative_grad.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DefunCollectionTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
dict(testcase_name='Defun', function_decorator=function.defun),
dict(
testcase_name='DefFunction',
function_decorator=def_function.function))
def testCollectionValueAccess(self, function_decorator):
"""Read values from graph collections inside of defun."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = 2
y = 5
ops.add_to_collection('x', x)
ops.add_to_collection('y', y)
@function_decorator
def fn():
x_const = constant_op.constant(ops.get_collection('x')[0])
y_const = constant_op.constant(ops.get_collection('y')[0])
z = math_ops.add(x_const, y_const)
ops.add_to_collection('z', 7)
return z
self.assertEqual(7, int(self.evaluate(fn())))
        self.assertEqual(ops.get_collection('x'), [2])
        self.assertEqual(ops.get_collection('y'), [5])
        self.assertEqual(ops.get_collection('z'), [])
@parameterized.named_parameters(
dict(testcase_name='Defun', function_decorator=function.defun),
dict(
testcase_name='DefFunction',
function_decorator=def_function.function))
def testCollectionVariableValueAccess(self, function_decorator):
"""Read variable value from graph collections inside of defun."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
v = resource_variable_ops.ResourceVariable(1.0)
@function_decorator
def f():
return v.read_value()
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, float(self.evaluate(f())))
        self.assertEqual(
            len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 1)
def testCollectionVariableValueWrite(self):
"""Write variable value inside defun."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
@function.defun
def f():
v = resource_variable_ops.ResourceVariable(2.0)
return v
_ = f.get_concrete_function()
self.evaluate(variables.global_variables_initializer())
self.assertEqual(2.0, float(self.evaluate(f())))
        self.assertEqual(
            len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 1)
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
test.main()
|
tensorflow-master
|
tensorflow/python/eager/function_defun_collection_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
from multiprocessing.pool import ThreadPool
import sys
import weakref
from absl.testing import parameterized
import numpy
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function as tf_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.layers import convolutional
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.training import training_ops
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
def total_function_cache(defined):
# pylint: disable=protected-access
return (set(defined._function_cache.primary)
| set(defined._function_cache.arg_relaxed))
# pylint: enable=protected-access
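# Illustrative sketch (not part of the original file): how the primary and
# shape-relaxed caches interact. This mirrors testInputShapeFunctionRelaxation
# below: after a second cache miss that differs only in input shape, a relaxed
# (unknown-shape) specialization is traced and serves later shapes without
# retracing.
def _relaxed_cache_sketch():
  @function.defun(experimental_relax_shapes=True)
  def plus_one(x):
    return x + 1
  plus_one(constant_op.constant([1.0]))            # First trace: shape [1].
  plus_one(constant_op.constant([1.0, 2.0]))       # Second miss on shape.
  plus_one(constant_op.constant([1.0, 2.0, 3.0]))  # Served by the relaxed fn.
  return len(total_function_cache(plus_one))       # 2 in the analogous test.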
class MiniModel(keras_training.Model):
"""Minimal model for mnist.
Useful for testing and debugging on slow TPU simulators.
"""
def __init__(self):
super(MiniModel, self).__init__(name='')
self.fc = keras.layers.Dense(1, name='fc', kernel_initializer='ones',
bias_initializer='ones')
def call(self, inputs, training=True):
return self.fc(inputs)
class DefunnedMiniModel(MiniModel):
@function.defun
def call(self, inputs, training=True):
return super(DefunnedMiniModel, self).call(inputs, training=training)
def _example_indexed_slices_with_dense_shape():
return ops.IndexedSlices(
constant_op.constant([1, 2]), constant_op.constant([0, 1]),
constant_op.constant([2]))
def _example_indexed_slices_without_dense_shape():
return ops.IndexedSlices(
constant_op.constant([1, 2]), constant_op.constant([0, 1]))
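# For context (not part of the original file): an IndexedSlices is a sparse
# stand-in for a dense tensor, typically produced as the gradient of a gather.
# It bundles `values`, the row `indices` they occupy, and an optional
# `dense_shape`; the two helpers above build one of each flavor.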
class FunctionTest(test.TestCase, parameterized.TestCase):
def testBasic(self):
matmul = def_function.function(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
sq2 = matmul(sq, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
self.assertAllEqual(sq2.numpy().reshape(-1), [52, 76, 74, 108])
def testVariable(self):
v1 = variables.Variable(1.0)
add = def_function.function(lambda x, v: x + v1 + v)
v2 = variables.Variable(1.0)
x = constant_op.constant(1.0)
r = add(x, v2)
self.assertEqual(3.0, self.evaluate(r))
def testExternalControlDependency(self):
with ops.Graph().as_default(), self.test_session():
v = variables.Variable(1.0)
v.initializer.run()
op = v.assign_add(1.0)
@function.defun
def f():
with ops.control_dependencies([op]):
return 1.0
self.evaluate(f())
self.assertAllEqual(self.evaluate(v), 2.0)
def testInputShapeFunctionRelaxation(self):
unknown_dim = [False]
@function.defun(experimental_relax_shapes=True)
def func(a):
if a._shape_tuple()[0] is None:
unknown_dim[0] = True
return a + 1
func(constant_op.constant([]))
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 1)
func(constant_op.constant([1.0]))
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
func(constant_op.constant([1.0, 2.0]))
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
def testCaptureNonTrainableVariable(self):
v = variables.Variable(1.0, trainable=False)
@def_function.function
def f():
return v + 1
c = f.get_concrete_function()
self.assertEqual(len(list(c.graph.variables)), 1) # pylint: disable=g-generic-assert
def testNestedInputShapeFunctionRelaxation(self):
unknown_dim = [False]
@function.defun(experimental_relax_shapes=True)
def func(a_, b_=None):
del a_ # Only used to check which cache is used.
self.assertEqual(b_[0]._shape_tuple(), ())
if b_[1]._shape_tuple()[0] is None:
unknown_dim[0] = True
return b_[0] + 1
a = 'hi'
b0 = constant_op.constant(1.0)
func(a, b_=[b0, constant_op.constant([])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 1)
func(a, b_=[b0, constant_op.constant([1.0])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
func(a, b_=[b0, constant_op.constant([1.0, 1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
unknown_dim[0] = False
    # Now do the same, except with a new value of `a` which is not a tensor;
    # this should change the cache key.
a = 'bye'
func(a, b_=[b0, constant_op.constant([])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 3)
# Since we already marked a cache miss for a function with the same
# non-input signatures, here we will immediately start relaxing shapes.
func(a, b_=[b0, constant_op.constant([1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 3)
def testFunctionRelaxationLosesInnerDimWithKerasLayer(self):
layer = keras.layers.Dense(1)
fn = def_function.function(experimental_relax_shapes=True)(layer)
with self.captureWritesToStream(sys.stderr) as printed:
fn(array_ops.ones((3, 2)))
self.assertNotIn('ValueError', printed.contents())
with self.captureWritesToStream(sys.stderr) as printed:
# Use batch size 2 to trigger a second cache miss on the shape.
fn(array_ops.ones((2, 2)))
self.assertNotIn('ValueError', printed.contents())
    # Shape relaxation passes TensorShape([None, None]), which makes the
    # layer's matmul fail at run time due to incompatible inner dimensions;
    # with static shapes this would have been a graph-build-time error (the
    # layer would complain about the inner dim being 4).
with self.captureWritesToStream(sys.stderr) as printed:
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'Matrix size-incompatible'):
fn(array_ops.ones((3, 4)))
def testNestedShapeFunctionRelaxation(self):
got_shape = [None]
# The inner function will go through shape relaxation because the shapes it
# receives will be [1], [2], [3], ...
@def_function.function(experimental_relax_shapes=True)
def bar(x_shape):
got_shape[0] = x_shape._shape_tuple()
return x_shape
# The outer function will not go through shape relaxation because the shapes
# it receives will be [1], [[1]], [[[1]]], ...
@def_function.function(experimental_relax_shapes=True)
def foo(ones):
return bar(array_ops.shape(ones))
for rank in range(1, 6):
x_shape = self.evaluate(foo(array_ops.ones([1] * rank)))
self.assertAllEqual(x_shape, [1] * rank)
if rank < 3:
self.assertEqual(got_shape[0], (rank,))
else:
self.assertEqual(got_shape[0], (None,))
def testNoHash(self):
@def_function.function()
def f(_):
return 1.0
with self.assertRaisesRegexp(TypeError, 'set'):
f(set([]))
def testFuncName(self):
@function.defun_with_attributes(attributes={'func_name': 'multiply'})
def add(x, y):
_ = x * y
return x + y
@function.defun
def add_2(x, y):
_ = x * y
return x + y
self.assertEqual(add._name, 'multiply')
self.assertEqual(add_2._name, 'add_2')
def testBasicGraphMode(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = sq(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedInputsGraphMode(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = a_times_b(pair({'a': t}, {'b': t}))
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputsGraphMode(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function()
def pairs_mul(pair_a, pair_b):
return pair(matmul(pair_a.a, pair_b.a), matmul(pair_a.b, pair_b.b))
a = constant_op.constant([[1.0, 2.0], [1.0, 2.0]])
b = constant_op.constant([[3.0, 4.0], [3.0, 4.0]])
out = pairs_mul(pair(a, b), pair(b, a))
expected = pair(math_ops.matmul(a, b).numpy(),
math_ops.matmul(b, a).numpy())
self.assertAllClose(out, expected)
def testGraphEagerIsolation(self):
@function.defun
def f():
self.v = variables.Variable(1.0)
return self.v.read_value()
self.assertAllEqual(f(), 1.0)
with ops.Graph().as_default():
self.assertEqual(f().shape, ())
def testBasicGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testInputSpecGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
sq_op = sq.get_concrete_function(
tensor_spec.TensorSpec((None, None), dtypes.float32))
self.assertEqual([None, None], sq_op.output_shapes.as_list())
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out1 = sq_op(t1)
self.assertAllEqual(out1, math_ops.matmul(t1, t1).numpy())
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out2 = sq_op(t2)
self.assertAllEqual(out2, math_ops.matmul(t2, t2).numpy())
def testNestedInputSpecGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(mats):
((a, b),) = mats
return matmul(a, b)
sq_op_autonamed = sq.get_concrete_function(
[(tensor_spec.TensorSpec((None, None), dtypes.float32),
tensor_spec.TensorSpec((None, None), dtypes.float32))])
self.assertEqual([None, None], sq_op_autonamed.output_shapes.as_list())
sq_op = sq.get_concrete_function(
[(tensor_spec.TensorSpec((None, None), dtypes.float32,
name='first_mat'),
tensor_spec.TensorSpec((None, None), dtypes.float32,
name='second_mat'))])
self.assertEqual([None, None], sq_op.output_shapes.as_list())
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.4, 2.4], [3.4, 4.4]])
out = sq_op(first_mat=t1, second_mat=t2)
self.assertAllEqual(out, math_ops.matmul(t1, t2).numpy())
self.assertAllEqual(sq_op_autonamed(t1, t2),
math_ops.matmul(t1, t2).numpy())
def testExecutingStatelessDefunConcurrently(self):
@def_function.function
def stateless(x):
return math_ops.multiply(2.0, x)
pool = ThreadPool()
inputs = [constant_op.constant(1.0 * x) for x in range(100)]
outputs = [float(out) for out in pool.map(stateless, inputs)]
expected = [float(2.0 * x) for x in inputs]
self.assertSequenceEqual(outputs, expected)
def testExecutingManyStatelessDefunsConcurrently(self):
@def_function.function
def stateless(x):
del x
return math_ops.multiply(2.0, 2.0)
pool = ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
outputs = [
float(out)
for out in pool.map(stateless, [object() for _ in range(100)])
]
expected = [4.0] * 100
self.assertSequenceEqual(outputs, expected)
def testExecutingStatefulDefunConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def stateful(x):
v.assign(x)
pool = ThreadPool()
inputs = [constant_op.constant(0.0)] * 100
pool.map(stateful, inputs)
self.assertEqual(float(v.read_value()), 0.0)
def testExecutingManyStatefulDefunsConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def stateful(x):
del x
return v.assign(0.0)
pool = ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
pool.map(stateful, [object() for _ in range(100)])
self.assertEqual(float(v.read_value()), 0.0)
def disabled_testRandomSeed(self):
@def_function.function
def f():
return random_ops.random_normal(())
random_seed.set_random_seed(1)
x = f()
self.assertNotEqual(x, f())
random_seed.set_random_seed(1)
self.assertAllEqual(f(), x)
def testNestedInputsGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = a_times_b.get_concrete_function(
pair(dict(a=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'a')),
dict(b=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'b'))))
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(a=t, b=t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return (matmul(a, a), {'b': constant_op.constant(1.0)})
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes,
(tensor_shape.TensorShape([2, 2]),
{'b': tensor_shape.TensorShape([])}))
self.assertEqual(sq_op.output_dtypes,
(dtypes.float32, {'b': dtypes.float32}))
(a, b) = sq_op(t)
self.assertAllEqual(a, math_ops.matmul(t, t).numpy())
self.assertAllEqual(b['b'].numpy(), 1.0)
def testGraphFunctionNoneOutput(self):
@def_function.function
def fn(unused_a, unused_b):
return None
x = constant_op.constant(1)
fn_op = fn.get_concrete_function(x, x)
self.assertEqual(fn_op.output_dtypes, None)
self.assertEqual(fn_op.output_shapes, None)
self.assertAllEqual(fn_op(x, x), None)
def testDefunNumpyArraysConvertedToTensors(self):
def f(x):
self.assertIsInstance(x, ops.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
defined = function.defun(f)
defined(x)
self.assertLen(total_function_cache(defined), 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertLen(total_function_cache(defined), 1)
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1., defined(numpy.ones([])).numpy())
self.assertEqual(0., defined(numpy.zeros([])).numpy())
self.assertEqual(1., defined(array_ops.ones([])).numpy())
self.assertEqual(0., defined(array_ops.zeros([])).numpy())
def testDefunCapturedInt32(self):
x = constant_op.constant(1, dtype=dtypes.int32)
@def_function.function
def add_int32s():
return x + x
self.assertEqual(2, int(add_int32s()))
def testDefunReadVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def f():
return v.read_value()
self.assertEqual(1.0, float(f()))
def testDefunAssignAddVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
x = constant_op.constant(2.0)
@def_function.function
def test_assign_add():
v.assign_add(x)
return v.read_value()
self.assertEqual(3.0, float(test_assign_add()))
@test_util.run_in_graph_and_eager_modes
def testTensorInitializationInFunctionRaisesError(self):
error_msg = ('Tensor-typed variable initializers must either be '
'wrapped in an init_scope or callable.*')
@def_function.function
def tensor_init():
with self.assertRaisesRegexp(ValueError, error_msg):
resource_variable_ops.ResourceVariable(constant_op.constant(2.0))
tensor_init()
@test_util.run_in_graph_and_eager_modes
def testCallableTensorInitializationInFunction(self):
@def_function.function
def tensor_init():
self.v = resource_variable_ops.ResourceVariable(
lambda: constant_op.constant(2.0))
return self.v.read_value()
value = tensor_init()
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(value), 2.0)
@test_util.also_run_as_tf_function
def testInitScopeTensorInitializationInFunction(self):
@def_function.function
def tensor_init():
with ops.init_scope():
const = constant_op.constant(2.0)
        # Note: using ResourceVariable directly (instead of Variable) bypasses
        # variable_creator_scope, which sidesteps tf.function's variable
        # creation requirements.
self.v = resource_variable_ops.ResourceVariable(const)
return self.v.read_value()
value = tensor_init()
self.assertAllEqual(value, 2.0)
@test_util.run_in_graph_and_eager_modes
def testGetConcreteFunctionCreatesVariables(self):
v_holder = []
@def_function.function
def tensor_init():
if not v_holder:
v_holder.append(variables.Variable(5.))
return v_holder[0].read_value()
concrete = tensor_init.get_concrete_function()
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(5., self.evaluate(concrete()))
self.assertAllEqual(5., self.evaluate(tensor_init()))
def testFuncGraphCaptureByValue(self):
v = variables.Variable(1.0)
def trivial_function():
return v.read_value()
graph_function = function.Function(
trivial_function, 'test', capture_by_value=True)
self.assertAllEqual(graph_function(), 1.0)
v.assign(2.0)
self.assertAllEqual(graph_function(), 1.0)
def testFuncGraphCaptureByValueNested(self):
v = variables.Variable(1.0)
def trivial_function():
return control_flow_ops.cond(
array_ops.placeholder_with_default(True, ()),
v.read_value, v.read_value)
graph_function = function.Function(
trivial_function, 'test', capture_by_value=True)
self.assertAllEqual(graph_function(), 1.0)
v.assign(2.0)
self.assertAllEqual(graph_function(), 1.0)
def testDefunShapeInferenceWithCapturedResourceVariable(self):
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# We do not return v directly since the tensor conversion function of
# ResourceVariable returns the read value and not the resource itself.
return v._handle
compiled = def_function.function(f)
var_handle = compiled()
self.assertEqual(var_handle.dtype, dtypes.resource)
self.assertEqual(var_handle.shape, tensor_shape.scalar())
var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)
self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))
def testShapeInferenceForMoreSpecificInput(self):
def f(a):
return array_ops.reshape(a, [-1, 3])
signature = [tensor_spec.TensorSpec(None, dtypes.float32)]
compiled = def_function.function(f, input_signature=signature)
@def_function.function
def use_f():
inputs = array_ops.zeros([10, 10, 3])
self.assertAllEqual(f(inputs).shape, compiled(inputs).shape)
use_f()
def testFuncListAttr(self):
@function.defun
def test_function(val):
def fn1():
return array_ops.ones([10])
fn2 = lambda: array_ops.ones([10]) * 2
def fn3(x=3):
return array_ops.ones([10]) * x
fn4 = functools.partial(fn3, x=4)
fn5 = functools.partial(fn3, 5)
return gen_functional_ops.case(val, [], [dtypes.float32],
[function.defun(f).get_concrete_function()
for f in (fn1, fn2, fn3, fn4, fn5)])
ones = array_ops.ones([10])
self.assertAllEqual([ones], test_function(0))
self.assertAllEqual([ones * 2], test_function(1))
self.assertAllEqual([ones * 3], test_function(2))
self.assertAllEqual([ones * 4], test_function(3))
self.assertAllEqual([ones * 5], test_function(4))
self.assertAllEqual([ones * 5], test_function(22)) # default branch
@test_util.enable_control_flow_v2
def testVariableInLoopInFunction(self):
@function.defun
def test_function():
def loop_test(_):
return False
def loop_body(_):
return variable_scope.get_variable('a', shape=())
return control_flow_ops.while_loop(loop_test, loop_body, [0.0])
self.assertEqual(test_function().shape, [])
def testDefunShapeInferenceWithCapturedResourceVariableInGraphMode(self):
with context.graph_mode():
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# We do not return v directly since the tensor conversion function of
# ResourceVariable returns the read value and not the resource itself.
return v._handle
compiled = def_function.function(f)
var_handle = compiled()
self.assertEqual(var_handle.dtype, dtypes.resource)
self.assertEqual(var_handle.shape, tensor_shape.scalar())
var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)
self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))
def testDefunShapeInferenceWithCapturedVariableInGraphMode(self):
with context.graph_mode():
v = variables.Variable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# Check that shape inference works while creating the defun
compiled = def_function.function(f)
compiled()
def testDefunShapeInferenceWithCapturedTensorListInGraphMode(self):
with context.graph_mode():
tensor_list = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=ops.convert_to_tensor([], dtype=dtypes.int32))
tensor_list = list_ops.tensor_list_push_back(tensor_list,
constant_op.constant(1.0))
tensor_list = list_ops.tensor_list_push_back(tensor_list,
constant_op.constant(2.0))
def f():
tl, value = list_ops.tensor_list_pop_back(
tensor_list, element_dtype=dtypes.float32)
self.assertEqual(value.shape, tensor_shape.scalar())
return tl
compiled = def_function.function(f)
output_tensor_list = compiled()
_, value = list_ops.tensor_list_pop_back(
output_tensor_list, element_dtype=dtypes.float32)
self.assertEqual(value.shape, tensor_shape.scalar())
@test_util.run_in_graph_and_eager_modes
def testDefunForcesResourceVariables(self):
def variable_creator():
self.v = variables.Variable(0.0)
return self.v.read_value()
self.v = None
defined = function.defun(variable_creator)
defined() # Create the variable.
self.assertIsInstance(
self.v, resource_variable_ops.ResourceVariable)
def testRunMetadata(self):
@def_function.function
def f(x):
return x * x
with ops.device('cpu:0'):
context.enable_run_metadata()
f(constant_op.constant(1.0))
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
step_stats = run_metadata.step_stats
self.assertNotEmpty(step_stats.dev_stats)
cpu_stats = step_stats.dev_stats[0]
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
cpu_stats.device)
    # Testing for at least 2 because the function call should generate at most
    # one entry in the step_stats, while the ops inside the function can
    # generate arbitrarily many (placeholders, return identities, etc. might
    # or might not be included in the future), so the exact count shouldn't
    # be tested.
self.assertGreaterEqual(len(cpu_stats.node_stats), 2)
self.assertLen(run_metadata.partition_graphs, 1)
def testGraphModeCaptureVariable(self):
with context.graph_mode(), self.cached_session():
class HasAVar(object):
def __init__(self):
self.v = resource_variable_ops.ResourceVariable(1.0)
def call(self):
return self.v * 2
o = HasAVar()
self.evaluate(variables.global_variables_initializer())
call = def_function.function(o.call)
op = call()
self.assertAllEqual(self.evaluate(op), 2.0)
def testGraphModeManyFunctions(self):
with ops.Graph().as_default(), self.cached_session():
@def_function.function
def f(x):
return x * x
@def_function.function
def g(x):
return f(x) + 1
self.assertAllEqual(g(constant_op.constant(2.0)).eval(), 5.0)
def testDict(self):
@def_function.function
def f(x):
return {'name': x + 1}
self.assertAllEqual(f(constant_op.constant(1.0))['name'], 2.0)
def testTensorConversionWithDefun(self):
@def_function.function
def f(x):
return math_ops.add(x, constant_op.constant(3))
self.assertAllEqual(5, f(constant_op.constant(2)))
def testTensorConversionCall(self):
@def_function.function
def f(x):
return math_ops.add(x, constant_op.constant(3))
@def_function.function
def g(x):
return f(f(x))
self.assertAllEqual(8, g(constant_op.constant(2)))
def testCallShape(self):
@def_function.function
def f(x):
return x + 1
@def_function.function
def g(x):
x = f(x)
self.assertEqual(x.shape.as_list(), [])
return None
g(constant_op.constant(1.0))
def testNestedDefunWithNoOutputAndTapedInput(self):
three = resource_variable_ops.ResourceVariable(3.0, name='v')
@def_function.function
def f(x):
# This function intentionally takes a taped variable as input,
# but does not return any values
math_ops.add(x, three)
@def_function.function
def g(x):
y = math_ops.add(x, three)
f(y)
g(three)
def testGatherResourceWithDefun(self):
with ops.device('cpu:0'):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
defined = def_function.function(sum_gather)
self.assertAllEqual(sum_gather(), defined())
@parameterized.named_parameters([
('IndexedSlicesWithDenseShape',
_example_indexed_slices_with_dense_shape,),
('IndexedSlicesWithoutDenseShape',
_example_indexed_slices_without_dense_shape,),
('RaggedTensorRaggedRank1', ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}),
('RaggedTensorRaggedRank2',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}),
('SparseTensor', sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}),
]) # pyformat: disable
def testReturnCompositeTensorWithDefun(self,
factory_fn,
factory_kwargs={},
input_signature=None):
input_ct = factory_fn(**factory_kwargs)
@def_function.function(input_signature=input_signature)
def f():
return input_ct
output_ct = f()
self.assertIsInstance(output_ct, type(input_ct))
nest.assert_same_structure(input_ct, output_ct, expand_composites=True)
input_flat = nest.flatten(input_ct, expand_composites=True)
output_flat = nest.flatten(output_ct, expand_composites=True)
for (input_component, output_component) in zip(input_flat, output_flat):
self.assertAllEqual(input_component, output_component)
@parameterized.named_parameters([
('IndexedSlicesWithDenseShape',
_example_indexed_slices_with_dense_shape,),
('IndexedSlicesWithoutDenseShape',
_example_indexed_slices_without_dense_shape,),
('RaggedTensorRaggedRank1',
ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}),
('RaggedTensorRaggedRank2',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}),
('SparseTensor',
sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}),
('RaggedTensorRaggedRank1WithSignature',
ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]},
[ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)]),
('RaggedTensorRaggedRank2WithSignature',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]},
[ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32)]),
('SparseTensorWithSignature',
sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]},
[sparse_tensor.SparseTensorSpec([None], dtypes.int32)]),
]) # pyformat: disable
def testCompositeAsArgumentTensorWithDefun(self,
factory_fn,
factory_kwargs={},
input_signature=None):
input_ct = factory_fn(**factory_kwargs)
@def_function.function(input_signature=input_signature)
def f(x):
return x
output_ct = f(input_ct)
self.assertIsInstance(output_ct, type(input_ct))
nest.assert_same_structure(input_ct, output_ct, expand_composites=True)
input_flat = nest.flatten(input_ct, expand_composites=True)
output_flat = nest.flatten(output_ct, expand_composites=True)
for (input_component, output_component) in zip(input_flat, output_flat):
self.assertAllEqual(input_component, output_component)
@test_util.run_gpu_only
def testFunctionOnDevice(self):
x = constant_op.constant([1.]).gpu()
f = def_function.function(math_ops.add)
y = f(x, x).cpu()
self.assertAllEqual(y, [2.])
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testFunctionWithResourcesOnDifferentDevices(self):
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, gpu_result
defined = function.defun(sum_gather)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
expected = self.evaluate(sum_gather())
self.assertAllEqual(expected, self.evaluate(defined()))
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testOpInFunctionWithConflictingResourceInputs(self):
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='cpu')
v_also_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='also_cpu')
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='gpu')
@def_function.function
def resource_apply_adam():
training_ops.resource_apply_adam(
v_cpu.handle,
v_gpu.handle,
v_also_cpu.handle,
1.0, # beta1_power
1.0, # beta2_power
1.0, # learning_rate
1.0, # beta1
1.0, # beta2
1.0, # epsilon
[1.0, 1.0, 1.0], # grad
False) # use_locking
return None
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
'Cannot place the graph because a reference or resource edge connects '
'colocation groups with incompatible assigned devices'):
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(resource_apply_adam())
@test_util.run_gpu_only
def testFunctionHandlesInputsOnDifferentDevices(self):
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = def_function.function(array_ops.reshape)
value = constant_op.constant([1., 2.]).gpu()
shape = constant_op.constant([2, 1])
reshaped = reshape(value, shape).cpu()
self.assertAllEqual(reshaped, [[1], [2]])
@test_util.run_gpu_only
def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self):
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = def_function.function(array_ops.reshape)
value = constant_op.constant([1., 2.])
shape = constant_op.constant([2, 1]).gpu()
reshape(value, shape) # No error is raised
def testNoneOutput(self):
@def_function.function
def my_function(_):
return None
self.assertAllEqual(my_function(1), None)
def testNestedFunctions(self):
# TensorFlow function (which is what would be used in TensorFlow graph
# construction).
@tf_function.Defun(dtypes.int32, dtypes.int32)
def add(a, b):
return math_ops.add(a, b)
@def_function.function
def add_one(x):
return add(x, 1)
self.assertAllEqual(3, add_one(constant_op.constant(2)))
def testVariableCaptureInNestedFunctions(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32)
@def_function.function
def inner_read():
return v.read_value()
@def_function.function
def outer():
return inner_read()
self.assertEqual(1, int(outer()))
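# Hedged follow-on sketch (hypothetical helper, not a test): captured
# variables are read at call time, so an assignment between calls is
# visible inside the already-traced function without retracing.
def _sketchCaptureSeesUpdates(self):
  v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32)

  @def_function.function
  def read():
    return v.read_value()

  first = int(read())   # 1
  v.assign(5)
  second = int(read())  # 5, from the same trace.
  return first, second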
def testReturnCapturedEagerTensor(self):
t = constant_op.constant(1)
@def_function.function
def read():
return t
self.assertEqual(1, int(read()))
def testReturnCapturedGraphTensor(self):
with context.graph_mode(), self.cached_session():
t = constant_op.constant(1)
@def_function.function
def read():
return t
self.assertEqual(1, int(self.evaluate(read())))
def testSequenceInputs(self):
clip_by_global_norm = def_function.function(clip_ops.clip_by_global_norm)
t_list = [constant_op.constant(1.0), constant_op.constant(2.0)]
clipped_list, global_norm = clip_by_global_norm(t_list,
constant_op.constant(.2))
for t in clipped_list:
self.assertIsInstance(t, ops.Tensor)
self.assertIsInstance(global_norm, ops.Tensor)
def testNestedSequenceInputs(self):
def my_op(inputs):
a, b, c = inputs
e, f = b
g, h = e
return [a + a, [tuple([f + f, g + g]), h + h], c + c], a + f + g + h + c
my_eager_op = def_function.function(my_op)
ret = my_eager_op([
constant_op.constant(1), [(constant_op.constant(2),
constant_op.constant(3)),
constant_op.constant(4)],
constant_op.constant(5)
])
self.assertLen(ret, 2)
self.assertAllEqual(ret[0][0], 2)
self.assertAllEqual(ret[0][1][0][0], 8)
self.assertAllEqual(ret[0][1][0][1], 4)
self.assertIsInstance(ret[0][1][0], tuple)
self.assertAllEqual(ret[0][1][1], 6)
self.assertAllEqual(ret[0][2], 10)
self.assertAllEqual(ret[1], 15)
def testVariableNamesRespectNameScopesWithDefun(self):
@def_function.function
def create_variable():
with ops.name_scope('foo'):
v = resource_variable_ops.ResourceVariable(0.0, name='bar')
self.assertEqual(v.name, 'foo/bar:0')
create_variable()
def testVariableNamesRespectNameScopesWithDefunInGraph(self):
with context.graph_mode():
@def_function.function
def create_variable():
with ops.name_scope('foo'):
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name='bar')
self.assertEqual(v.name, 'foo/bar:0')
with ops.get_default_graph().as_default():
create_variable()
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testLayerInDefun(self):
conv = convolutional.Conv2D(
filters=1,
kernel_size=2,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.zeros_initializer())
@function.defun
def model(x):
return conv(x)
x = array_ops.ones([1, 2, 2, 1])
y = model(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[[[4.0]]]], self.evaluate(y))
# Variable lifting is somewhat different between defun/tf.function, so testing
# device placement on both makes sense.
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
@test_util.run_in_graph_and_eager_modes
def testVariablesPlacedOnOutsideDevice(self, function_decorator):
class _Obj(object):
def __init__(self):
self.v = None
@function_decorator
def f(self):
if self.v is None:
self.v = variables.Variable(1.)
return self.v + 1.
has_device = _Obj()
with ops.device('cpu:0'):
has_device.f()
self.assertIn('CPU', has_device.v.device)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testDefunKerasModelCall(self):
model = MiniModel()
model.call = function.defun(model.call)
x = array_ops.ones([1, 2])
y = model(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([[3.0]], self.evaluate(y))
# Break the reference cycle between the MiniModel and the defun:
# `MiniModel` --(through its `call` method)--> `Function`
# `Function` --(instancemethod on `MiniModel`)--> `MiniModel`
del model.call
# Note: The ConfigProto below unfortunately only configures graph
# construction. Eager's configuration is controlled in `__main__`.
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
@test_util.run_v1_only('b/120545219')
def testDeviceAnnotationsRespected(self):
def multi_device_fn():
with ops.device('/cpu:0'):
s0 = test_ops.device_placement_op()
with ops.device('/cpu:1'):
s1 = test_ops.device_placement_op()
with ops.device('/cpu:2'):
s2 = test_ops.device_placement_op()
s3 = test_ops.device_placement_op()
return s0, s1, s2, s3
defined = function.defun(multi_device_fn)
outputs = self.evaluate(defined())
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
with ops.device('/cpu:3'):
outputs = self.evaluate(defined())
# All function definitions are agnostic to call site devices.
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
self.assertIn(compat.as_bytes('CPU:3'), outputs[3])
with ops.device('/cpu:0'):
outputs = self.evaluate(defined())
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
self.assertIn(compat.as_bytes('CPU:0'), outputs[3])
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(device_count={'CPU': 2}))
@test_util.run_v1_only('b/120545219')
def testCallingGraphFunctionOnDifferentDevice(self):
def func():
return constant_op.constant(0)
defined = def_function.function(func)
with ops.device('cpu:0'):
cpu_graph_function = defined.get_concrete_function()
with ops.device('cpu:0'):
self.assertEqual(
self.evaluate(cpu_graph_function()), self.evaluate(func()))
with ops.device('cpu:1'):
self.assertEqual(0., self.evaluate(cpu_graph_function()))
with ops.device(None):
self.assertEqual(0., self.evaluate(cpu_graph_function()))
default_graph_function = defined.get_concrete_function()
self.assertEqual(
self.evaluate(default_graph_function()), self.evaluate(func()))
with ops.device('cpu:1'):
self.assertEqual(0., self.evaluate(default_graph_function()))
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testColocateWithRespected(self):
# TODO(b/113291792): Use multiple CPUs instead of a GPU.
with ops.device('cpu:0'):
x = constant_op.constant(1.0)
with ops.device('gpu:0'):
y = constant_op.constant(1.0)
@def_function.function
def foo():
return test_ops.device_placement_op()
with ops.colocate_with(x):
self.assertIn(compat.as_bytes('CPU:0'), self.evaluate(foo()))
with ops.colocate_with(y):
self.assertIn(compat.as_bytes('GPU:0'), self.evaluate(foo()))
def testVariablesAreTracked(self):
v = resource_variable_ops.ResourceVariable(1.0)
def foo(x):
return v * x
defined = def_function.function(foo)
x = constant_op.constant([1.0])
self.assertEqual(1., self.evaluate(defined(x)))
v.assign(2.)
x = constant_op.constant([1.0, 2.0])
self.assertAllEqual([2., 4.], self.evaluate(defined(x)))
def testCacheObjectHashCollisions(self):
class Foo(object):
def __hash__(self):
return 42
def func(foo):
del foo
return
defined = function.defun(func)
defined(Foo())
self.assertLen(total_function_cache(defined), 1)
defined(Foo())
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorDtypeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([[1.0]], dtype=dtypes.complex128)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorShapeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([1.0], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorShapeDtypeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([1.0], dtype=dtypes.complex128)
defined(t)
self.assertLen(total_function_cache(defined), 2)
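# A small sketch (assumption: not in the original file) that makes the
# cache behavior above observable with a Python-side trace counter.
def _sketchTraceCounter(self):
  trace_count = [0]

  @function.defun
  def func(t):
    # The counter only increments while tracing, not on cached calls.
    trace_count[0] += 1
    return t + t

  func(constant_op.constant([[1.0]], dtype=dtypes.complex64))
  func(constant_op.constant([1.0], dtype=dtypes.complex128))
  func(constant_op.constant([1.0], dtype=dtypes.complex128))
  return trace_count[0]  # 2: one trace per distinct (shape, dtype) pair.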
def testCacheTensorUnknownShapesCollisionRelaxedShapes(self):
def func(t):
return t + t
with context.graph_mode(), self.cached_session():
defined = function.defun(func, experimental_relax_shapes=True)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[])
defined(p)
self.assertLen(total_function_cache(defined), 1)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
defined(p)
self.assertLen(total_function_cache(defined), 2)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[2])
defined(p)
# Gradual shape relaxation is performed: the common shape between
# [1] and [2] is one containing unknown dimensions.
self.assertLen(total_function_cache(defined), 2)
# pylint: disable=protected-access
self.assertLen(defined._function_cache.arg_relaxed_shapes, 1)
relaxed_shapes = (
list(defined._function_cache.arg_relaxed_shapes.values())[0])
self.assertLen(relaxed_shapes, 1)
relaxed_shape = relaxed_shapes[0]
# pylint: enable=protected-access
self.assertEqual(relaxed_shape.rank, 1)
self.assertEqual(tensor_shape.dimension_value(relaxed_shape[0]), None)
t = constant_op.constant([1.0, 1.0, 1.0], dtype=dtypes.float32)
defined(t)
# Shape (3,) matches the relaxed shape TensorShape([None])
self.assertLen(total_function_cache(defined), 2)
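# Hedged eager-mode sketch of the same relaxation (hypothetical helper):
# after two distinct rank-1 shapes, later rank-1 float inputs are expected
# to reuse the relaxed TensorShape([None]) trace.
def _sketchRelaxedShapesEager(self):
  defined = function.defun(lambda t: t + t, experimental_relax_shapes=True)
  defined(constant_op.constant([1.0]))
  defined(constant_op.constant([1.0, 2.0]))
  defined(constant_op.constant([1.0, 2.0, 3.0]))
  return len(total_function_cache(defined))  # Expected: 2, as above.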
def testPythonFunctionWithDefaultArgs(self):
def func(foo, bar=1, baz=2):
del foo
del bar
del baz
return
defined = function.defun(func)
defined(0, baz=20)
def cache_keys():
"""Sanitizes cache keys of non-input metadata."""
return tuple(key[0] for key in total_function_cache(defined))
# `True` corresponds to the fact that we're executing eagerly
self.assertIn(('URRRu', (0, 1, 20)), cache_keys())
defined(1) # bar=1, baz=2
self.assertIn(('URRRu', (1, 1, 2)), cache_keys())
# This matches the previous call.
defined(foo=1)
self.assertLen(total_function_cache(defined), 2)
defined(1, 2, 3)
self.assertLen(total_function_cache(defined), 3)
self.assertIn(('URRRu', (1, 2, 3)), cache_keys())
# This matches the previous call.
defined(1, bar=2, baz=3)
self.assertLen(total_function_cache(defined), 3)
# This matches the previous call.
defined(1, baz=3, bar=2)
self.assertLen(total_function_cache(defined), 3)
def testFunctoolsPartialUnwrappedCorrectly(self):
def full_function(a, b, c=3):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
a, b, c = partial(2)
defined = function.defun(partial)
func_a, func_b, func_c = defined(2)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
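# Minimal companion sketch (illustrative names, not from the original
# suite): a keyword bound by the partial is a plain Python value baked
# into the trace, just like a default argument.
def _sketchPartialKeyword(self):
  base = functools.partial(lambda a, b=2: a + b, b=10)
  defined = function.defun(base)
  return defined(constant_op.constant(1)).numpy()  # 11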
def testInputSignatureWithMatchingInputs(self):
def foo(a):
self.assertEqual(a.shape, (2,))
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([2])
self.assertAllEqual(a, defined(a))
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(a, defined.get_concrete_function()(a))
self.assertAllEqual(a, defined.get_concrete_function(a)(a))
self.assertAllEqual(a, defined.get_concrete_function(
tensor_spec.TensorSpec((2,), dtype=dtypes.float32))(a))
self.assertLen(total_function_cache(defined), 1)
def bar(a):
self.assertEqual(a._shape_tuple(), (2, None))
return a
signature = [tensor_spec.TensorSpec((2, None), dtypes.float32)]
defined = function.defun(bar, input_signature=signature)
a = array_ops.ones([2, 1])
out = defined(a)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out, a)
# Changing the second dimension shouldn't create a new function.
b = array_ops.ones([2, 3])
out = defined(b)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out, b)
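# Hedged sketch (hypothetical helper): a None dimension in the signature
# admits any size along that axis while keeping a single concrete trace.
def _sketchSignatureNoneDim(self):
  spec = [tensor_spec.TensorSpec((None,), dtypes.float32)]
  defined = function.defun(lambda a: a * 2.0, input_signature=spec)
  defined(array_ops.ones([2]))
  defined(array_ops.ones([7]))
  return len(total_function_cache(defined))  # 1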
def testInputSignatureWithCompatibleInputs(self):
rank2_spec = tensor_spec.TensorSpec(shape=(None, None),
dtype=dtypes.float32)
@function.defun(input_signature=[rank2_spec])
def func(a):
self.assertEqual([None, None], a.shape.as_list())
return array_ops.shape(a)
self.assertAllEqual([3, 1], func([[0], [1.0], [1]]))
self.assertAllEqual([2, 2], func(numpy.array([[1, 1], [2, 2]])))
with self.assertRaisesRegexp(ValueError, 'incompatible'):
func([0.0, 1.0, 2.0]) # Wrong shape.
with self.assertRaisesRegexp(ValueError, 'incompatible'):
func([['wrong dtype']])
def testNestedInputSignatures(self):
def expected_foo(a, b):
return [a, b]
@function.defun(input_signature=[
[tensor_spec.TensorSpec((2, None), dtypes.float32)] * 2,
tensor_spec.TensorSpec((1,), dtypes.float32),
])
def foo(a, b):
self.assertEqual(a[0]._shape_tuple(), (2, None))
self.assertEqual(a[1]._shape_tuple(), (2, None))
self.assertEqual(b._shape_tuple(), (1,))
return [a, b]
a = array_ops.ones([2, 1])
b = array_ops.ones([1])
expected = expected_foo([a, a], b)
out = foo([a, a], b)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], a)
self.assertAllEqual(out[1], b)
# Changing the unspecified dimensions shouldn't create a new function.
a = array_ops.ones([2, 3])
b = array_ops.ones([2, 5])
c = array_ops.ones([1])
expected = expected_foo([a, b], c)
out = foo([a, b], c)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
# Passing compatible inputs should work.
a = a.numpy().tolist()
b = b.numpy().tolist()
c = c.numpy().tolist()
out = foo([a, b], c)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
def testNestedInputSignaturesWithDict(self):
def expected_bar(a):
return a
@function.defun(input_signature=[{
'a': tensor_spec.TensorSpec((2, None), dtypes.float32),
'b': tensor_spec.TensorSpec((2, None), dtypes.float32),
'c': tensor_spec.TensorSpec((1,), dtypes.float32)}])
def bar(a):
self.assertEqual(a['a']._shape_tuple(), (2, None))
self.assertEqual(a['b']._shape_tuple(), (2, None))
self.assertEqual(a['c']._shape_tuple(), (1,))
return a
a = array_ops.ones([2, 3])
b = array_ops.ones([1])
inputs = {'a': a, 'b': a, 'c': b}
expected = expected_bar(inputs)
out = bar(inputs)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out['a'], expected['a'])
self.assertAllEqual(out['b'], expected['b'])
self.assertAllEqual(out['c'], expected['c'])
# Passing compatible inputs should work.
a = a.numpy().tolist()
b = b.numpy().tolist()
inputs = {'a': a, 'b': a, 'c': b}
out = bar(inputs)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out['a'], expected['a'])
self.assertAllEqual(out['b'], expected['b'])
self.assertAllEqual(out['c'], expected['c'])
def testInputSignatureMustBeSequenceOfTensorSpecs(self):
def foo(a, b):
del a
del b
# Signatures must consist exclusively of `TensorSpec` objects.
signature = [(2, 3), tensor_spec.TensorSpec([2, 3], dtypes.float32)]
with self.assertRaisesRegexp(TypeError, 'Invalid input_signature.*'):
def_function.function(foo, input_signature=signature)
# Signatures must be either lists or tuples on their outermost levels.
signature = {'t1': tensor_spec.TensorSpec([], dtypes.float32)}
with self.assertRaisesRegexp(TypeError, 'input_signature must be either a '
'tuple or a list.*'):
function.defun(foo, input_signature=signature)
@test_util.run_in_graph_and_eager_modes
def testInputsIncompatibleWithSignatureRaisesError(self):
def foo(a):
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = def_function.function(foo, input_signature=signature)
# Invalid shapes.
with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([3]))
with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([2, 1]))
# Wrong number of arguments.
with self.assertRaisesRegexp(TypeError, r'Received 2 argument\(s\)'):
defined(array_ops.ones([2]), array_ops.ones([2]))
with self.assertRaisesRegexp(ValueError,
'Structure of Python function inputs.*'):
defined()
with self.assertRaisesRegexp(ValueError,
'inputs incompatible with input_signature'):
defined.get_concrete_function(
tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.float32))
def testInputsIncompatibleWithNestedSignatureRaisesError(self):
def foo(a, b):
return [a, b]
signature = [[tensor_spec.TensorSpec((1,), dtypes.float32)] * 2,
[tensor_spec.TensorSpec((1,), dtypes.float32)] * 2]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([1])
with self.assertRaisesRegexp(ValueError,
'Structure of Python function inputs.*'):
defined([a, a, a], [a])
with self.assertRaisesRegexp(ValueError,
'Structure of Python function inputs.*'):
defined([a], [a, a, a])
defined([a, a], [a, a])
def testUnderspecifiedInputSignature(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
])
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
x = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError, 'only pass arguments'):
foo(x, training=True)
with self.assertRaisesRegexp(TypeError, 'only pass arguments'):
foo(x, training=False)
self.assertAllEqual(x.numpy(), foo(x).numpy())
def testInputSignatureWithPartialFunction(self):
def full_function(a, b, c=3.0):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
a, b, c = partial(2.0)
signature = [tensor_spec.TensorSpec([], dtypes.float32)]
defined = function.defun(partial, input_signature=signature)
x = constant_op.constant(2.0)
func_a, func_b, func_c = defined(x)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
def testInputSignatureConversionWithDefaultArg(self):
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
signature = [
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.bool),
]
defined = def_function.function(foo, input_signature=signature)
a = constant_op.constant(1.0)
self.assertAllEqual(a.numpy(), defined(a))
self.assertAllEqual(a.numpy(), defined(a, training=True))
self.assertAllEqual(-a.numpy(), defined(a, training=False))
def testInputSignatureWithKeywordPositionalArgs(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int64)
])
def foo(flt, integer):
return flt, integer
flt = constant_op.constant(1.0)
integer = constant_op.constant(2, dtypes.int64)
out1, out2 = foo(flt, integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt=flt, integer=integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(integer=integer, flt=flt)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt, integer=integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
def testInputSignatureWithKeywordArgs(self):
def foo(a, b, **kwargs):
del kwargs
return a, b
x = function.defun(
foo,
input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int32)
]).get_concrete_function()
result = x(constant_op.constant(5.0), constant_op.constant(5))
self.assertAllEqual(result, [5.0, 5])
def testInputSignatureWithCompositeTensors(self):
def f(rt):
self.assertEqual(rt.values.shape.as_list(), [None])
self.assertEqual(rt.row_splits.shape.as_list(), [4])
return rt
signature = [ragged_tensor.RaggedTensorSpec(
shape=[3, None], dtype=dtypes.int32)]
defined = function.defun(f, input_signature=signature)
rt1 = ragged_factory_ops.constant([[1], [], [2, 3, 4]])
out1 = defined(rt1)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out1.values, rt1.values)
self.assertAllEqual(out1.row_splits, rt1.row_splits)
# Changing the row lengths shouldn't create a new function.
rt2 = ragged_factory_ops.constant([[1, 2], [3, 4], [5]])
out2 = defined(rt2)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out2.values, rt2.values)
self.assertAllEqual(out2.row_splits, rt2.row_splits)
# Different number of rows
rt3 = ragged_factory_ops.constant([[1, 2], [3, 4], [5], [6]])
with self.assertRaisesRegexp(ValueError, 'incompatible'):
defined(rt3)
# Different dtype
rt4 = ragged_factory_ops.constant([[1.0, 2.0], [], [3.0]])
with self.assertRaisesRegexp(ValueError, 'Structure .* does not match'):
defined(rt4)
# Different rank
rt5 = ragged_factory_ops.constant([[[1]], [[2]], [[3]]])
with self.assertRaisesRegexp(ValueError, 'does not match'):
defined(rt5)
def testTensorKeywordArguments(self):
def foo(a, b):
del a
return b
defined = function.defun(foo)
a = constant_op.constant(2.0)
b = constant_op.constant([1.0, 2.0])
one = defined(a, b)
self.assertLen(total_function_cache(defined), 1)
two = defined(a=a, b=b)
self.assertLen(total_function_cache(defined), 1)
three = defined(b=b, a=a)
self.assertLen(total_function_cache(defined), 1)
four = defined(a, b=b)
self.assertLen(total_function_cache(defined), 1)
# The next call corresponds to a new input signature, hence
# we expect another function to be defined.
five = defined(b, a)
self.assertLen(total_function_cache(defined), 2)
six = defined(a=b, b=a)
self.assertLen(total_function_cache(defined), 2)
seven = defined(b=a, a=b)
self.assertLen(total_function_cache(defined), 2)
self.assertAllEqual(one, [1.0, 2.0])
self.assertAllEqual(two, [1.0, 2.0])
self.assertAllEqual(three, [1.0, 2.0])
self.assertAllEqual(four, [1.0, 2.0])
self.assertAllEqual(five, 2.0)
self.assertAllEqual(six, 2.0)
self.assertAllEqual(seven, 2.0)
def testDefuningInstanceMethod(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
def one(self, tensor):
return tensor
@def_function.function
def two(self, tensor, other=integer):
return self.one(tensor), other
foo = Foo()
t = constant_op.constant(1.0)
one, two = foo.two(t)
self.assertEqual(one.numpy(), 1.0)
self.assertEqual(two.numpy(), 2)
def testDefuningInstanceMethodWithDefaultArgument(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
@def_function.function
def func(self, other=integer):
return other
foo = Foo()
self.assertEqual(foo.func().numpy(), int(integer))
def testPythonCallWithSideEffects(self):
state = []
@def_function.function
def side_effecting_function():
state.append(0)
side_effecting_function()
self.assertAllEqual(state, [0])
# The second invocation should call the graph function, which shouldn't
# trigger the list append.
side_effecting_function()
self.assertAllEqual(state, [0])
# Whereas calling the python function directly should create a side-effect.
side_effecting_function.python_function()
self.assertAllEqual(state, [0, 0])
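# Sketch of the idiomatic alternative (assumed helper, not a test): state
# that must mutate on every call belongs in a Variable, because Python
# side effects only run while tracing.
def _sketchVariableSideEffect(self):
  counter = variables.Variable(0)

  @def_function.function
  def bump():
    counter.assign_add(1)

  bump()
  bump()
  return int(counter.read_value())  # 2: assign_add runs on every call.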
def testFunctionWithNestedFunctionCallAndSideEffects(self):
v1 = variables.Variable(1.0)
v2 = variables.Variable(1.0)
@def_function.function
def add_one(a):
a.assign_add(1.0)
# Grappler will inline calls to `add_one` into the function body; we check
# that all side effects were executed.
@def_function.function
def side_effecting_function(a, b):
add_one(a)
add_one(b)
return a + b
result = side_effecting_function(v1, v2)
self.assertEqual(result.numpy(), 4.0)
def testFunctionWithExtraAttributes(self):
@function.defun_with_attributes(attributes={'experimental_1': 'value1',
'experimental_2': 2})
def matmul(x, y):
return math_ops.matmul(x, y)
def add(x, y):
return math_ops.add(x, y)
defun_add = function.defun_with_attributes(
add, attributes={'experimental_3': True, 'experimental_4': 1.0})
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t)
double = defun_add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 2)
functions = list(graph._functions.values())
self.assertRegexpMatches(
functions[0].definition.signature.name, '.*matmul.*')
attrs = functions[0].definition.attr
self.assertLen(attrs, 2)
self.assertEqual(attrs['experimental_1'].s, b'value1')
self.assertEqual(attrs['experimental_2'].i, 2)
self.assertRegexpMatches(
functions[1].definition.signature.name, '.*add.*')
attrs = functions[1].definition.attr
self.assertLen(attrs, 2)
self.assertEqual(attrs['experimental_3'].b, True)
self.assertEqual(attrs['experimental_4'].f, 1.0)
# pylint: enable=protected-access
def testFunctionWithInvalidAttribute(self):
@function.defun_with_attributes(attributes={'experimental_1': ['value1']})
def add(x, y):
return math_ops.add(x, y)
with self.assertRaisesRegexp(ValueError,
'.*Unsupported attribute type.*'):
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
add(t, t)
def testRegisterFunction(self):
@function.defun
def add(x, y):
return math_ops.add(x, y)
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
function.register(add, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 6)
# Two sets of functions; each set is (inference, forward, backward).
functions = list(graph._functions.values())
captured_function_names = [
f.definition.signature.name for f in functions
]
expected_func_name_regex = [
'.*inference.*matmul.*',
'.*forward.*matmul.*',
'.*inference.*backward.*matmul.*',
'.*inference.*add.*',
'.*forward.*add.*',
'.*inference.*backward.*add.*',
]
for i in range(len(functions)):
self.assertRegexpMatches(captured_function_names[i],
expected_func_name_regex[i])
# Check that the forward and backward functions have the correct attributes.
self.assertEqual(
functions[1].definition.attr['backward_function_name'].s,
functions[2].name)
self.assertEqual(
functions[2].definition.attr['forward_function_name'].s,
functions[1].name)
self.assertEqual(
functions[4].definition.attr['backward_function_name'].s,
functions[5].name)
self.assertEqual(
functions[5].definition.attr['forward_function_name'].s,
functions[4].name)
sq = defun_matmul(t, t)
double = add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
# Make sure the pre-registered function is used, and no other function
# is added.
self.assertLen(graph._functions, 6)
functions = list(graph._functions.values())
for i in range(len(functions)):
self.assertEqual(captured_function_names[i],
functions[i].definition.signature.name)
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
def testRegisterConcreteFunction(self, function_decorator):
@function_decorator
def py_add(x, y):
return math_ops.add(x, y)
py_add(array_ops.ones([]), array_ops.ones([]))
add = py_add.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
@function_decorator
def py_composite(x, y):
return x, add(x, y)
py_composite(array_ops.ones([]), array_ops.ones([]))
composite = py_composite.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
composite.add_to_graph(register_gradient_functions=True)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 6)
# Two sets of functions; each set is (inference, forward, backward).
functions = list(graph._functions.values())
captured_function_names = [
f.definition.signature.name for f in functions
]
expected_func_name_regex = [
'.*inference.*py_composite.*',
'.*inference.*py_add.*',
'.*forward.*py_composite.*',
'.*forward.*py_add.*',
'.*inference.*backward.*py_composite.*',
'.*inference.*backward.*py_add.*',
]
for expected, found in zip(
expected_func_name_regex,
captured_function_names):
self.assertRegexpMatches(found, expected)
composite_t, composite_double = composite(t, t)
double = add(t, t)
self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(double))
self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(composite_double))
self.assertAllEqual([[1, 2], [3, 4]], self.evaluate(composite_t))
# Make sure the pre-registered function is used, and no other function
# is added.
self.assertLen(graph._functions, 6)
def testRegisterFunctionWithInputSignature(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(
matmul,
input_signature=[
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
])
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 3)
# Test registering the function with the cache; note that the inputs are ignored.
function.register(defun_matmul)
graph = ops.get_default_graph()
self.assertLen(graph._functions, 3)
def testRegisterFunctionWithCache(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[2.0, 3.0], [4.0, 5.0]])
function.register(defun_matmul, t, t)
function.register(defun_matmul, t2, t2)
graph = ops.get_default_graph()
# Only one function is registered since the input params are of the same type.
# pylint: disable=protected-access
self.assertLen(graph._functions, 3)
def testCallingFunctionWithDifferentVariables(self):
@function.defun
def foo(v):
v.assign_add(1.0)
return v.read_value()
v = resource_variable_ops.ResourceVariable(0.0)
graph_function = foo.get_concrete_function(v)
self.assertLen(graph_function.inputs, 1)
self.assertEmpty(graph_function.captured_inputs)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(v)), 2.0)
w = resource_variable_ops.ResourceVariable(0.0)
@function.defun
def bar(v):
del v
return constant_op.constant(1.0)
graph_function = bar.get_concrete_function(v)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(w)), 1.0)
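# Contrast sketch (illustrative, not in the original file): when the
# variable is captured instead of passed as an argument, its handle shows
# up in captured_inputs rather than as a formal input.
def _sketchCapturedVariableInputs(self):
  v = resource_variable_ops.ResourceVariable(0.0)

  @function.defun
  def foo():
    return v.read_value()

  conc = foo.get_concrete_function()
  return len(conc.captured_inputs)  # Expected: 1, the variable handle.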
def testCallingFunctionWithNonTensorsFails(self):
@function.defun
def foo(x):
return x
graph_function = foo.get_concrete_function(constant_op.constant(1.0))
with self.assertRaisesRegexp(
ValueError, 'All inputs to `ConcreteFunction`s must be Tensors;.*'):
graph_function('Not a Tensor.')
def testSwapImplementationWithGrapplerPlugin(self):
# Set min_graph_nodes to -1 since the graph in this test is too small and
# would otherwise be ignored by grappler.
rewrites = rewriter_config_pb2.RewriterConfig()
rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
rewrites.min_graph_nodes = -1
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrites, build_cost_model=1)
config = config_pb2.ConfigProto(graph_options=graph_options)
with context.graph_mode(), self.cached_session(
config=config, graph=ops.Graph(), use_gpu=True):
@function.defun_with_attributes(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'CPU'
})
def cpu_boost(x):
return math_ops.add(x, 2.0)
@function.defun_with_attributes(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'GPU'
})
def gpu_boost(x):
return math_ops.add(x, 4.0)
x = constant_op.constant(1.0)
function.register(cpu_boost, x)
y = gpu_boost(x)
y_value = self.evaluate(y)
if test.is_gpu_available():
self.assertEqual(y_value, 5.0)
else:
# Grappler falls back to the CPU implementation even when the GPU function is called.
self.assertEqual(y_value, 3.0)
def testSwapImplementationInEager(self):
if not context.executing_eagerly():
self.skipTest('eager only')
context.context().set_optimizer_experimental_options(
{'min_graph_nodes': -1, 'implementation_selector': True})
# TODO(b/133178886): Remove _noinline=True once functions with the
# api_implements attribute are no longer inlined by default in eager mode.
@function.defun_with_attributes(
attributes={'api_implements': 'foo',
'api_preferred_device': 'CPU',
'_noinline': True})
def on_cpu(x):
return x + 2
# TODO(b/133178886): Remove _noinline=True once functions with the
# api_implements attribute are no longer inlined by default in eager mode.
@function.defun_with_attributes(
attributes={'api_implements': 'foo',
'api_preferred_device': 'GPU',
'_noinline': True})
def on_gpu(x):
return x + 4
@function.defun
def run_on_cpu(t):
function.register(on_cpu, t)
with ops.device('CPU:0'):
return on_gpu(t)
# Expect to run the on_cpu branch, regardless of whether a GPU is available.
self.assertEqual(run_on_cpu(constant_op.constant(1)).numpy(), 3)
def testDefunFunctionSeparateGraphs(self):
with context.graph_mode():
@function.defun
def add(x):
return x + 5
@function.defun
def maybe_add(x, should_add):
if should_add:
return add(x)
else:
return x
with ops.Graph().as_default():
x = constant_op.constant(11)
maybe_add(x, True)
self.assertLen(total_function_cache(maybe_add), 1)
self.assertLen(total_function_cache(add), 1)
maybe_add(x, False)
self.assertLen(total_function_cache(maybe_add), 2)
self.assertLen(total_function_cache(add), 1)
with ops.Graph().as_default():
x = constant_op.constant(11)
maybe_add(x, True)
self.assertLen(total_function_cache(maybe_add), 3)
self.assertLen(total_function_cache(add), 2)
def testCacheKeyOverlappingShapes(self):
@function.defun
def defined(t):
return t
defined(array_ops.zeros([12, 1]))
self.assertLen(total_function_cache(defined), 1)
defined(array_ops.zeros([1, 21]))
self.assertLen(total_function_cache(defined), 2)
def testCacheKeyNestedLists(self):
@function.defun
def defined(l):
return l
a = constant_op.constant(1.)
b = constant_op.constant(2.)
c = constant_op.constant(3.)
defined([[a], b, c])
self.assertLen(total_function_cache(defined), 1)
defined([[a, b], c])
self.assertLen(total_function_cache(defined), 2)
def testDecoratedMethod(self):
m = DefunnedMiniModel()
instance_call_one = m.call(array_ops.ones([1, 2]), training=True)
instance_call_two = m.call(
inputs=array_ops.ones([1, 2]), training=True)
class_call = DefunnedMiniModel.call(m, array_ops.ones([1, 2]),
training=True)
self.assertAllEqual(instance_call_one, instance_call_two)
self.assertAllEqual(instance_call_one, class_call)
def testDecoratedMethodUniqueFunctionPerInstance(self):
m = DefunnedMiniModel()
n = DefunnedMiniModel()
class_method_one = DefunnedMiniModel.call
class_method_two = DefunnedMiniModel.call
m_method_one = m.call
m_method_two = m.call
n_method_one = n.call
n_method_two = n.call
self.assertEqual(class_method_one, class_method_two)
self.assertEqual(m_method_one, m_method_two)
self.assertEqual(n_method_one, n_method_two)
self.assertNotEqual(m.call, n.call)
def testDecoratedMethodInspect(self):
m = DefunnedMiniModel()
fullargspec = tf_inspect.getfullargspec(m.call)
self.assertIn('training', fullargspec.args)
def testDecoratedMethodGetConcreteFunction(self):
m = DefunnedMiniModel()
instance_call_one = m.call.get_concrete_function(
array_ops.ones([1, 2]), training=False)
instance_call_two = m.call.get_concrete_function(
inputs=array_ops.ones([1, 2]), training=False)
self.assertAllEqual(instance_call_one(array_ops.ones([1, 2])),
instance_call_two(array_ops.ones([1, 2])))
# Also make sure get_concrete_function works on the class method
DefunnedMiniModel.call.get_concrete_function(
m, array_ops.ones([1, 2]), training=False)
DefunnedMiniModel.call.get_concrete_function(
m, inputs=array_ops.ones([1, 2]), training=True)
def testFunctionModifiesInputList(self):
# Tests `list` methods that do in-place modification, except `list.sort`,
# since it cannot even be "defunned" in the first place.
def get_list():
return [constant_op.constant(0.), constant_op.constant(1.)]
expected_msg = (
'Function to be traced should not modify structure of input '
'arguments. Check if your function has list and dictionary '
'operations that alter input arguments, '
'such as `list.pop`, `list.append`')
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def append(l):
l.append(constant_op.constant(0.))
append(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def extend(l):
l.extend([constant_op.constant(0.)])
extend(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def insert(l):
l.insert(0, constant_op.constant(0.))
insert(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def pop(l):
l.pop()
pop(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def reverse(l):
l.reverse()
reverse(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def remove(l):
l.remove(l[0])
remove(get_list())
# `list.clear` exists in Python 3 but not in Python 2.
if sys.version.startswith('3'):
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def clear(l):
l.clear()
clear(get_list())
# One last test for keyword arguments
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def kwdappend(**kwargs):
l = kwargs['l']
l.append(constant_op.constant(0.))
kwdappend(l=get_list())
def testFunctionModifiesInputDict(self):
def get_dict():
return {'t1': constant_op.constant(0.), 't2': constant_op.constant(1.)}
expected_msg = (
'Function to be traced should not modify structure of input '
'arguments. Check if your function has list and dictionary '
'operations that alter input arguments, '
'such as `list.pop`, `list.append`')
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def clear(m):
m.clear()
clear(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def pop(m):
m.pop('t1')
pop(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def popitem(m):
m.popitem()
popitem(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def update(m):
m.update({'t1': constant_op.constant(3.)})
update(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def setdefault(m):
m.setdefault('t3', constant_op.constant(3.))
setdefault(get_dict())
def testFunctionModifiesInputNest(self):
# Test on functions that modify structure of nested input arguments
expected_msg = (
'Function to be traced should not modify structure of input '
'arguments. Check if your function has list and dictionary '
'operations that alter input arguments, '
'such as `list.pop`, `list.append`')
with self.assertRaisesRegexp(ValueError, expected_msg):
@def_function.function
def modify(n):
n[0]['t1'].append(constant_op.constant(1.))
nested_input = [{
't1': [constant_op.constant(0.),
constant_op.constant(1.)],
},
constant_op.constant(2.)]
modify(nested_input)
with self.assertRaisesRegexp(ValueError, expected_msg):
# The flat list doesn't change whereas the true structure changes
@def_function.function
def modify_same_flat(n):
n[0].append(n[1].pop(0))
nested_input = [[constant_op.constant(0.)],
[constant_op.constant(1.),
constant_op.constant(2.)]]
modify_same_flat(nested_input)
def testDecoratedMethodVariableCleanup(self):
m = DefunnedMiniModel()
m(array_ops.ones([1, 2]))
weak_variables = weakref.WeakSet(m.variables)
self.assertLen(weak_variables, 2)
del m
self.assertEqual([], list(weak_variables))
def testExecutorType(self):
@function.defun
def add_five(x):
return x + 5
self.assertEqual(
5,
add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())
with self.assertRaisesRegexp(errors.NotFoundError, 'NON_EXISTENT_EXECUTOR'):
with context.function_executor_type('NON_EXISTENT_EXECUTOR'):
add_five(constant_op.constant(0, dtype=dtypes.int32))
for executor_type in ('', 'DEFAULT', None):
with context.function_executor_type(executor_type):
self.assertAllEqual(
5,
add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())
@test_util.assert_no_garbage_created
def testReferenceCycles(self):
fn = function.defun(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
def testFunctionStackInErrorMessage(self):
if context.executing_eagerly():
# TODO(b/122736651): Remove this skipTest once fixed.
self.skipTest('Error interpolation is not working when function is '
'invoked without PartitionedCallOp.')
@def_function.function()
def fn3(x):
return x + 2
@def_function.function()
def fn2(x):
check_ops.assert_equal(fn3(x), 3)
return 2
@def_function.function()
def fn(x):
return fn2(x)
with self.assertRaises(errors.InvalidArgumentError) as cm:
fn(2)
e = cm.exception
self.assertIn('fn -> fn2', e.message)
self.assertIn('node assert_equal/Assert/Assert (defined at', e.message)
self.assertNotIn('fn3', e.message)
@test_util.run_gpu_only
def testFunctionIsNotPinned(self):
"""Tests that functions aren't pinned to the CPU by the eager runtime."""
seed1, seed2 = 79, 25
shape = constant_op.constant([4, 7])
dtype = dtypes.float32
@def_function.function
def func():
with ops.device('GPU:0'):
return gen_random_ops.random_standard_normal(
shape, dtype=dtype, seed=seed1, seed2=seed2)
with ops.device('GPU:0'):
x = func()
self.assertRegexpMatches(x.device, 'GPU')
@test_util.run_in_graph_and_eager_modes
def testShapeCaching(self):
@function.defun
def func(x):
return array_ops.shape(x)
@function.defun(
input_signature=[tensor_spec.TensorSpec([None, None], dtypes.float32)])
def calls_func(x):
return func(x)
self.assertAllEqual([1, 1], self.evaluate(func(array_ops.zeros([1, 1]))))
self.assertAllEqual([2, 2], self.evaluate(func(array_ops.zeros([2, 2]))))
self.assertAllEqual(
[3, 3],
self.evaluate(calls_func(array_ops.zeros([3, 3]))))
def testLimitedRetracing(self):
trace_count = [0]
@function.defun
def func(x):
trace_count[0] += 1
return x
for _ in range(50):
func(constant_op.constant(3.))
func(constant_op.constant(4.))
func(constant_op.constant([[1., 2.]]))
func(constant_op.constant([[]]))
func(constant_op.constant([[3., 4.], [5., 6.]]))
func(constant_op.constant([[3., 4.], [5., 6.], [7., 8.]]))
# Tracing more than twice per input doesn't make sense.
self.assertLess(trace_count[0], 13)
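# Sketch (hypothetical helper): pinning an input_signature removes the
# shape dimension from the cache key entirely, so every rank-1 float call
# shares a single trace.
def _sketchNoRetracingWithSignature(self):
  trace_count = [0]

  @function.defun(
      input_signature=[tensor_spec.TensorSpec([None], dtypes.float32)])
  def func(x):
    trace_count[0] += 1
    return x

  func(constant_op.constant([3.]))
  func(constant_op.constant([3., 4.]))
  return trace_count[0]  # 1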
def testLimitedRetracingWithCompositeTensors(self):
trace_count = [0]
@def_function.function
def f(x):
trace_count[0] += 1
return x
for i in range(10):
f(ragged_factory_ops.constant([[1, 2], [i]]))
f(ragged_factory_ops.constant([[1, 2], [], [3, 4, 5]]))
f(ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]]))
self.assertEqual(trace_count[0], 3)
def test_concrete_function_shape_mismatch(self):
@def_function.function
def f(argument_name):
return argument_name + 1.
f_concrete = f.get_concrete_function(constant_op.constant([1.]))
# Calling a function from eager doesn't do any shape checking above what
# kernels do while executing.
self.assertAllEqual(
[2., 3.],
f_concrete(constant_op.constant([1., 2.])).numpy())
@def_function.function
def g():
f_concrete(constant_op.constant([1., 2.]))
with self.assertRaisesRegexp(ValueError, 'argument_name'):
g()
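# Hedged sketch: requesting the concrete function with an unknown-shape
# TensorSpec sidesteps the mismatch above, at the cost of a less
# specialized trace. The helper name is illustrative.
def _sketchConcreteWithUnknownShape(self):
  @def_function.function
  def f(x):
    return x + 1.

  conc = f.get_concrete_function(
      tensor_spec.TensorSpec(None, dtypes.float32))
  return conc(constant_op.constant([1., 2.]))  # [2., 3.]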
@test_util.run_in_graph_and_eager_modes
def test_shape_inference_with_symbolic_shapes(self):
@def_function.function
def _uses_symbolic_shapes(w, x, y):
x = array_ops.identity(x, name='name_collision')
x = array_ops.transpose(x, [1, 0, 2])
x_batch = array_ops.shape(x)[0]
y_batch = array_ops.shape(y)[0]
y *= w
n = y_batch // x_batch
return array_ops.reshape(y, [n, x_batch, -1])
conc = _uses_symbolic_shapes.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
@def_function.function
def _call_concrete():
c = constant_op.constant(1.)
array_ops.identity(c, name='name_collision')
output1 = conc(array_ops.ones([2]),
array_ops.ones([5, 4, 2]),
array_ops.ones([20, 2]))
self.assertEqual([5, 4, 2], output1.shape)
output2 = conc(array_ops.ones([3]),
array_ops.ones([5, 4, 3]),
array_ops.ones([40, 3]))
self.assertEqual([10, 4, 3], output2.shape)
return output1, output2
output1, output2 = _call_concrete()
self.assertEqual((5, 4, 2), self.evaluate(output1).shape)
self.assertEqual((10, 4, 3), self.evaluate(output2).shape)
def testAutoGraphContext(self):
@def_function.function
def test_fn():
self.assertEqual(
ag_ctx.control_status_ctx().status, ag_ctx.Status.ENABLED)
prev_status = ag_ctx.control_status_ctx().status
test_fn()
self.assertEqual(ag_ctx.control_status_ctx().status, prev_status)
class MultiDeviceTest(test.TestCase, parameterized.TestCase):
@test_util.run_gpu_only
def testMultiDeviceOutput(self):
"""Tests that functions can produce outputs on multiple devices."""
@function.defun
def func(a, b, transpose_a):
with ops.device('/device:CPU:0'):
m1 = math_ops.matmul(a, b, transpose_a=transpose_a)
with ops.device('/device:GPU:0'):
m2 = math_ops.matmul(a, b, transpose_a=transpose_a)
return m1, m2
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
m1, m2 = func(t, t, transpose_a=True)
self.assertAllEqual(m1.numpy(), [[10, 14], [14, 20]])
self.assertRegexpMatches(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), [[10, 14], [14, 20]])
self.assertRegexpMatches(m2.backing_device, 'GPU')
@test_util.run_gpu_only
def testEmptyBody(self):
@function.defun
def func(a, b):
return b, a
with ops.device('/device:CPU:0'):
a = constant_op.constant(3.0)
with ops.device('/device:GPU:0'):
b = constant_op.constant(5.0)
m1, m2 = func(a, b)
self.assertAllEqual(m1.numpy(), 5.0)
self.assertRegexpMatches(m1.backing_device, 'GPU')
self.assertAllEqual(m2.numpy(), 3.0)
self.assertRegexpMatches(m2.backing_device, 'CPU')
@test_util.run_gpu_only
def testMultiDeviceInt32(self):
"""Tests that multi-device functions can take and output INT32s.
When an INT32 device tensor is fed into a function, it is copied to CPU
by the eager runtime. The function sees all INT32 inputs on CPU.
We set allocator attribute 'on_host' for INT32 outputs. They can be
partitioned into the GPU component function, but will be allocated on
CPU nevertheless.
There is now experimental support for `ints_on_device` in
FunctionLibraryRuntime, which could be exercised here in the future.
"""
with ops.device('/device:CPU:0'):
int_cpu = constant_op.constant(3, dtype=dtypes.int32)
resource = resource_variable_ops.ResourceVariable(5, dtype=dtypes.int32)
with ops.device('/device:GPU:0'):
int_gpu = constant_op.constant(7, dtype=dtypes.int32)
@function.defun
def func(int_cpu, resource, int_gpu):
with ops.device('/device:CPU:0'):
m1 = int_cpu * resource + int_gpu
with ops.device('/device:GPU:0'):
# This computation will happen on GPU but m2 will be copied to CPU.
m2 = int_gpu * resource + int_cpu + 1
return m1, m2
m1, m2 = func(int_cpu, resource, int_gpu)
self.assertAllEqual(m1.numpy(), 22)
self.assertRegexpMatches(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), 39)
self.assertRegexpMatches(m2.backing_device, 'CPU')
# Flip the arguments.
m1, m2 = func(int_gpu, resource, int_cpu)
self.assertAllEqual(m1.numpy(), 38)
self.assertRegexpMatches(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), 23)
self.assertRegexpMatches(m2.backing_device, 'CPU')
@test_util.run_gpu_only
def testMultiDeviceColocateWith(self):
"""Tests that function's outputs respect colocation constraints."""
@function.defun
def func(a, b):
with ops.colocate_with(a):
ra = 2 * a
with ops.colocate_with(b):
rb = 3 * b
return ra, rb
devices = ['/device:CPU:0', '/device:GPU:0']
for dev1, dev2 in itertools.product(devices, devices):
with ops.device(dev1):
a = constant_op.constant(1.0)
with ops.device(dev2):
b = constant_op.constant(10.0)
ra, rb = func(a, b)
self.assertEqual(ra.numpy(), 2.0)
self.assertRegexpMatches(ra.backing_device, dev1)
self.assertEqual(rb.numpy(), 30.0)
self.assertRegexpMatches(rb.backing_device, dev2)
@test_util.run_gpu_only
def testMultiDeviceResources(self):
with ops.device('/device:CPU:0'):
c1 = resource_variable_ops.ResourceVariable(2.0)
c2 = resource_variable_ops.ResourceVariable(7.0)
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
g2 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def func(resource1, resource2):
with ops.device('/device:CPU:0'):
result1 = resource1 * g2
with ops.device('/device:GPU:0'):
result2 = resource2 * c2
return result1, result2
r1, r2 = func(c1, g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegexpMatches(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 21.0)
self.assertRegexpMatches(r2.backing_device, 'GPU')
# Call with flipped inputs. Check that we look at the resources' devices
# and reinstantiate the function when the inputs' devices change.
r1, r2 = func(g1, c1)
self.assertEqual(r1.numpy(), 15.0)
self.assertRegexpMatches(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 14.0)
self.assertRegexpMatches(r2.backing_device, 'GPU')
@test_util.run_gpu_only
def testOutputResources(self):
with ops.device('/device:CPU:0'):
c1 = resource_variable_ops.ResourceVariable(2.0)
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun
def func(resource1, resource2):
with ops.device('/device:CPU:0'):
result1 = resource1 * 5
with ops.device('/device:GPU:0'):
result2 = resource2 * 7
return result1, resource1.handle, result2, resource2.handle
r1, res1, r2, res2 = func(c1, g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegexpMatches(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 21.0)
self.assertRegexpMatches(r2.backing_device, 'GPU')
def check_handle(handle, expected_value):
self.assertRegexpMatches(handle.backing_device, 'CPU')
tensor = gen_resource_variable_ops.read_variable_op(
handle, dtypes.float32)
self.assertEqual(tensor.numpy(), expected_value)
# Check that handles returned from functions are on CPU and an op using
# the resource handle is correctly placed on the device backing the
# resource.
check_handle(res1, 2.0)
check_handle(res2, 3.0)
    # Call with flipped inputs to make sure the function is reinstantiated
    # and the eager runtime does not mess up the device assignment for ops
    # consuming handles returned from defuns.
r1, res1, r2, res2 = func(g1, c1)
self.assertEqual(r1.numpy(), 15.0)
self.assertRegexpMatches(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 14.0)
self.assertRegexpMatches(r2.backing_device, 'GPU')
check_handle(res1, 3.0)
check_handle(res2, 2.0)
@test_util.run_gpu_only
def testComplexInputOutputDevicePattern(self):
"""Tests input/output mapping logic in partitioning."""
with ops.device('/device:CPU:0'):
rc0 = resource_variable_ops.ResourceVariable(2.0)
rc1 = resource_variable_ops.ResourceVariable(3.0)
cc0 = constant_op.constant(5.0)
cc1 = constant_op.constant(7.0)
with ops.device('/device:GPU:0'):
rg0 = resource_variable_ops.ResourceVariable(11.0)
rg1 = resource_variable_ops.ResourceVariable(13.0)
cg0 = constant_op.constant(17.0)
cg1 = constant_op.constant(19.0)
# Make sure tensors are on expected devices.
for tensor in [cc0, cc1]:
self.assertRegexpMatches(tensor.backing_device, 'CPU:0')
for tensor in [cg0, cg1]:
self.assertRegexpMatches(tensor.backing_device, 'GPU:0')
@function.defun
def func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1):
with ops.device('/device:CPU:0'):
m1 = rc0 * cg0
with ops.device('/device:GPU:0'):
m2 = rg0 * cc0
with ops.device('/device:CPU:0'):
r1 = 1000.0 * m2 + rc1 * cg1
with ops.device('/device:GPU:0'):
r2 = 1000.0 * m1 + rg1 * cc1
return r1, r2, m2, m1
r1, r2, m2, m1 = func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1)
self.assertRegexpMatches(m1.backing_device, 'CPU')
self.assertRegexpMatches(r1.backing_device, 'CPU')
self.assertRegexpMatches(m2.backing_device, 'GPU')
self.assertRegexpMatches(r2.backing_device, 'GPU')
self.assertEqual(m1.numpy(), 34.0)
self.assertEqual(r1.numpy(), 55000.0 + 3.0 * 19.0)
self.assertEqual(m2.numpy(), 55.0)
self.assertEqual(r2.numpy(), 34000.0 + 13.0 * 7.0)
@test_util.run_gpu_only
  def testArgumentPruning(self):
"""Tests functions taking unnecessary arguments."""
with ops.device('/device:CPU:0'):
c1 = constant_op.constant(5.0)
c2 = constant_op.constant(7.0)
with ops.device('/device:GPU:0'):
g1 = constant_op.constant(11.0)
g2 = constant_op.constant(13.0)
g3 = constant_op.constant(17.0)
@function.defun
def func(g1, g2, c1, g3, c2): # pylint: disable=unused-argument
      # Arguments g1 and g2 are unused and can be pruned by Grappler.
return c1 * g3 * c2
result = func(g1, g2, c1, g3, c2)
self.assertEqual(result.numpy(), 5.0 * 7.0 * 17.0)
def testNestedCallWatchedVariables(self):
v = variables.Variable(4.)
@def_function.function
def f():
return v ** 2.
with backprop.GradientTape() as tape:
f()
self.assertEqual((v,), tape.watched_variables())
@def_function.function
def g():
return f()
with backprop.GradientTape() as tape:
g()
self.assertEqual((v,), tape.watched_variables())
# f() can rely on the variable being read during its trace. g() checks that
# variables from a function which knows about them are recorded on the
# tape. h() tests that functions forward knowledge of variables to callers.
@def_function.function
def h():
return g()
with backprop.GradientTape() as tape:
h()
self.assertEqual((v,), tape.watched_variables())
def testStandardTrainingLoopInFunction(self):
layer = core.Dense(2)
dataset = (
dataset_ops.DatasetV2.from_tensors(
(array_ops.ones([784]), array_ops.ones([], dtypes.int32)))
.map(lambda x, y: (x, y))
.repeat(10)
.batch(32))
optimizer = adam.Adam()
@def_function.function
def train():
for x, y in dataset:
with backprop.GradientTape() as tape:
out = layer(x)
loss = math_ops.reduce_mean(
nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=out, labels=y))
layer_variables = layer.trainable_variables
gradients = tape.gradient(loss, layer_variables)
optimizer.apply_gradients(zip(gradients, layer_variables))
train()
def testEarlyStoppingTrainingLoopInFunction(self):
layer = core.Dense(2)
dataset = (
dataset_ops.DatasetV2.from_tensors(
(array_ops.ones([784]), array_ops.ones([], dtypes.int32)))
.map(lambda x, y: (x, y))
.repeat(10)
.batch(32))
optimizer = adam.Adam()
@def_function.function
def train():
for x, y in dataset:
with backprop.GradientTape() as tape:
out = layer(x)
loss = math_ops.reduce_mean(
nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=out, labels=y))
layer_variables = layer.trainable_variables
gradients = tape.gradient(loss, layer_variables)
optimizer.apply_gradients(zip(gradients, layer_variables))
if optimizer.iterations > 3:
break
train()
def testDeferredCapture(self):
value = 1.0
@def_function.function
def lazy_capture(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(None))
return x + y
self.assertAllEqual(lazy_capture(2.0), 3.0)
# After changing the value of `value` the function call should return a
# different result.
value = 2.0
self.assertAllEqual(lazy_capture(2.0), 4.0)
def testDeferredCaptureWithKey(self):
value0 = 1.0
value1 = 2.0
@def_function.function
def lazy_capture(x):
w = ops.get_default_graph().capture_call_time_value(
lambda: value0, tensor_spec.TensorSpec(None), key=0)
y = ops.get_default_graph().capture_call_time_value(
lambda: value1, tensor_spec.TensorSpec(None), key=1)
def bad_closure():
raise ValueError('Should not run')
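      # A capture with key=1 already exists above, so the graph reuses it and
      # `bad_closure` is never invoked.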
z = ops.get_default_graph().capture_call_time_value(
bad_closure, tensor_spec.TensorSpec(None), key=1)
return x + y + w + z
self.assertAllEqual(lazy_capture(2.0), 7.0)
value0 = 2.0
value1 = 3.0
self.assertAllEqual(lazy_capture(2.0), 10.0)
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
test.main()
|
tensorflow-master
|
tensorflow/python/eager/function_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for eager execution_callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import execution_callbacks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
RAISE = execution_callbacks.ExecutionCallback.RAISE
IGNORE = execution_callbacks.ExecutionCallback.IGNORE
def log_zero():
"""Computes `log(0.0)`."""
return math_ops.log(constant_op.constant(0.))
class ExecutionCallbacksTest(test.TestCase):
def test_errstate_inf_raise(self):
with execution_callbacks.errstate(inf_or_nan=RAISE):
with self.assertRaises(execution_callbacks.InfOrNanError):
log_zero()
def test_errstate_inf_ignore(self):
with execution_callbacks.errstate(inf_or_nan=IGNORE):
self.assertEqual(-float("inf"), log_zero().numpy())
def test_errstate_nesting(self):
with execution_callbacks.errstate(inf_or_nan=RAISE):
with execution_callbacks.errstate(inf_or_nan=IGNORE):
self.assertEqual(-float("inf"), log_zero().numpy())
with self.assertRaises(execution_callbacks.InfOrNanError):
log_zero()
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/eager/execution_callbacks_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to connect to remote servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf.cluster_pb2 import ClusterDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export("config.experimental_connect_to_host")
def connect_to_remote_host(remote_host=None, job_name="worker"):
"""Connects to a single machine to enable remote execution on it.
  Makes devices on the remote host available to use. Calling this more than
  once works, but invalidates any tensor handles on the old remote devices.
Using the default job_name of worker, you can schedule ops to run remotely as
follows:
```python
# Enable eager execution, and connect to the remote host.
tf.compat.v1.enable_eager_execution()
tf.contrib.eager.connect_to_remote_host("exampleaddr.com:9876")
with ops.device("job:worker/replica:0/task:1/device:CPU:0"):
# The following tensors should be resident on the remote device, and the op
# will also execute remotely.
x1 = array_ops.ones([2, 2])
x2 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x2)
```
Args:
    remote_host: a single remote server address, or a list of addresses, in
      host-port format.
job_name: The job name under which the new server will be accessible.
Raises:
ValueError: if remote_host is None.
"""
if not remote_host:
raise ValueError("Must provide at least one remote_host")
remote_host = nest.flatten(remote_host)
grpc_prefix = "grpc://"
local_port = pywrap_tensorflow.TF_PickUnusedPortOrDie()
cluster_def = ClusterDef()
job_def = cluster_def.job.add()
job_def.name = "localhost"
  # TODO(fishx): Update this to make sure the remote worker has a valid IP
  # address with which the local host can connect to it.
job_def.tasks[0] = "localhost:{}".format(local_port)
job_def = cluster_def.job.add()
job_def.name = job_name
for i in range(len(remote_host)):
if remote_host[i].startswith(grpc_prefix):
job_def.tasks[i] = remote_host[i][len(grpc_prefix):]
else:
job_def.tasks[i] = remote_host[i]
server_def = ServerDef(
cluster=cluster_def, job_name="localhost", task_index=0, protocol="grpc")
# TODO(nareshmodi): Make this default since it works in more situations.
os.environ["TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC"] = "1"
context.set_server_def(server_def)
|
tensorflow-master
|
tensorflow/python/eager/remote.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitoring."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import test
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
class MonitoringTest(test_util.TensorFlowTestCase):
def test_counter(self):
counter = monitoring.Counter('test/counter', 'test counter')
counter.get_cell().increase_by(1)
self.assertEqual(counter.get_cell().value(), 1)
counter.get_cell().increase_by(5)
self.assertEqual(counter.get_cell().value(), 6)
def test_multiple_counters(self):
counter1 = monitoring.Counter('test/counter1', 'test counter', 'label1')
counter1.get_cell('foo').increase_by(1)
self.assertEqual(counter1.get_cell('foo').value(), 1)
counter2 = monitoring.Counter('test/counter2', 'test counter', 'label1',
'label2')
counter2.get_cell('foo', 'bar').increase_by(5)
self.assertEqual(counter2.get_cell('foo', 'bar').value(), 5)
def test_same_counter(self):
counter1 = monitoring.Counter('test/same_counter', 'test counter') # pylint: disable=unused-variable
with self.assertRaises(errors.AlreadyExistsError):
counter2 = monitoring.Counter('test/same_counter', 'test counter') # pylint: disable=unused-variable
def test_int_gauge(self):
gauge = monitoring.IntGauge('test/gauge', 'test gauge')
gauge.get_cell().set(1)
self.assertEqual(gauge.get_cell().value(), 1)
gauge.get_cell().set(5)
self.assertEqual(gauge.get_cell().value(), 5)
gauge1 = monitoring.IntGauge('test/gauge1', 'test gauge1', 'label1')
gauge1.get_cell('foo').set(2)
self.assertEqual(gauge1.get_cell('foo').value(), 2)
def test_string_gauge(self):
gauge = monitoring.StringGauge('test/gauge', 'test gauge')
gauge.get_cell().set('left')
self.assertEqual(gauge.get_cell().value(), 'left')
gauge.get_cell().set('right')
self.assertEqual(gauge.get_cell().value(), 'right')
gauge1 = monitoring.StringGauge('test/gauge1', 'test gauge1', 'label1')
gauge1.get_cell('foo').set('start')
self.assertEqual(gauge1.get_cell('foo').value(), 'start')
def test_bool_gauge(self):
gauge = monitoring.BoolGauge('test/gauge', 'test gauge')
gauge.get_cell().set(True)
self.assertTrue(gauge.get_cell().value())
gauge.get_cell().set(False)
self.assertFalse(gauge.get_cell().value())
gauge1 = monitoring.BoolGauge('test/gauge1', 'test gauge1', 'label1')
gauge1.get_cell('foo').set(True)
self.assertTrue(gauge1.get_cell('foo').value())
def test_sampler(self):
buckets = monitoring.ExponentialBuckets(1.0, 2.0, 2)
sampler = monitoring.Sampler('test/sampler', buckets, 'test sampler')
sampler.get_cell().add(1.0)
sampler.get_cell().add(5.0)
histogram_proto = sampler.get_cell().value()
self.assertEqual(histogram_proto.min, 1.0)
self.assertEqual(histogram_proto.num, 2.0)
self.assertEqual(histogram_proto.sum, 6.0)
sampler1 = monitoring.Sampler('test/sampler1', buckets, 'test sampler',
'label1')
sampler1.get_cell('foo').add(2.0)
sampler1.get_cell('foo').add(4.0)
sampler1.get_cell('bar').add(8.0)
histogram_proto1 = sampler1.get_cell('foo').value()
self.assertEqual(histogram_proto1.max, 4.0)
self.assertEqual(histogram_proto1.num, 2.0)
self.assertEqual(histogram_proto1.sum, 6.0)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/eager/monitoring_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow monitoring APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.core.framework import summary_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.util import compat
_MetricMethod = collections.namedtuple('MetricMethod', 'create delete get_cell')
_counter_methods = [
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewCounter0,
delete=pywrap_tensorflow.TFE_MonitoringDeleteCounter0,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellCounter0),
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewCounter1,
delete=pywrap_tensorflow.TFE_MonitoringDeleteCounter1,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellCounter1),
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewCounter2,
delete=pywrap_tensorflow.TFE_MonitoringDeleteCounter2,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellCounter2),
]
_int_gauge_methods = [
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewIntGauge0,
delete=pywrap_tensorflow.TFE_MonitoringDeleteIntGauge0,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellIntGauge0),
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewIntGauge1,
delete=pywrap_tensorflow.TFE_MonitoringDeleteIntGauge1,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellIntGauge1),
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewIntGauge2,
delete=pywrap_tensorflow.TFE_MonitoringDeleteIntGauge2,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellIntGauge2),
]
_string_gauge_methods = [
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewStringGauge0,
delete=pywrap_tensorflow.TFE_MonitoringDeleteStringGauge0,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellStringGauge0),
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewStringGauge1,
delete=pywrap_tensorflow.TFE_MonitoringDeleteStringGauge1,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellStringGauge1),
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewStringGauge2,
delete=pywrap_tensorflow.TFE_MonitoringDeleteStringGauge2,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellStringGauge2),
]
_bool_gauge_methods = [
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewBoolGauge0,
delete=pywrap_tensorflow.TFE_MonitoringDeleteBoolGauge0,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellBoolGauge0),
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewBoolGauge1,
delete=pywrap_tensorflow.TFE_MonitoringDeleteBoolGauge1,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellBoolGauge1),
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewBoolGauge2,
delete=pywrap_tensorflow.TFE_MonitoringDeleteBoolGauge2,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellBoolGauge2),
]
_sampler_methods = [
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewSampler0,
delete=pywrap_tensorflow.TFE_MonitoringDeleteSampler0,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellSampler0),
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewSampler1,
delete=pywrap_tensorflow.TFE_MonitoringDeleteSampler1,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellSampler1),
_MetricMethod(
create=pywrap_tensorflow.TFE_MonitoringNewSampler2,
delete=pywrap_tensorflow.TFE_MonitoringDeleteSampler2,
get_cell=pywrap_tensorflow.TFE_MonitoringGetCellSampler2),
]
class Metric(object):
"""The base class of metric."""
def __init__(self, metric_name, metric_methods, label_length, *args):
"""Creates a new metric.
Args:
metric_name: name of the metric class.
      metric_methods: list of SWIG-wrapped metric methods.
      label_length: number of labels.
      *args: the arguments to pass to the create method.
"""
self._metric_name = metric_name
self._metric_methods = metric_methods
self._label_length = label_length
if label_length >= len(self._metric_methods):
raise ValueError('Cannot create {} metric with label >= {}'.format(
self._metric_name, len(self._metric_methods)))
self._metric = self._metric_methods[self._label_length].create(*args)
def __del__(self):
if hasattr(self, '_metric'):
deleter = self._metric_methods[self._label_length].delete
if deleter is not None:
deleter(self._metric)
def get_cell(self, *labels):
"""Retrieves the cell."""
if len(labels) != self._label_length:
raise ValueError('The {} expects taking {} labels'.format(
self._metric_name, self._label_length))
return self._metric_methods[self._label_length].get_cell(
self._metric, *labels)
class CounterCell(object):
"""CounterCell stores each value of a Counter."""
def __init__(self, cell):
"""Creates a new CounterCell.
Args:
      cell: A C pointer to a TFE_MonitoringCounterCell.
"""
self._cell = cell
def increase_by(self, value):
"""Atomically increments the value.
Args:
value: non-negative value.
"""
pywrap_tensorflow.TFE_MonitoringCounterCellIncrementBy(self._cell, value)
def value(self):
"""Retrieves the current value."""
return pywrap_tensorflow.TFE_MonitoringCounterCellValue(self._cell)
class Counter(Metric):
"""A stateful class for updating a cumulative integer metric.
This class encapsulates a set of values (or a single value for a label-less
metric). Each value is identified by a tuple of labels. The class allows the
user to increment each value.
"""
def __init__(self, name, description, *labels):
"""Creates a new Counter.
Args:
name: name of the new metric.
description: description of the new metric.
*labels: The label list of the new metric.
"""
super(Counter, self).__init__('Counter', _counter_methods, len(labels),
name, description, *labels)
def get_cell(self, *labels):
"""Retrieves the cell."""
return CounterCell(super(Counter, self).get_cell(*labels))
class IntGaugeCell(object):
"""A single integer value stored in an `IntGauge`."""
def __init__(self, cell):
"""Creates a new IntGaugeCell.
Args:
      cell: A C pointer to a TFE_MonitoringIntGaugeCell.
"""
self._cell = cell
def set(self, value):
"""Atomically set the value.
Args:
value: integer value.
"""
pywrap_tensorflow.TFE_MonitoringIntGaugeCellSet(self._cell, value)
def value(self):
"""Retrieves the current value."""
return pywrap_tensorflow.TFE_MonitoringIntGaugeCellValue(self._cell)
class IntGauge(Metric):
"""A stateful class for updating a gauge-like integer metric.
This class encapsulates a set of integer values (or a single value for a
label-less metric). Each value is identified by a tuple of labels. The class
allows the user to set each value.
"""
def __init__(self, name, description, *labels):
"""Creates a new IntGauge.
Args:
name: name of the new metric.
description: description of the new metric.
*labels: The label list of the new metric.
"""
super(IntGauge, self).__init__('IntGauge', _int_gauge_methods, len(labels),
name, description, *labels)
def get_cell(self, *labels):
"""Retrieves the cell."""
return IntGaugeCell(super(IntGauge, self).get_cell(*labels))
class StringGaugeCell(object):
"""A single string value stored in an `StringGauge`."""
def __init__(self, cell):
"""Creates a new StringGaugeCell.
Args:
      cell: A C pointer to a TFE_MonitoringStringGaugeCell.
"""
self._cell = cell
def set(self, value):
"""Atomically set the value.
Args:
value: string value.
"""
pywrap_tensorflow.TFE_MonitoringStringGaugeCellSet(self._cell, value)
def value(self):
"""Retrieves the current value."""
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TFE_MonitoringStringGaugeCellValue(self._cell, buffer_)
value = pywrap_tensorflow.TF_GetBuffer(buffer_).decode('utf-8')
return value
class StringGauge(Metric):
"""A stateful class for updating a gauge-like string metric.
This class encapsulates a set of string values (or a single value for a
label-less metric). Each value is identified by a tuple of labels. The class
allows the user to set each value.
"""
def __init__(self, name, description, *labels):
"""Creates a new StringGauge.
Args:
name: name of the new metric.
description: description of the new metric.
*labels: The label list of the new metric.
"""
super(StringGauge, self).__init__('StringGauge', _string_gauge_methods,
len(labels), name, description, *labels)
def get_cell(self, *labels):
"""Retrieves the cell."""
return StringGaugeCell(super(StringGauge, self).get_cell(*labels))
class BoolGaugeCell(object):
"""A single boolean value stored in an `BoolGauge`."""
def __init__(self, cell):
"""Creates a new BoolGaugeCell.
Args:
      cell: A C pointer to a TFE_MonitoringBoolGaugeCell.
"""
self._cell = cell
def set(self, value):
"""Atomically set the value.
Args:
value: bool value.
"""
pywrap_tensorflow.TFE_MonitoringBoolGaugeCellSet(self._cell, value)
def value(self):
"""Retrieves the current value."""
return pywrap_tensorflow.TFE_MonitoringBoolGaugeCellValue(self._cell)
class BoolGauge(Metric):
"""A stateful class for updating a gauge-like bool metric.
This class encapsulates a set of boolean values (or a single value for a
label-less metric). Each value is identified by a tuple of labels. The class
allows the user to set each value.
"""
def __init__(self, name, description, *labels):
"""Creates a new BoolGauge.
Args:
name: name of the new metric.
description: description of the new metric.
*labels: The label list of the new metric.
"""
super(BoolGauge, self).__init__('BoolGauge', _bool_gauge_methods,
len(labels), name, description, *labels)
def get_cell(self, *labels):
"""Retrieves the cell."""
return BoolGaugeCell(super(BoolGauge, self).get_cell(*labels))
class SamplerCell(object):
"""SamplerCell stores each value of a Sampler."""
def __init__(self, cell):
"""Creates a new SamplerCell.
Args:
      cell: A C pointer to a TFE_MonitoringSamplerCell.
"""
self._cell = cell
def add(self, value):
"""Atomically add a sample.
Args:
value: float value.
"""
pywrap_tensorflow.TFE_MonitoringSamplerCellAdd(self._cell, value)
def value(self):
"""Retrieves the current distribution of samples.
Returns:
A HistogramProto describing the distribution of samples.
"""
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TFE_MonitoringSamplerCellValue(self._cell, buffer_)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
histogram_proto = summary_pb2.HistogramProto()
histogram_proto.ParseFromString(compat.as_bytes(proto_data))
return histogram_proto
class Buckets(object):
"""Bucketing strategies for the samplers."""
def __init__(self, buckets):
"""Creates a new Buckets.
Args:
      buckets: A C pointer to a TFE_MonitoringBuckets.
"""
self.buckets = buckets
def __del__(self):
pywrap_tensorflow.TFE_MonitoringDeleteBuckets(self.buckets)
class ExponentialBuckets(Buckets):
"""Exponential bucketing strategy.
  Sets up buckets of the form:
      [-DBL_MAX, ..., scale * growth_factor^i,
       scale * growth_factor^(i + 1), ..., DBL_MAX].
"""
def __init__(self, scale, growth_factor, bucket_count):
"""Creates a new exponential Buckets.
Args:
      scale: float, the boundary of the first finite bucket.
      growth_factor: float, the ratio between consecutive bucket boundaries.
      bucket_count: integer, the number of buckets.
"""
super(ExponentialBuckets, self).__init__(
pywrap_tensorflow.TFE_MonitoringNewExponentialBuckets(
scale, growth_factor, bucket_count))
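# Illustration (assuming bucket boundaries at scale * growth_factor^i, as in
# the class docstring): ExponentialBuckets(1.0, 2.0, 2) yields boundaries at
# 1.0 and 2.0, i.e. buckets [-DBL_MAX, 1.0), [1.0, 2.0), [2.0, DBL_MAX].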
class Sampler(Metric):
"""A stateful class for updating a cumulative histogram metric.
This class encapsulates a set of histograms (or a single histogram for a
label-less metric) configured with a list of increasing bucket boundaries.
Each histogram is identified by a tuple of labels. The class allows the
user to add a sample to each histogram value.
"""
def __init__(self, name, buckets, description, *labels):
"""Creates a new Sampler.
Args:
name: name of the new metric.
buckets: bucketing strategy of the new metric.
description: description of the new metric.
*labels: The label list of the new metric.
"""
super(Sampler, self).__init__('Sampler', _sampler_methods, len(labels),
name, buckets.buckets, description, *labels)
def get_cell(self, *labels):
"""Retrieves the cell."""
return SamplerCell(super(Sampler, self).get_cell(*labels))
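# A minimal usage sketch, mirroring monitoring_test.py above (the metric
# names here are hypothetical):
#
#   counter = Counter('test/num_calls', 'Number of calls.', 'method')
#   counter.get_cell('train').increase_by(1)
#   assert counter.get_cell('train').value() == 1
#
#   buckets = ExponentialBuckets(1.0, 2.0, 2)
#   sampler = Sampler('test/latency', buckets, 'Latency in seconds.')
#   sampler.get_cell().add(0.5)
#   histogram = sampler.get_cell().value()  # A HistogramProto.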
|
tensorflow-master
|
tensorflow/python/eager/monitoring.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@parameterized.named_parameters(
dict(testcase_name='Defun', function_decorator=function.defun),
dict(testcase_name='DefFunction', function_decorator=def_function.function))
class ArgumentNamingTests(test.TestCase, parameterized.TestCase):
"""Tests for recognizable export signatures from concrete functions."""
def testBasic(self, function_decorator):
@function_decorator
def fn(a, b):
return a + b, a * b
# Call the function to make def_function happy
fn(array_ops.ones([]), array_ops.ones([]))
fn_op = fn.get_concrete_function(
tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))
self.assertEqual(
['a', 'b'],
[inp.op.name for inp in fn_op.inputs])
self.assertEqual(
[b'a', b'b'],
[inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs])
self.assertEqual(2, len(fn_op.graph.structured_outputs))
self.assertAllClose(
[3., 2.],
fn_op(constant_op.constant(1.), constant_op.constant(2.)))
self.assertAllClose(
[3., 2.],
fn_op(a=constant_op.constant(1.), b=constant_op.constant(2.)))
def testVariable(self, function_decorator):
@function_decorator
def fn(a, b):
return a + b, a * b
# Call the function to make def_function happy
fn(array_ops.ones([]), array_ops.ones([]))
fn_op = fn.get_concrete_function(
tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),
variables.Variable(1.))
self.assertEqual(
['a', 'b'],
[inp.op.name for inp in fn_op.inputs])
self.assertEqual(
[b'a', b'b'],
[inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs])
self.assertEqual(2, len(fn_op.graph.structured_outputs))
def testDictReturned(self, function_decorator):
@function_decorator
def fn(x, z=(1., 2.), y=3.):
z1, z2 = z
return {'alpha': x + y + z1, 'beta': x * y + z2}
# Call the function to make def_function happy
fn(array_ops.ones([]))
fn_op = fn.get_concrete_function(
x=tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),
y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))
self.assertEqual(
['x', 'y'],
[inp.op.name for inp in fn_op.inputs])
self.assertEqual(
[b'x', b'y'],
[inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs])
self.assertEqual({'alpha', 'beta'},
set(fn_op.graph.structured_outputs.keys()))
fn_op2 = fn.get_concrete_function(
z=(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32,
name='z_first'),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32,
name='z_second')),
y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='custom'),
x=4.)
self.assertEqual(
['z_first', 'z_second', 'custom'],
[inp.op.name for inp in fn_op2.inputs])
self.assertEqual(
[b'z_first', b'z_second', b'custom'],
[inp.op.get_attr('_user_specified_name') for inp in fn_op2.inputs])
fn_op3 = fn.get_concrete_function(
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='custom'),
z=(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32,
name='z1'),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='z2')),
y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))
self.assertEqual(
['custom', 'z1', 'z2', 'y'],
[inp.op.name for inp in fn_op3.inputs])
self.assertEqual(
[b'custom', b'z1', b'z2', b'y'],
[inp.op.get_attr('_user_specified_name') for inp in fn_op3.inputs])
def testMethod(self, function_decorator):
class HasMethod(object):
@function_decorator
def method(self, x):
return x
has_method = HasMethod()
# Call the function to make def_function happy
HasMethod.method(has_method, array_ops.ones([]))
class_op = HasMethod.method.get_concrete_function(
has_method, tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))
self.assertEqual(
['x'],
[inp.op.name for inp in class_op.inputs])
self.assertEqual(
[b'x'],
[inp.op.get_attr('_user_specified_name') for inp in class_op.inputs])
# Call the function to make def_function happy
has_method.method(array_ops.ones([]))
method_op = has_method.method.get_concrete_function(
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))
self.assertEqual(
['x'],
[inp.op.name for inp in method_op.inputs])
self.assertEqual(
[b'x'],
[inp.op.get_attr('_user_specified_name') for inp in method_op.inputs])
# TODO(allenl): It should be possible to override names when exporting. Do
# TensorSpec names need to go in cache keys? Or maybe get_concrete_function
# should always retrace?
self.skipTest('Not working')
method_op = has_method.method.get_concrete_function(
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='y'))
self.assertEqual(
['y'],
[inp.op.name for inp in method_op.inputs])
self.assertEqual(
[b'y'],
[inp.op.get_attr('_user_specified_name') for inp in method_op.inputs])
def testMethodSignature(self, function_decorator):
class HasMethod(object):
@function_decorator(
input_signature=(tensor_spec.TensorSpec(
shape=None, dtype=dtypes.float64, name='y'),))
def method(self, x):
hash(self) # No weak proxies passed as `self`
return x
has_method = HasMethod()
# Call the function to make def_function happy
has_method.method(array_ops.ones([], dtype=dtypes.float64))
method_op = has_method.method.get_concrete_function()
self.assertEqual(
['y'],
[inp.op.name for inp in method_op.inputs])
self.assertEqual(
[b'y'],
[inp.op.get_attr('_user_specified_name') for inp in method_op.inputs])
method_op2 = has_method.method.get_concrete_function()
self.assertEqual(
['y'],
[inp.op.name for inp in method_op2.inputs])
self.assertEqual(
[b'y'],
[inp.op.get_attr('_user_specified_name') for inp in method_op2.inputs])
def testVariadic(self, function_decorator):
@function_decorator
def variadic_fn(x, *args, **kwargs):
return x + math_ops.add_n(list(args) + list(kwargs.values()))
# Call the function to make def_function happy
variadic_fn(array_ops.ones([]), array_ops.ones([]))
variadic_op = variadic_fn.get_concrete_function(
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32, name='y'),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32,
name='second_variadic'),
z=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
zz=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='cust'))
self.assertEqual(
['x', 'y', 'args_1', 'second_variadic', 'z', 'cust'],
[inp.op.name for inp in variadic_op.inputs])
self.assertEqual(
[b'x', b'y', b'args_1', b'second_variadic', b'z', b'cust'],
[inp.op.get_attr('_user_specified_name')
for inp in variadic_op.inputs])
def testVariadicInputSignature(self, function_decorator):
@function_decorator(
input_signature=(
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32, name='y'),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='z'),
))
def variadic_fn(x, *args):
return x + math_ops.add_n(list(args))
# Call the function to make def_function happy
variadic_fn(array_ops.ones([]), array_ops.ones([]),
array_ops.ones([]), array_ops.ones([]))
variadic_op = variadic_fn.get_concrete_function()
self.assertIn(b'variadic_fn', variadic_op.name)
self.assertEqual(
['x', 'y', 'args_1', 'z'],
[inp.op.name for inp in variadic_op.inputs])
self.assertEqual(
[b'x', b'y', b'args_1', b'z'],
[inp.op.get_attr('_user_specified_name')
for inp in variadic_op.inputs])
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
test.main()
|
tensorflow-master
|
tensorflow/python/eager/function_argument_naming_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient tape utilites."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util.lazy_loader import LazyLoader
# There is a circular dependency between this, ops.py, and
# distribution_strategy_context.
# TODO(b/117329403): Remove this circular dependency.
distribution_strategy_context = LazyLoader(
"distribution_strategy_context", globals(),
"tensorflow.python.distribute."
"distribution_strategy_context")
class Tape(object):
"""Represents a gradient propagation trace."""
def __init__(self, tape):
self._tape = tape
def watched_variables(self):
return pywrap_tensorflow.TFE_Py_TapeWatchedVariables(self._tape)
def push_new_tape(persistent=False, watch_accessed_variables=True):
"""Pushes a new tape onto the tape stack."""
tape = pywrap_tensorflow.TFE_Py_TapeSetNew(persistent,
watch_accessed_variables)
return Tape(tape)
def push_tape(tape):
"""Pushes an existing tape onto the tape stack."""
pywrap_tensorflow.TFE_Py_TapeSetAdd(tape._tape) # pylint: disable=protected-access
def watch(tape, tensor):
"""Marks this tensor to be watched by the given tape."""
pywrap_tensorflow.TFE_Py_TapeWatch(tape._tape, tensor) # pylint: disable=protected-access
def watch_variable(tape, variable):
"""Marks this variable to be watched by the given tape."""
strategy, context = (
distribution_strategy_context.get_strategy_and_replica_context())
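  # Inside a replica context, watch the variable's value container (e.g. the
  # enclosing distributed variable) rather than the per-replica value;
  # otherwise expand a distributed variable into its local components.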
if context:
variables = [strategy.extended.value_container(variable)]
else:
variables = strategy.experimental_local_results(variable)
for var in variables:
pywrap_tensorflow.TFE_Py_TapeWatchVariable(tape._tape, var) # pylint: disable=protected-access
def variable_accessed(variable):
"""Notifies all tapes in the stack that a variable has been accessed.
Args:
variable: variable to be watched.
"""
strategy, context = (
distribution_strategy_context.get_strategy_and_replica_context())
if context:
variables = [strategy.extended.value_container(variable)]
else:
variables = strategy.experimental_local_results(variable)
for var in variables:
pywrap_tensorflow.TFE_Py_TapeVariableAccessed(var)
def variables_accessed(variables):
"""Notifies all tapes in the stack that variables have been accessed.
Only trainable variables are marked as accessed.
Args:
variables: iterable of variables to mark as accessed.
"""
strategy, context = (
distribution_strategy_context.get_strategy_and_replica_context())
accessed = []
if context:
accessed = [strategy.extended.value_container(variable)
for variable in variables if variable.trainable]
else:
for variable in variables:
if variable.trainable:
accessed.extend(strategy.experimental_local_results(variable))
for var in accessed:
pywrap_tensorflow.TFE_Py_TapeVariableAccessed(var)
def pop_tape(tape):
"""Pops the given tape in the stack."""
pywrap_tensorflow.TFE_Py_TapeSetRemove(tape._tape) # pylint: disable=protected-access
@contextlib.contextmanager
def stop_recording():
is_stopped = pywrap_tensorflow.TFE_Py_TapeSetIsStopped()
try:
if not is_stopped:
pywrap_tensorflow.TFE_Py_TapeSetStopOnThread()
yield
finally:
if not is_stopped:
pywrap_tensorflow.TFE_Py_TapeSetRestartOnThread()
def should_record(tensors):
"""Returns true if any tape in the stack watches any of these tensors."""
return pywrap_tensorflow.TFE_Py_TapeSetShouldRecord(tensors)
def record_operation(op_type, output_tensors, input_tensors, backward_function):
"""Records the operation on all tapes in the stack."""
pywrap_tensorflow.TFE_Py_TapeSetRecordOperation(
op_type, output_tensors, input_tensors, backward_function)
def delete_trace(tensor_id):
"""Deletes traces for this Tensor from all tapes in the stack."""
pywrap_tensorflow.TFE_Py_TapeSetDeleteTrace(tensor_id)
def could_possibly_record():
"""Returns True if any tape is active."""
return not pywrap_tensorflow.TFE_Py_TapeSetIsEmpty()
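# A rough sketch of how these helpers compose (this is approximately what
# backprop.GradientTape does internally; the wiring below is illustrative,
# not the actual implementation):
#
#   t = push_new_tape(persistent=False)   # Begin recording.
#   watch(t, tensor)                      # Trace ops consuming `tensor`.
#   # ... eager ops executed here call record_operation(...) ...
#   pop_tape(t)                           # Stop recording.
#   t.watched_variables()                 # Variables touched while recording.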
|
tensorflow-master
|
tensorflow/python/eager/tape.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
import weakref
from six.moves import range
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class _ModelWithOptimizer(training.Model):
def __init__(self):
super(_ModelWithOptimizer, self).__init__()
self.dense = core.Dense(1)
self.optimizer = adam.AdamOptimizer(0.01)
@def_function.function(
input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.float32)))
def call(self, x, y):
with backprop.GradientTape() as tape:
loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
return {'loss': loss}
class _HasDecoratedMethod(object):
@def_function.function
def f(self, x):
return x * 3.
class DefFunctionTest(test.TestCase):
def testNoVariables(self):
@def_function.function
def fn(x):
return 2 * x
self.assertAllEqual(fn(constant_op.constant(4.0)), 8.0)
def testFailIfVariablesAreCreatedMoreThanOnce(self):
@def_function.function
def fn(x):
return variables.Variable(1.0) + x
with self.assertRaises(ValueError):
fn(1.0)
def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self):
state = []
@def_function.function
def fn(x):
state.append(variables.Variable(1.0))
return state[-1] + x
with self.assertRaises(ValueError):
fn(1.0)
def testRange(self):
@def_function.function
def f(unused_x):
return 1.0
self.assertAllEqual(f(range(5)), 1.0)
def testCorrectVariableCreation(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testFunctionInitializer(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(lambda: 2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
def testFunctionInitializationFunction(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
init_fn = fn.get_initialization_function(constant_op.constant(1.0))
self.assertEqual(len(state), 1)
self.assertFalse(
resource_variable_ops.var_is_initialized_op(state[0].handle))
init_fn()
self.assertEqual(state[0].numpy(), 2.0)
def testVariableInitializerNotConstant(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testLegacyGraphModeVariables(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 2.0)
self.assertAllEqual(self.evaluate(result), 6.0)
def testLegacyGraphModeVariablesNonTrivialInitializer(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
two = constant_op.constant(2.0)
four = two * two
two_again = math_ops.sqrt(four)
state.append(variables.Variable(two_again + four))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 6.0)
self.assertAllEqual(self.evaluate(result), 18.0)
def testLegacyGraphModeInputDependentInitializerFails(self):
with ops.Graph().as_default():
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
with self.assertRaisesRegexp(
lift_to_graph.UnliftableError, r'transitively.* mul .* x'):
fn(constant_op.constant(3.0))
def testMethod(self):
class MyModel(object):
def __init__(self):
self.var = None
@def_function.function
def apply(self, x):
if self.var is None:
self.var = variables.Variable(2.0)
return self.var * x
m0 = MyModel()
self.assertAllEqual(m0.apply(3.0), 6.0)
# Calling twice to exercise that we do not recreate variables.
m0.var.assign(3.0)
self.assertAllEqual(m0.apply(3.0), 9.0)
m1 = MyModel()
self.assertAllEqual(m1.apply(3.0), 6.0)
def test_functools_partial(self):
self.assertAllClose(
3.,
def_function.function(functools.partial(lambda x, y: x + y, 1.))(
constant_op.constant(2.)))
def test_functools_partial_new_default(self):
def f(x=3, y=7):
return x + y
func = def_function.function(functools.partial(f, y=6))
self.assertEqual(func().numpy(), 9)
self.assertEqual(func(y=8).numpy(), 11)
def test_functools_partial_keywords(self):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.zeros([1])))
self.assertAllEqual(func(), [0.0])
def test_functools_partial_single_positional(self):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, constant_op.constant(1)))
self.assertAllEqual(func(5), 6)
def test_complicated_partial_with_defaults(self):
def identity(*args):
return args
def dynamic_unroll(core_fn,
input_sequence,
initial_state,
sequence_length=None,
parallel_iterations=1,
swap_memory=False):
del core_fn
self.assertIs(None, sequence_length)
self.assertEqual(1, parallel_iterations)
self.assertTrue(swap_memory)
return input_sequence, initial_state
input_sequence = random_ops.random_uniform([1, 1, 1])
initial_state = random_ops.random_uniform([1, 1])
func = def_function.function(
functools.partial(dynamic_unroll, identity, swap_memory=True))
func(input_sequence, initial_state)
def test_unspecified_default_argument(self):
wrapped = def_function.function(
lambda x, y=2: x + y,
input_signature=[tensor_spec.TensorSpec((), dtypes.int32)])
self.assertEqual(3, wrapped(constant_op.constant(1)).numpy())
def test_optimizer(self):
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
model = _ModelWithOptimizer()
model(x, y)
def test_concrete_function_from_signature(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def compute(x):
return 2. * x
concrete = compute.get_concrete_function()
self.assertAllClose(1., concrete(constant_op.constant(0.5)))
concrete = compute.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
self.assertAllClose(4., concrete(constant_op.constant(2.)))
signature_args, _ = concrete.structured_input_signature
self.assertEqual(signature_args,
(tensor_spec.TensorSpec(
None, dtypes.float32, name='x'),))
@test_util.run_in_graph_and_eager_modes
def test_variable_naming(self):
class HasVars(module.Module):
def __init__(self):
self.x = None
self.y = None
self.z = None
@def_function.function
def make_x(self):
if self.x is None:
self.x = variables.Variable(1., name='v')
def make_y(self):
if self.y is None:
self.y = variables.Variable(1., name='v')
def make_z(self):
if self.z is None:
with ops.name_scope('z_scope'):
self.z = variables.Variable(1., name='z')
root = HasVars()
root.make_x()
root.make_y()
root.make_z()
self.assertEqual('v:0', root.x.name)
self.assertEqual('z_scope/z:0', root.z.name)
def test_concrete_function_keyword_arguments(self):
@def_function.function
def f(x):
return x
conc = f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32, 'y'))
conc(y=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('y', signature_args[0].name)
conc = f.get_concrete_function(tensor_spec.TensorSpec(None, dtypes.float32))
conc(x=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('x', signature_args[0].name)
@def_function.function
def g(x):
return x[0]
conc = g.get_concrete_function(
[tensor_spec.TensorSpec(None, dtypes.float32, 'z'), 2])
conc(z=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('z', signature_args[0][0].name)
with self.assertRaisesRegexp(
ValueError, 'either zero or all names have to be specified'):
conc = g.get_concrete_function([
tensor_spec.TensorSpec(None, dtypes.float32, 'z'),
tensor_spec.TensorSpec(None, dtypes.float32),
])
def test_error_inner_capture(self):
@def_function.function
def f(inputs):
num_steps, _ = inputs.shape[:2]
outputs = []
for t in math_ops.range(num_steps):
outputs.append(inputs[t])
return outputs
with self.assertRaisesRegexp(ValueError, 'inner'):
f(array_ops.zeros(shape=(8, 42, 3)))
def testRuntimeErrorNotSticky(self):
@def_function.function
def fail(i):
control_flow_ops.Assert(math_ops.equal(i, 0), ['ick'])
fail(constant_op.constant(0)) # OK
with self.assertRaises(errors.InvalidArgumentError):
fail(constant_op.constant(1)) # InvalidArgument: "ick"
fail(constant_op.constant(0)) # OK
def testUnderscoreName(self):
@def_function.function
def f(_):
return _ + _
self.assertAllEqual(2.0, f(constant_op.constant(1.0)))
def test_serialization_signature_cache(self):
@def_function.function
def f(x, y):
return x, y
f(constant_op.constant([[3., 4.]]), constant_op.constant([2.]))
f(constant_op.constant([[3, 4, 5]]), constant_op.constant([2]))
signatures_args = set()
concrete_functions = f._list_all_concrete_functions_for_serialization()
for concrete_function in concrete_functions:
args, kwargs = concrete_function.structured_input_signature
signatures_args.add(args)
self.assertEqual(dict(), kwargs)
self.assertEqual(
signatures_args,
set(((tensor_spec.TensorSpec([1, 2], dtypes.float32, name='x'),
tensor_spec.TensorSpec([1], dtypes.float32, name='y')),
(tensor_spec.TensorSpec([1, 3], dtypes.int32, name='x'),
tensor_spec.TensorSpec([1], dtypes.int32, name='y')))))
@test_util.assert_no_garbage_created
def testFunctionReferenceCycles(self):
fn = def_function.function(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
@test_util.assert_no_garbage_created
def testMethodReferenceCycles(self):
has_decorated_method = _HasDecoratedMethod()
has_decorated_method.f(constant_op.constant(5.))
weak_fn = weakref.ref(has_decorated_method.f)
del has_decorated_method
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
def testErrorMessageWhenGraphTensorIsPassedToEager(self):
@def_function.function
def failing_function():
a = constant_op.constant(1.)
with ops.init_scope():
_ = a + a
with self.assertRaisesRegexp(
TypeError,
re.compile('An op outside of the function.*passed.*Const', re.DOTALL)):
failing_function()
def testNonUniqueNamesGetConcreteFunction(self):
@def_function.function
def non_unique_arg_names(x, **kwargs):
a, b, c = x
d = kwargs['d']
return a + b + c + d
concrete = non_unique_arg_names.get_concrete_function(
(tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)),
d=tensor_spec.TensorSpec(None, dtypes.float32))
self.assertAllClose(
10.,
concrete(x=constant_op.constant(1.),
x_1=constant_op.constant(2.),
x_2=constant_op.constant(3.),
d=constant_op.constant(4.)))
self.assertAllClose(
10.,
concrete(constant_op.constant(1.),
constant_op.constant(2.),
constant_op.constant(3.),
constant_op.constant(4.)))
def testVariableCreatorScope(self):
created_variables = []
captured_variables = []
@def_function.function
def f():
if not created_variables:
created_variables.append(variables.Variable(1.))
return created_variables[0] + 1.
def capture_creator(next_creator, **kwargs):
created = next_creator(**kwargs)
captured_variables.append(created)
return created
with variable_scope.variable_creator_scope(capture_creator):
f()
self.assertEqual(created_variables, captured_variables)
def testVarAlreadyInitializedNoClobbering(self):
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
add_var.get_concrete_function(constant_op.constant(2.))
self.assertAllClose([13., 14.], add_var(constant_op.constant(2.)))
def testSameVariableTwice(self):
v = variables.Variable(1.0)
@def_function.function
def add(a, b):
return a + b
self.assertAllEqual(add(v, v), 2.0)
def testShapeCache(self):
@def_function.function
def func(x):
return 2 * x
func_a = func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
func_b = func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
self.assertIs(func_a, func_b)
def testInitializationInNestedCall(self):
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
@def_function.function
def wrapper(x):
return add_var(x)
self.assertAllClose([13., 14.], wrapper(constant_op.constant(2.)))
v_holder[1].assign(11.)
self.assertAllClose([14., 15.], wrapper(constant_op.constant(2.)))
@test_util.run_gpu_only
def testDeviceAnnotationRespected(self):
a = []
@def_function.function()
def create_variable():
with ops.init_scope():
initial_value = random_ops.random_uniform(
(2, 2), maxval=1000000, dtype=dtypes.int64)
if not a:
with ops.device("CPU:0"):
a.append(resource_variable_ops.ResourceVariable(initial_value))
return a[0].read_value()
created_variable_read = create_variable()
self.assertRegexpMatches(created_variable_read.device, "CPU")
def testDecorate(self):
func = def_function.function(lambda: 1)
def decorator(f):
return lambda: 1 + f()
func._decorate(decorator)
self.assertEqual(func().numpy(), 2)
def testLiftPlaceholderInitializedVariable(self):
with ops.Graph().as_default():
var_list = []
@def_function.function
def use_variable():
if not var_list:
initial_value = array_ops.placeholder(shape=[], dtype=dtypes.float32)
v = variables.Variable(initial_value)
var_list.append(v)
return var_list[0] + 1.
var_plus_one = use_variable()
with self.session() as session:
init_op = var_list[0].initializer
session.run(init_op, feed_dict={init_op.inputs[1]: 2.})
self.assertEqual(3., session.run(var_plus_one))
def testDecorate_rejectedAfterTrace(self):
func = def_function.function(lambda: 1)
self.assertEqual(func().numpy(), 1)
msg = 'Functions cannot be decorated after they have been traced.'
with self.assertRaisesRegexp(ValueError, msg):
func._decorate(lambda f: f)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/eager/def_function_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions called by the generated code to execute an eager-mode op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None):
"""Execute a TensorFlow operation.
Args:
op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
execute.
num_outputs: The number of outputs of the operation to fetch.
(Explicitly provided instead of being inferred for performance
reasons).
inputs: A list of inputs to the operation. Each entry should be a Tensor, or
a value which can be passed to the Tensor constructor to create one.
attrs: A tuple with alternating string attr names and attr values for this
operation.
ctx: The value of context.context().
name: Customized name for the operation.
Returns:
    List of output Tensor objects. The list is empty if there are no outputs.
Raises:
An exception on error.
"""
device_name = ctx.device_name
# pylint: disable=protected-access
try:
ctx.ensure_initialized()
tensors = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
op_name, inputs, attrs,
num_outputs)
except core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
six.raise_from(core._status_to_exception(e.code, message), None)
except TypeError as e:
if any(ops._is_keras_symbolic_tensor(x) for x in inputs):
raise core._SymbolicException
raise e
# pylint: enable=protected-access
return tensors
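# Editor's note: a minimal usage sketch, not part of the original module.
# It shows how a generated op wrapper might call quick_execute; the op name
# "Identity" and its single "T" attr are illustrative assumptions.
def _example_quick_execute_identity(x):
  """Hypothetical wrapper running the "Identity" op on eager tensor `x`."""
  from tensorflow.python.eager import context  # local import for the sketch
  ctx = context.context()
  # Attrs are passed as a flat tuple of alternating attr names and values.
  attrs = ("T", x.dtype.as_datatype_enum)
  result, = quick_execute("Identity", 1, [x], attrs, ctx)
  return result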
def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name=None):
"""Monkey-patch to execute to enable execution callbacks."""
tensors = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
for callback in ctx.post_execution_callbacks:
callback(op_name, inputs, attrs, tensors, name)
return tensors
execute = quick_execute
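# Editor's note: an illustrative sketch (assumption, not from the original
# file) of the callback signature consumed by execute_with_callbacks above.
def _example_post_execution_callback(op_name, inputs, attrs, tensors, name):
  """Hypothetical callback: report each executed op and its output count."""
  print("executed %s -> %d output(s)" % (op_name, len(tensors)))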
def record_gradient(unused_op_name, unused_inputs, unused_attrs, unused_results,
unused_name):
"""Import backprop if you want gradients recorded."""
pass
def make_float(v, arg_name):
if not isinstance(v, compat.real_types):
raise TypeError("Expected float for argument '%s' not %s." %
(arg_name, repr(v)))
return float(v)
def make_int(v, arg_name):
if isinstance(v, six.string_types):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
try:
return int(v)
except (ValueError, TypeError):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
def make_str(v, arg_name):
if not isinstance(v, compat.bytes_or_text_types):
raise TypeError("Expected string for argument '%s' not %s." %
(arg_name, repr(v)))
return compat.as_bytes(v) # Convert unicode strings to bytes.
def make_bool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def make_type(v, arg_name):
try:
v = dtypes.as_dtype(v).base_dtype
except TypeError:
raise TypeError("Expected DataType for argument '%s' not %s." %
(arg_name, repr(v)))
i = v.as_datatype_enum
return i
def make_shape(v, arg_name):
  """Convert v into a list.

  Args:
    v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
    arg_name: String, for error messages.

  Returns:
    None if the rank is unknown, otherwise a list of ints (or Nones in the
    positions where the dimension is unknown).
  """
try:
shape = tensor_shape.as_shape(v)
except TypeError as e:
raise TypeError("Error converting %s to a TensorShape: %s." % (arg_name, e))
except ValueError as e:
raise ValueError("Error converting %s to a TensorShape: %s." % (arg_name,
e))
if shape.ndims is None:
return None
else:
return shape.as_list()
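# Editor's note: illustrative behavior of make_shape, derived from the code
# above (not part of the original file):
#   make_shape([1, 2, 3], "s")                      -> [1, 2, 3]
#   make_shape([1, None, 3], "s")                   -> [1, None, 3]
#   make_shape(tensor_shape.TensorShape(None), "s") -> None  (unknown rank)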
def make_tensor(v, arg_name):
"""Ensure v is a TensorProto."""
if isinstance(v, tensor_pb2.TensorProto):
return v
elif isinstance(v, six.string_types):
pb = tensor_pb2.TensorProto()
text_format.Merge(v, pb)
return pb
raise TypeError(
"Don't know how to convert %s to a TensorProto for argument '%s'." %
(repr(v), arg_name))
def args_to_matching_eager(l, ctx, default_dtype=None):
"""Convert sequence `l` to eager same-type Tensors."""
EagerTensor = ops.EagerTensor # pylint: disable=invalid-name
for x in l:
if not isinstance(x, EagerTensor):
break
else: # note: intentional for-else
return l[0]._datatype_enum(), l # pylint: disable=protected-access
# TODO(josh11b): Could we do a better job if we also passed in the
# allowed dtypes when that was known?
# Is some input already a Tensor with a dtype?
dtype = None
for t in l:
if isinstance(t, EagerTensor):
dtype = t.dtype
break
internal_convert_to_tensor = ops.internal_convert_to_tensor
if dtype is None:
# Infer a dtype based on the first value, and use that dtype for the
# remaining values.
ret = []
for t in l:
ret.append(internal_convert_to_tensor(
t, dtype,
preferred_dtype=default_dtype,
ctx=ctx,
accept_symbolic_tensors=False))
if dtype is None:
dtype = ret[-1].dtype
else:
ret = [internal_convert_to_tensor(t, dtype, ctx=ctx) for t in l]
return dtype.as_datatype_enum, ret
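# Editor's note: a minimal usage sketch (assumption, not part of the original
# file). The first convertible value fixes the dtype for the rest of the
# sequence, so mixed Python numbers come back as same-dtype tensors.
def _example_args_to_matching_eager():
  """Hypothetical helper: [1.0, 2, 3] all convert to float32 tensors."""
  from tensorflow.python.eager import context  # local import for the sketch
  dtype_enum, tensors = args_to_matching_eager([1.0, 2, 3], context.context())
  return dtype_enum, tensors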
def convert_to_mixed_eager_tensors(values, ctx):
v = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values]
types = [t._datatype_enum() for t in v] # pylint: disable=protected-access
return types, v
def args_to_mixed_eager_tensors(lists, ctx):
"""Converts a list of same-length lists of values to eager tensors."""
assert len(lists) > 1
# Generate an error if len(lists[i]) is not the same for all i.
lists_ret = []
for l in lists[1:]:
if len(l) != len(lists[0]):
raise ValueError(
"Expected list arguments to be the same length: %d != %d (%r vs. %r)."
% (len(lists[0]), len(l), lists[0], l))
lists_ret.append([])
# Convert the first element of each list first, then the second element, etc.
types = []
for i in range(len(lists[0])):
dtype = None
# If any list has a Tensor, use that dtype
for l in lists:
if isinstance(l[i], ops.EagerTensor):
dtype = l[i].dtype
break
if dtype is None:
# Convert the first one and use its dtype.
lists_ret[0].append(ops.internal_convert_to_tensor(lists[0][i], ctx=ctx))
dtype = lists_ret[0][i].dtype
for j in range(1, len(lists)):
lists_ret[j].append(
ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
else:
# Convert everything to the found dtype.
for j in range(len(lists)):
lists_ret[j].append(
ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
types.append(dtype.as_datatype_enum)
return types, lists_ret
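# Editor's note: illustrative behavior (assumption, not part of the original
# file). Parallel lists are converted column by column, so each column shares
# one dtype even when the raw values are mixed:
#   types, ret = args_to_mixed_eager_tensors([[1.0, 2], [3, 4]], ctx)
#   # column 0: 1.0 fixes float32, so 3 is converted to float32 as well
#   # column 1: 2 fixes int32, so 4 is converted to int32 as well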
|
tensorflow-master
|
tensorflow/python/eager/execute.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph-only versions of a few op functions, for internal use only."""
# Must be separate from array_ops to avoid a cyclic dependency.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
def graph_zeros_like(tensor):
"""Graph-only version of tf.zeros_like(), for internal use only."""
g = ops._get_graph_from_inputs([tensor]) # pylint: disable=protected-access
with g.as_default(), ops.name_scope(None, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
dtype = tensor.dtype.base_dtype
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
op = g.create_op("ZerosLike", [tensor], [dtype], input_types=[dtype],
attrs={"T": dtype_value}, name=name)
result, = op.outputs
return result
def graph_placeholder(dtype, shape, name=None):
"""Graph-only version of tf.compat.v1.placeholder(), for internal use only."""
dtype = dtype.base_dtype
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
if isinstance(shape, (list, tuple)):
shape = tensor_shape.TensorShape(shape)
shape = attr_value_pb2.AttrValue(shape=shape.as_proto())
g = ops.get_default_graph()
with ops.name_scope(name, "placeholder", []) as name:
op = g.create_op("Placeholder", [], [dtype], input_types=[],
attrs={"dtype": dtype_value, "shape": shape}, name=name)
result, = op.outputs
return result
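# Editor's note: an illustrative sketch (assumption, not part of the original
# file) of how these graph-only helpers are used: inside an explicit graph
# context, where the regular eager paths are not wanted.
def _example_graph_only_usage():
  """Hypothetical usage: build a placeholder and a matching zeros node."""
  from tensorflow.python.framework import dtypes  # local import for the sketch
  with ops.Graph().as_default():
    x = graph_placeholder(dtypes.float32, shape=[2, 2])
    return graph_zeros_like(x)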
|
tensorflow-master
|
tensorflow/python/eager/graph_only_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for TensorFlow "Eager" Mode's Tensor class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import sys
import unittest
import numpy as np
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import io_ops
def _create_tensor(value, device=None, dtype=None):
context.ensure_initialized()
ctx = context.context()
if device is None:
device = ctx.device_name
if dtype is not None:
dtype = dtype.as_datatype_enum
try:
return ops.EagerTensor(
value, context=ctx._handle, device=device, dtype=dtype)
except core._NotOkStatusException as e: # pylint: disable=protected-access
raise core._status_to_exception(e.code, e.message)
class TFETensorTest(test_util.TensorFlowTestCase):
def testScalarTensor(self):
t = _create_tensor(3, dtype=dtypes.int32)
self.assertAllEqual(t, _create_tensor(np.array(3)))
self.assertEqual(dtypes.int32, t.dtype)
self.assertEqual(0, t.shape.ndims)
self.assertAllEqual([], t.shape.as_list())
self.assertIn("tf.Tensor", str(t))
self.assertIn("tf.Tensor", repr(t))
def testBadConstructorArgs(self):
context.ensure_initialized()
ctx = context.context()
handle = ctx._handle
device = ctx.device_name
# Missing context.
with self.assertRaisesRegexp(
TypeError, r".*argument 'context' \(pos 2\).*"):
ops.EagerTensor(1, device=device)
# Missing device.
with self.assertRaisesRegexp(
TypeError, r".*argument 'device' \(pos 3\).*"):
ops.EagerTensor(1, context=handle)
# Bad dtype type.
with self.assertRaisesRegexp(TypeError,
"Expecting a DataType value for dtype. Got"):
ops.EagerTensor(1, context=handle, device=device, dtype="1")
# Following errors happen when trying to copy to GPU.
if not test_util.is_gpu_available():
self.skipTest("No GPUs found")
with ops.device("/device:GPU:0"):
device = ctx.device_name
# Bad context.
with self.assertRaisesRegexp(
TypeError, "Expecting a PyCapsule encoded context handle. Got"):
ops.EagerTensor(1.0, context=1, device=device)
# Bad device.
with self.assertRaisesRegexp(
TypeError, "Error parsing device argument to CopyToDevice"):
ops.EagerTensor(1.0, context=handle, device=1)
def testNumpyValue(self):
values = np.array([3.0])
t = _create_tensor(values)
self.assertAllEqual(values, t)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNumpyDtypeSurvivesThroughTensorConversion(self):
scalar_creators = [np.int32, np.int64, np.float32, np.float64]
conversion_functions = [ops.convert_to_tensor, constant_op.constant]
for scalar_creator in scalar_creators:
for conversion_function in conversion_functions:
np_val = scalar_creator(3)
tensor_val = conversion_function(np_val)
self.assertEqual(tensor_val.numpy().dtype, np_val.dtype)
self.assertEqual(tensor_val.numpy(), np_val)
def testNumpyValueWithCast(self):
values = np.array([3.0], dtype=np.float32)
t = _create_tensor(values, dtype=dtypes.float64)
self.assertAllEqual(values, t)
ctx = context.context()
# Bad dtype value.
with self.assertRaisesRegexp(TypeError, "Invalid dtype argument value"):
ops.EagerTensor(
values, context=ctx._handle, device=ctx.device_name, dtype=12345)
def testNumpyOrderHandling(self):
n = np.array([[1, 2], [3, 4]], order="F")
t = _create_tensor(n)
self.assertAllEqual([[1, 2], [3, 4]], t)
def testNumpyArrayDtype(self):
tensor = constant_op.constant([1.0, 2.0, 3.0])
numpy_tensor = np.asarray(tensor, dtype=np.int32)
self.assertAllEqual(numpy_tensor, [1, 2, 3])
def testNdimsAgreesWithNumpy(self):
numpy_tensor = np.asarray(1.0)
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
numpy_tensor = np.asarray([1.0, 2.0, 3.0])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
def testLenAgreesWithNumpy(self):
numpy_tensor = np.asarray(1.0)
tensor = constant_op.constant(numpy_tensor)
with self.assertRaises(TypeError):
len(numpy_tensor)
with self.assertRaisesRegexp(
TypeError, r"Scalar tensor has no `len[(][)]`"):
len(tensor)
numpy_tensor = np.asarray([1.0, 2.0, 3.0])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(len(numpy_tensor), len(tensor))
numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(len(numpy_tensor), len(tensor))
def testCopy(self):
t = constant_op.constant(1.0)
tt = copy.copy(t)
self.assertAllEqual(tt, 1.0)
del tt
tt = copy.deepcopy(t)
self.assertAllEqual(tt, 1.0)
del tt
self.assertAllEqual(t, 1.0)
def testConstantDtype(self):
self.assertEqual(
constant_op.constant(1, dtype=np.int64).dtype, dtypes.int64)
def testTensorAndNumpyMatrix(self):
expected = np.array([[1.0, 2.0], [3.0, 4.0]], np.float32)
actual = _create_tensor([[1.0, 2.0], [3.0, 4.0]])
self.assertAllEqual(expected, actual)
self.assertEqual(np.float32, actual.dtype)
self.assertEqual(dtypes.float32, actual.dtype)
self.assertAllEqual([2, 2], actual.shape.as_list())
def testFloatDowncast(self):
# Unless explicitly specified, float64->float32
t = _create_tensor(3.0)
self.assertEqual(dtypes.float32, t.dtype)
t = _create_tensor(3.0, dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t.dtype)
def testBool(self):
self.assertFalse(bool(_create_tensor(False)))
self.assertFalse(bool(_create_tensor([False])))
self.assertFalse(bool(_create_tensor([[False]])))
self.assertFalse(bool(_create_tensor([0])))
self.assertFalse(bool(_create_tensor([0.])))
self.assertTrue(bool(_create_tensor([1])))
self.assertTrue(bool(_create_tensor([1.])))
@unittest.skipUnless(six.PY2, "long has been removed in PY3")
def testLong(self):
self.assertEqual(long(_create_tensor(long(42))), 42)
def testIndex(self):
self.assertEqual([42][_create_tensor(0)], 42)
with self.assertRaises(TypeError):
_ = [42][_create_tensor([0])]
def testIntDowncast(self):
t = _create_tensor(3)
self.assertEqual(dtypes.int32, t.dtype)
t = _create_tensor(3, dtype=dtypes.int64)
self.assertEqual(dtypes.int64, t.dtype)
t = _create_tensor(2**33)
self.assertEqual(dtypes.int64, t.dtype)
def testTensorCreationFailure(self):
with self.assertRaises(ValueError):
      # Should fail because each row of the Python object has a different
# number of columns.
self.assertEqual(None, _create_tensor([[1], [1, 2]]))
def testMultiLineTensorStr(self):
t = _create_tensor(np.eye(3))
tensor_str = str(t)
self.assertIn("shape=%s, dtype=%s" % (t.shape, t.dtype.name), tensor_str)
    # The numpy contents should appear in the multi-line string form.
    self.assertIn(str(t.numpy()), tensor_str)
def testMultiLineTensorRepr(self):
t = _create_tensor(np.eye(3))
tensor_repr = repr(t)
self.assertTrue(tensor_repr.startswith("<"))
self.assertTrue(tensor_repr.endswith(">"))
self.assertIn("id=%d, shape=%s, dtype=%s, numpy=\n%r" %
(t._id, t.shape, t.dtype.name, t.numpy()), tensor_repr)
def testTensorStrReprObeyNumpyPrintOptions(self):
orig_threshold = np.get_printoptions()["threshold"]
orig_edgeitems = np.get_printoptions()["edgeitems"]
np.set_printoptions(threshold=2, edgeitems=1)
t = _create_tensor(np.arange(10, dtype=np.int32))
self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", str(t)))
self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", repr(t)))
# Clean up: reset to previous printoptions.
np.set_printoptions(threshold=orig_threshold, edgeitems=orig_edgeitems)
def testZeroDimTensorStr(self):
t = _create_tensor(42)
self.assertIn("42, shape=(), dtype=int32", str(t))
def testZeroDimTensorRepr(self):
t = _create_tensor(42)
self.assertTrue(repr(t).startswith("<"))
self.assertTrue(repr(t).endswith(">"))
self.assertIn("id=%d, shape=(), dtype=int32, numpy=42" % t._id, repr(t))
def testZeroSizeTensorStr(self):
t = _create_tensor(np.zeros(0, dtype=np.float32))
self.assertIn("[], shape=(0,), dtype=float32", str(t))
def testZeroSizeTensorRepr(self):
t = _create_tensor(np.zeros(0, dtype=np.float32))
self.assertTrue(repr(t).startswith("<"))
self.assertTrue(repr(t).endswith(">"))
self.assertIn("id=%d, shape=(0,), dtype=float32, numpy=%r" % (t._id,
t.numpy()),
repr(t))
def testStringTensor(self):
t_np_orig = np.array([[b"a", b"ab"], [b"abc", b"abcd"]])
t = _create_tensor(t_np_orig)
t_np = t.numpy()
self.assertTrue(np.all(t_np == t_np_orig), "%s vs %s" % (t_np, t_np_orig))
def testIterateOverTensor(self):
l = [[1, 2], [3, 4]]
t = _create_tensor(l)
for list_element, tensor_element in zip(l, t):
self.assertAllEqual(list_element, tensor_element.numpy())
@test_util.run_gpu_only
def testStringTensorOnGPU(self):
with ops.device("/device:GPU:0"):
with self.assertRaisesRegexp(
RuntimeError, "Can't copy Tensor with type string to device"):
_create_tensor("test string")
def testInvalidUTF8ProducesReasonableError(self):
if sys.version_info[0] < 3:
self.skipTest("Test is only valid in python3.")
with self.assertRaises(UnicodeDecodeError):
io_ops.read_file(b"\xff")
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorPreferredDtypeIsRespected(self):
self.assertEqual(
ops.convert_to_tensor(0.5, preferred_dtype=dtypes.int32).dtype,
dtypes.float32)
self.assertEqual(
ops.convert_to_tensor(0.5, preferred_dtype=dtypes.float64).dtype,
dtypes.float64)
@test_util.run_in_graph_and_eager_modes
def testCompatibility(self):
integer_types = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]
# Floats are not compatible with ints
for t in integer_types:
with self.assertRaises(TypeError):
constant_op.constant(0.5, dtype=t)
# Ints compatible with floats
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.float16)), 5.0)
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.float32)), 5.0)
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.float64)), 5.0)
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.bfloat16)), 5.0)
# Ints and floats are compatible with complex types
self.assertEqual(
constant_op.constant([[1.0]], dtype=dtypes.complex128).dtype,
dtypes.complex128)
self.assertEqual(
constant_op.constant([[1]], dtype=dtypes.complex128).dtype,
dtypes.complex128)
# Quantized types are not compatible with floats
quantized_types = [dtypes.qint16, dtypes.qint32, dtypes.qint8,
dtypes.quint16, dtypes.quint8]
for t in quantized_types:
with self.assertRaises(TypeError):
constant_op.constant(0.5, dtype=t)
# TODO(b/118402529): quantized types are broken in eager.
@test_util.run_in_graph_and_eager_modes
def testCConvertToTensor(self):
with self.assertRaises(TypeError):
_ = constant_op.constant(0) < 0.5
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorAllowsOverflow(self):
_ = ops.convert_to_tensor(123456789, dtype=dtypes.uint8)
@test_util.assert_no_new_pyobjects_executing_eagerly
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNumpyZeroDim(self):
for np_type, dtype in [(np.int32, dtypes.int32),
(np.half, dtypes.half),
(np.float32, dtypes.float32)]:
x = ops.convert_to_tensor([np.array(65, dtype=np_type),
np.array(16, dtype=np_type)])
self.assertEqual(x.dtype, dtype)
self.assertAllEqual(x, [65, 16])
@test_util.assert_no_new_pyobjects_executing_eagerly
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNumpyScalar(self):
x = ops.convert_to_tensor(
[np.array(321, dtype=np.int).item(),
np.array(16, dtype=np.int).item()])
self.assertAllEqual(x, [321, 16])
def testEagerTensorError(self):
with self.assertRaisesRegexp(
TypeError,
"Cannot convert provided value to EagerTensor. "
"Provided value.*Requested dtype.*"):
_ = ops.convert_to_tensor(1., dtype=dtypes.int32)
def testEagerLargeConstant(self):
for t in [dtypes.uint64, dtypes.uint32, dtypes.int32, dtypes.int64]:
self.assertEqual(
constant_op.constant(t.max, dtype=t).numpy(), t.max)
self.assertEqual(
constant_op.constant(t.min, dtype=t).numpy(), t.min)
def test_numpyIsView(self):
t = constant_op.constant([0.0])
t._numpy()[0] = 42.0
self.assertAllClose(t, constant_op.constant([42.0]))
def testMemoryviewIsReadonly(self):
t = constant_op.constant([0.0])
self.assertTrue(memoryview(t).readonly)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMemoryviewScalar(self):
t = constant_op.constant(42.0)
self.assertAllEqual(
np.array(memoryview(t)), np.array(42.0, dtype=np.float32))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMemoryviewEmpty(self):
t = constant_op.constant([], dtype=np.float32)
self.assertAllEqual(np.array(memoryview(t)), np.array([]))
@test_util.run_gpu_only
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMemoryviewCopyToCPU(self):
with ops.device("/device:GPU:0"):
t = constant_op.constant([0.0])
self.assertAllEqual(
np.array(memoryview(t)), np.array([0.0], dtype=np.float32))
class TFETensorUtilTest(test_util.TensorFlowTestCase):
def testListOfThree(self):
t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32)
t2 = _create_tensor([[1, 2, 5], [3, 4, 5]], dtype=dtypes.int32)
t3 = _create_tensor([[1], [3], [5], [6]], dtype=dtypes.int32)
r = pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2, t3], 0)
self.assertAllEqual(np.array([3, 2, 4]), r.numpy())
r = pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2, t3], 1)
self.assertAllEqual(np.array([2, 3, 1]), r.numpy())
def testEmptyTensorList(self):
a = pywrap_tensorflow.TFE_Py_TensorShapeSlice([], 0)
self.assertTrue(isinstance(a, ops.EagerTensor))
self.assertEqual(0, a.numpy().size)
def testTensorListContainsNonTensors(self):
t1 = _create_tensor([1, 2], dtype=dtypes.int32)
with self.assertRaisesRegexp(
TypeError,
r"Expected a list of EagerTensors but element 1 has type \"str\""):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, "abc"], 0)
with self.assertRaisesRegexp(
TypeError,
r"Expected a list of EagerTensors but element 0 has type \"int\""):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([2, t1], 0)
def testTensorListNotList(self):
t1 = _create_tensor([1, 2], dtype=dtypes.int32)
with self.assertRaisesRegexp(
TypeError,
r"tensors argument must be a list or a tuple. Got.*EagerTensor"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice(t1, -2)
def testNegativeSliceDim(self):
t1 = _create_tensor([1, 2], dtype=dtypes.int32)
with self.assertRaisesRegexp(
ValueError,
r"Slice dimension must be non-negative. Got -2"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1], -2)
def testUnicode(self):
self.assertEqual(constant_op.constant(u"asdf").numpy(), b"asdf")
def testFloatTensor(self):
self.assertEqual(dtypes.float64, _create_tensor(np.float64()).dtype)
self.assertEqual(dtypes.float32, _create_tensor(np.float32()).dtype)
self.assertEqual(dtypes.float16, _create_tensor(np.float16()).dtype)
self.assertEqual(dtypes.float32, _create_tensor(0.0).dtype)
def testSliceDimOutOfRange(self):
t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32)
t2 = _create_tensor([1, 2], dtype=dtypes.int32)
t3 = _create_tensor(2, dtype=dtypes.int32)
with self.assertRaisesRegexp(
IndexError,
r"Slice dimension \(2\) must be smaller than rank of all tensors, "
"but tensor at index 0 has rank 2"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1], 2)
with self.assertRaisesRegexp(
IndexError,
r"Slice dimension \(1\) must be smaller than rank of all tensors, "
"but tensor at index 0 has rank 1"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t2], 1)
with self.assertRaisesRegexp(
IndexError,
r"Slice dimension \(1\) must be smaller than rank of all tensors, "
"but tensor at index 1 has rank 1"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2], 1)
with self.assertRaisesRegexp(
IndexError,
r"Slice dimension \(0\) must be smaller than rank of all tensors, "
"but tensor at index 0 has rank 0"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t3], 0)
with self.assertRaisesRegexp(
IndexError,
r"Slice dimension \(0\) must be smaller than rank of all tensors, "
"but tensor at index 2 has rank 0"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t2, t1, t3], 0)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testTensorDir(self):
t = array_ops.zeros(1)
t.test_attr = "Test"
instance_dir = dir(t)
type_dir = dir(ops.EagerTensor)
# Monkey patched attributes should show up in dir(t)
self.assertIn("test_attr", instance_dir)
instance_dir.remove("test_attr")
self.assertEqual(instance_dir, type_dir)
def testNonRectangularPackAsConstant(self):
l = [array_ops.zeros((10, 1)).numpy(), array_ops.zeros(1).numpy()]
with self.assertRaisesRegexp(
ValueError, "non-rectangular Python sequence"):
constant_op.constant(l)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/eager/tensor_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for TensorFlow's "Eager" mode of execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
# Trace of execution and memory usage.
_active_trace = None
def _status_to_exception(code, message):
try:
error_class = errors.exception_type_from_error_code(code)
return error_class(None, None, message)
except KeyError:
return errors.UnknownError(None, None, message, code)
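# Editor's note: illustrative mapping (assumption, not part of the original
# file). Status code 3 (INVALID_ARGUMENT) resolves to InvalidArgumentError,
# while an unrecognized code falls back to UnknownError:
#   _status_to_exception(3, "bad shape") -> errors.InvalidArgumentError
#   _status_to_exception(-1, "mystery")  -> errors.UnknownError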
class _NotOkStatusException(Exception):
"""Exception class to handle not ok Status."""
def __init__(self, message, code):
super(_NotOkStatusException, self).__init__()
self.message = message
self.code = code
def __str__(self):
e = _status_to_exception(self.code, self.message)
return "%s: %s" % (e.__class__.__name__, e)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(_NotOkStatusException)
class _FallbackException(Exception):
"""Exception class to handle fallback from the fastpath.
The fastpath that we refer to here is the one implemented to reduce per-op
overheads (TFE_Py_FastPathExecute_C). If the conditions for executing the op
on the fastpath are not met, we fallback to a safer (and more complete)
slowpath, and this Exception is raised to signal that transition.
"""
pass
class _SymbolicException(Exception):
"""Exception class to handle use of symbolic tensors when executing eagerly.
`keras.Input()` creates symbolic tensors (in a FuncGraph managed by the
Keras backend) while in eager execution. This exception is used to
  identify this case (raised in `convert_to_tensor`, causing generated
  functions for ops to construct graphs instead of executing the kernel).
"""
pass
pywrap_tensorflow.TFE_Py_RegisterFallbackExceptionClass(_FallbackException)
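# Editor's note: a minimal sketch, not part of the original module, of the
# fastpath/slowpath pattern these exception classes support.
def _example_fastpath_fallback(fast_fn, slow_fn, *args):
  """Hypothetical dispatcher: try the fastpath, fall back when signaled."""
  try:
    return fast_fn(*args)
  except _FallbackException:
    # Fastpath preconditions were not met; use the complete slowpath.
    return slow_fn(*args)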
|
tensorflow-master
|
tensorflow/python/eager/core.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers.pooling import max_pooling3d
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import training
class BackpropTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testAggregateGradients(self):
def fn(x):
ind1 = constant_op.constant(np.array([0, 1]))
ind2 = constant_op.constant(np.array([2, 3]))
ind3 = constant_op.constant(np.array([1, 3]))
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = embedding_ops.embedding_lookup(x, ind2)
g3 = embedding_ops.embedding_lookup(x, ind3)
return g1 * g2 * g3
var_np = np.random.rand(4, 2).astype(np.float32)
var = constant_op.constant(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
grad = self.evaluate(ops.convert_to_tensor(grad))
if not context.executing_eagerly():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_ind2 = array_ops.constant([2, 3])
tf_ind3 = array_ops.constant([1, 3])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
tf_y = tf_g1 * tf_g2 * tf_g3
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
tf_dense_grad = math_ops.unsorted_segment_sum(
tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])
self.assertAllClose(grad, self.evaluate(tf_dense_grad))
@test_util.run_in_graph_and_eager_modes
def testAggregateGradientsWithTensor(self):
def fn(x):
ind1 = constant_op.constant(np.array([0, 1]))
# A mixture of IndexedSlices and dense tensor to aggregate.
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = math_ops.reduce_sum(x * constant_op.constant(2.0))
return g1 * g2
var_np = np.random.rand(4, 2).astype(np.float32)
var = constant_op.constant(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
grad = self.evaluate(ops.convert_to_tensor(grad))
if not context.executing_eagerly():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = math_ops.reduce_sum(tf_var * 2.0, axis=(0, 1))
tf_y = tf_g1 * tf_g2
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
self.assertAllClose(grad, tf_grad)
def testImplicitGradWithResourceVariable(self):
x = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(1.0), name='x')
def fn():
b = constant_op.constant(2.0)
c = math_ops.add(x.value(), b)
return math_ops.add(c, constant_op.constant(3.0))
grads_and_vars = backprop.implicit_grad(fn)()
self.assertAllEqual(grads_and_vars[0][0], 1.0)
self.assertAllEqual(id(grads_and_vars[0][1]), id(x))
def testGradientInsideLoop(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
def body(_):
_ = v + 1.0 # This reads the variable inside the loop context
with backprop.GradientTape() as t:
result = v * 2
self.assertTrue(t.gradient(result, v) is not None)
return 1.0
control_flow_ops.while_loop(lambda i: False, body, [1.0])
def testWhereGradient(self):
# Note: where is special because only some of its arguments are of
# differentiable dtypes.
def f(x):
return array_ops.where(x < 10, x, x * x)
g = backprop.gradients_function(f)
self.assertAllEqual(g(5.)[0], 1.0)
self.assertAllEqual(g(50.)[0], 100.0)
def testTwoTargets(self):
with backprop.GradientTape() as t:
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
t.watch([x, y])
xx = 2 * x
yy = 3 * y
dx, dy = t.gradient([xx, yy], [x, y])
self.assertAllEqual(dx, 2.0)
self.assertAllEqual(dy, 3.0)
def testCustomGradientEmptyError(self):
@custom_gradient.custom_gradient
def identity(x):
def grad(_):
return [] # This return value is wrong!
return x, grad
x = variables.Variable(1.0)
with backprop.GradientTape() as t:
y = identity(x)
with self.assertRaises(ValueError):
t.gradient(y, [x])
def testOutputGradUsedInComputation(self):
with backprop.GradientTape() as t:
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
t.watch([x, y])
loss = x * y
dx, = t.gradient([loss, x], [x], output_gradients=[1.0, 2.0])
self.assertAllEqual(dx, 4.0)
def testDy(self):
def f(x):
return x
grad_fn = backprop.gradients_function(f)
self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
def testGradientInteger(self):
def f(x):
return x + x
int_tensor = constant_op.constant(1)
self.assertEqual(backprop.gradients_function(f)(int_tensor)[0], None)
def testErrors(self):
@custom_gradient.custom_gradient
def f(x):
def grad(_):
raise RuntimeError('x')
return x, grad
# TODO(apassos) raise the right error here
with self.assertRaises(RuntimeError):
backprop.gradients_function(f)(constant_op.constant(1.0))
def testGradientsFunctionInCustomGradient(self):
@custom_gradient.custom_gradient
def f(x):
(y,) = backprop.gradients_function(lambda x: x * x)(x)
def grad(dy):
return [2 * dy]
return y, grad
self.assertAllEqual(f(1.0), 2.0)
def testImplicitGradOverEmbeddingLookup(self):
batch_size = 8
embedding_size = 512
vocab_size = 1000
lrn_rate = 0.1
random_init = random_ops.random_uniform([vocab_size, embedding_size])
x = array_ops.ones((batch_size), dtypes.int64)
embedding = resource_variable_ops.ResourceVariable(
initial_value=random_init, dtype=dtypes.float32, name='embedding')
def f():
embedded_x = embedding_ops.embedding_lookup(embedding, x)
return constant_op.constant(1.0, dtypes.float32) - embedded_x
grad = backprop.implicit_grad(f)()[0][0]
opt = training.GradientDescentOptimizer(lrn_rate)
with ops.Graph().as_default(), self.cached_session():
tf_x = array_ops.ones((batch_size), dtypes.int64)
# TODO(ashankar,apassos): Change to ResourceVariable.
tf_embedding = variables.Variable(
random_init.numpy(), name='tf_embedding')
tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x)
tf_y = 1.0 - tf_embedded_x
tf_grad = gradients.gradients(tf_y, [tf_embedding])[0]
tf_opt = training.GradientDescentOptimizer(0.1)
tf_embedding.initializer.run()
self.assertAllClose(tf_grad.indices.eval(), grad.indices)
self.assertAllClose(tf_grad.values.eval(), grad.values)
tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()
expected = self.evaluate(tf_embedding)
opt.apply_gradients([(grad, embedding)])
self.assertAllClose(expected, embedding.read_value())
def testImplicitGradOrdering(self):
v0 = resource_variable_ops.ResourceVariable(1.0)
v1 = resource_variable_ops.ResourceVariable(2.0)
def f():
x = v1 * v1
y = v0 * v0
return x + y
grads = backprop.implicit_grad(f)()
ordered_variables = [x[1] for x in grads]
self.assertTrue(ordered_variables[0] is v0)
self.assertTrue(ordered_variables[1] is v1)
def testTapeNoOpGradient(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
y = x
self.assertEqual(t.gradient(y, x).numpy(), 1.0)
def testTapeIdentityGradientIsIdentity(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
y = array_ops.identity(x)
self.assertEqual(t.gradient(y, x).numpy(), 1.0)
def testTapeGradientMultiTargetOneIsSource(self):
x = constant_op.constant(2.0)
with backprop.GradientTape() as t:
t.watch(x)
y = x*x
self.assertEqual(t.gradient([x, y], x).numpy(), 5.0)
def testTapeNoOpGradientWithMultiTargetAllSource(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
y = x
self.assertEqual(t.gradient([y, y], x).numpy(), 2.0)
def testTapeNoOpGradientWithMultiTargetMultiSource(self):
x = constant_op.constant(3.0)
y = constant_op.constant(5.0)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(y)
z = y * y
self.assertAllEqual(t.gradient([x, y, z], [x, y]), [1.0, 11.0])
def testTapeGradientStringTarget(self):
s = constant_op.constant('unknown', dtype=dtypes.string)
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(s)
grads = t.gradient(s, x)
self.assertEqual(grads, None)
def testTapeNoOpGradientStringSourceAndTarget(self):
s = constant_op.constant('unknown', dtype=dtypes.string)
with backprop.GradientTape() as t:
t.watch(s)
grads = t.gradient(s, s)
self.assertEqual(grads, None)
def testTapeNoOpGradientWithMultiTargetMultiSourceIncludeString(self):
x = constant_op.constant(3.0)
y = constant_op.constant(5.0)
s = constant_op.constant('unknown', dtype=dtypes.string)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(y)
t.watch(s)
z = y * y
grads = t.gradient([x, y, z, s], [x, y, s])
self.assertAllEqual(grads[:2], [1.0, 11.0])
self.assertEqual(grads[2], None)
def testTapeNoOpOnVariableIsIdentity(self):
v0 = resource_variable_ops.ResourceVariable(1.0)
with backprop.GradientTape() as t:
y = v0.read_value()
self.assertEqual(t.gradient(y, v0).numpy(), 1.0)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testTapeNoOpGradient2By2(self):
a_2_by_2 = constant_op.constant(2.0, shape=[2, 2])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
dy_dy = tape.gradient(a_2_by_2, [a_2_by_2])[0]
self.assertAllEqual(dy_dy.numpy(),
constant_op.constant(1.0, shape=[2, 2]).numpy())
@test_util.assert_no_new_pyobjects_executing_eagerly
def testTapeNoOpGradientMultiTarget2By2(self):
a_2_by_2 = constant_op.constant(2.0, shape=[2, 2])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
dy_dy = tape.gradient([a_2_by_2, a_2_by_2], [a_2_by_2])[0]
self.assertAllEqual(dy_dy.numpy(),
constant_op.constant(2.0, shape=[2, 2]).numpy())
def testTapeStopRecording(self):
with backprop.GradientTape() as t:
x = resource_variable_ops.ResourceVariable(1.0)
with t.stop_recording():
y = x * x
self.assertEqual(t.gradient(y, x), None)
def testTapeStopStartRecording(self):
with backprop.GradientTape(persistent=True) as t:
x = resource_variable_ops.ResourceVariable(1.0)
x2 = x * 2 # This should be differentiated through.
with t.stop_recording():
y = x2 * x2
z = x2 * x2
self.assertEqual(t.gradient(y, x2), None)
    # If the x*2 was not differentiated through, this would be 2.0, not 4.0.
self.assertEqual(t.gradient(z, x2).numpy(), 4.0)
def testTapeReset(self):
with backprop.GradientTape() as t:
v = resource_variable_ops.ResourceVariable(1.0)
loss = v * v
t.reset()
loss += v * v
self.assertAllEqual(t.gradient(loss, v), 2.0)
def testPythonMax(self):
x = [resource_variable_ops.ResourceVariable(2.),
resource_variable_ops.ResourceVariable(3.),
resource_variable_ops.ResourceVariable(5.)]
with backprop.GradientTape() as t:
f = max(x)
grad = t.gradient(f, x)
self.assertAllEqual(self.evaluate(f), 5.)
self.assertAllEqual(self.evaluate(grad), [None, None, 1.0])
def testAutomaticWatchedVariables(self):
with backprop.GradientTape() as t:
self.assertEqual(0, len(t.watched_variables()))
v = resource_variable_ops.ResourceVariable(1.0)
loss = v * v
self.assertAllEqual([v], t.watched_variables())
t.reset()
self.assertEqual(0, len(t.watched_variables()))
loss += v * v
self.assertAllEqual([v], t.watched_variables())
def testExplicitWatchedVariables(self):
with backprop.GradientTape() as t:
self.assertEqual(0, len(t.watched_variables()))
v = resource_variable_ops.ResourceVariable(1.0)
t.watch(v)
self.assertAllEqual([v], t.watched_variables())
t.reset()
self.assertEqual(0, len(t.watched_variables()))
t.watch(v)
self.assertAllEqual([v], t.watched_variables())
@test_util.assert_no_new_tensors
def testGradientNone(self):
def loss(x, l):
return math_ops.reduce_mean(
nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l),
constant_op.constant([0]))
logits = constant_op.constant([[0.0, 0.0]])
labels = constant_op.constant([[1.0, 0.0]])
# softmax_cross_entropy_with_logits returns two outputs and in this case the
# gradient wrt the second is None.
g, = backprop.gradients_function(loss, [0])(logits, labels)
self.assertAllEqual(g.numpy(), [[-0.5, 0.5]])
@test_util.run_in_graph_and_eager_modes
def testGradientWithinTapeBlock(self):
v1 = resource_variable_ops.ResourceVariable(1.)
self.evaluate(v1.initializer)
with backprop.GradientTape() as t:
loss = 2 * v1
grad = t.gradient(loss, v1)
self.assertAllEqual(self.evaluate(grad), 2.0)
with backprop.GradientTape(persistent=True) as t:
loss = 2 * v1
grad = t.gradient(loss, v1)
self.assertAllEqual(self.evaluate(grad), 2.0)
@test_util.run_in_graph_and_eager_modes
def testNestedSelfContexts(self):
v1 = resource_variable_ops.ResourceVariable(1.)
self.evaluate(v1.initializer)
with backprop.GradientTape() as t:
with self.assertRaises(ValueError):
with t:
pass
@test_util.assert_no_new_tensors
def testSecondGrad(self):
def first(x):
l = constant_op.constant([[0.0]])
x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x)
x = math_ops.reduce_sum(x, constant_op.constant([0]))
return x
def second(x):
grad = backprop.gradients_function(first, [0])(x)[0]
return math_ops.reduce_sum(grad, constant_op.constant([0]))
f = constant_op.constant([[0.1]])
grad = backprop.gradients_function(second, [0])(f)[0]
self.assertAllEqual([[0.0]], grad)
@test_util.run_in_graph_and_eager_modes
def testWatchingIsTapeLocal(self):
x1 = resource_variable_ops.ResourceVariable(2.0, trainable=False)
x2 = resource_variable_ops.ResourceVariable(2.0, trainable=False)
with backprop.GradientTape() as tape1:
with backprop.GradientTape() as tape2:
tape1.watch(x1)
tape2.watch([x1, x2])
y = x1 ** 3
z = x2 ** 2
dy, dz = tape2.gradient([y, z], [x1, x2])
d2y, d2z = tape1.gradient([dy, dz], [x1, x2])
self.evaluate([x1.initializer, x2.initializer])
self.assertEqual(self.evaluate(d2y), 12.0)
self.assertIsNone(d2z)
@test_util.assert_no_new_tensors
def testMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f, persistent=False)
result, vjp = wrapped_fn(constant_op.constant(3.0))
self.assertAllEqual(result, 9.0)
self.assertAllEqual(vjp(2.0)[0], 12.0)
def testPersistentMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f, persistent=True)
_, vjp = wrapped_fn(constant_op.constant(3.0))
vjp_result1 = vjp(2.0)[0]
vjp_result2 = vjp(2.0)[0]
    # The persistent vjp should be callable repeatedly and yield the expected
    # value both times (2.0 * 2 * 3.0 == 12.0).
    self.assertAllEqual(vjp_result1, 12.0)
    self.assertAllEqual(vjp_result2, 12.0)
@test_util.assert_no_new_tensors
def testGradGrad(self):
def sq(x):
return x * x
def grad(x):
value = backprop.gradients_function(sq, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(3.0))[0], 2.0)
@test_util.assert_no_new_tensors
def testGradGradExp(self):
def grad(x):
value = backprop.gradients_function(math_ops.exp, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(0.0))[0], 1.0)
@test_util.assert_no_new_tensors
def testStopGradient(self):
grad = backprop.gradients_function(
lambda x: array_ops.stop_gradient(math_ops.argmax(x)))
self.assertAllEqual(grad([0.0])[0], None)
@test_util.assert_no_new_tensors
def testArgmax(self):
def argmax(x):
i = math_ops.argmax(x)
return array_ops.stop_gradient(i)
grad = backprop.gradients_function(argmax)
self.assertAllEqual(grad([0.0])[0], None)
@test_util.run_gpu_only
@test_util.assert_no_new_tensors
def testGPU(self):
def fn(x):
with context.device('/gpu:0'):
b = constant_op.constant(2.0)
c = math_ops.add(x.gpu(), b)
      # TODO(apassos): remove cpu below by making TensorVSpace aware
# of devices.
return math_ops.add(c, constant_op.constant(3.0)).cpu()
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
@test_util.run_gpu_only
@test_util.assert_no_new_tensors
def testGPUImplicitGrad(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(
constant_op.constant(1.0), name='v')
def f():
with context.device('gpu:0'):
return v.read_value()
self.assertEqual(
backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0)
@test_util.assert_no_new_tensors
def testCPU(self):
def fn(x):
b = constant_op.constant(2.0)
c = math_ops.add(x, b)
return math_ops.add(c, constant_op.constant(3.0))
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
@test_util.run_gpu_only
@test_util.assert_no_new_tensors
def testTensorCopyGPU2CPU2GPU(self):
def f(a, b):
return a.cpu() + b.cpu()
with context.device('/gpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
@test_util.assert_no_new_tensors
def testEmptyParams(self):
def fn(a, b):
return a * b
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
dx, dy = backprop.gradients_function(fn)(x, y)
self.assertAllEqual(dx, y.numpy())
self.assertAllEqual(dy, x.numpy())
@test_util.assert_no_new_tensors
def testUnconnectedNone(self):
v = resource_variable_ops.ResourceVariable(
1.0, name='testUnconnectedNone')
def f():
v.read_value()
return constant_op.constant(1.0)
self.assertEqual(backprop.implicit_grad(f)()[0][0], None)
@test_util.assert_no_new_tensors
def testGradientTapeReEnterContext(self):
g = backprop.GradientTape()
with g:
x = constant_op.constant(3.0)
g.watch(x)
y = 2*x
with g:
z = 2*y
grad = g.gradient(target=z, sources=[x])
self.assertEqual(self.evaluate(grad), [4.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeRepeatedSource(self):
with backprop.GradientTape(persistent=False) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = 2 * x
grad = g.gradient(target=y, sources=[x, x])
self.assertEqual(self.evaluate(grad), [2.0, 2.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testPersistentGradientTapeRepeatedSource(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
y = constant_op.constant(5.0)
g.watch(x)
g.watch(y)
z = x * x + x * y
grad = g.gradient(target=z, sources=[x, x])
self.assertEqual(self.evaluate(grad), [11.0, 11.0])
grad = g.gradient(target=z, sources=[y, x])
self.assertEqual(self.evaluate(grad), [3.0, 11.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeStructure(self):
with backprop.GradientTape(persistent=True) as g:
# Using different constant values because constant tensors are
      # cached, leading to a different gradient than what one might expect.
x1 = constant_op.constant(3.0)
x2 = constant_op.constant(3.1)
x3 = constant_op.constant(3.2)
g.watch(x1)
g.watch(x2)
g.watch(x3)
y = x1 + 2 * x2 + 3 * x3
self.assertEqual(self.evaluate(g.gradient(y, x1)), [1.0])
self.assertEqual(self.evaluate(g.gradient(y, (x1,))), (1.0,))
self.assertEqual(self.evaluate(g.gradient(y, (x1, x2))), (1.0, 2.0))
self.assertEqual(self.evaluate(g.gradient(y, [(x1, x2), (x2, x3)])),
[(1.0, 2.0), (2.0, 3.0)])
self.assertEqual(self.evaluate(g.gradient(y, (x1, x2, [x1, x3]))),
(1.0, 2.0, [1.0, 3.0]))
self.assertEqual(self.evaluate(g.gradient(y, [x1, {'x2': x2, 'x3': x3}])),
[1.0, {'x2': 2.0, 'x3': 3.0}])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTape(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
with backprop.GradientTape() as gg:
gg.watch(y)
z = 2 * y
inner_grad = gg.gradient(z, [y])[0]
self.assertEqual(self.evaluate(inner_grad), 2.0)
y += inner_grad
grad = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(grad), 6.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
  def testGradientTapeCalledOnConstantTarget(self):
with backprop.GradientTape() as g:
x = variables.Variable([3.0])
y = variables.Variable([2.0])
grad = g.gradient(x, y)
self.assertAllEqual(grad, None)
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
def testGradientTapeWithCond(self):
x = constant_op.constant(3.0)
def true_fn():
return x
def false_fn():
return x * x
with backprop.GradientTape() as g:
g.watch(x)
y = control_flow_ops.cond(x < x, true_fn, false_fn)
if not context.executing_eagerly():
with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'):
dy = g.gradient(y, [x])[0]
else:
dy = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy), 6.0)
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
def testGradientTapeWithWhileLoop(self):
i = constant_op.constant(1)
x = constant_op.constant(2.)
def cond(i, _):
return i < 3
def body(i, x):
return i + 1, x * 2
with backprop.GradientTape() as g:
g.watch([x])
_, y = control_flow_ops.while_loop(cond, body, [i, x])
if not context.executing_eagerly():
with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'):
dy = g.gradient(y, [x])[0]
else:
dy = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy), 4.0)
@test_util.assert_no_new_tensors
def testGradientTapeGradientCalledMultipleTimes(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
z = y * y
g.gradient(z, [x])
with self.assertRaisesRegexp(
RuntimeError, 'GradientTape.gradient can only be called once'):
g.gradient(y, [x])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
def testPersistentTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
z = y * y
dz_dx = g.gradient(z, [x])[0]
self.assertEqual(self.evaluate(dz_dx), 4 * 3 * 3 * 3)
dy_dx = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy_dx), 2 * 3)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testHigherOrderGradient(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x ** 3 # y := x^3
dy_dx = g.gradient(y, x) # dy/dx := 3x^2
d2y_dx2 = g.gradient(dy_dx, x) # d2y/dx2 := 6x
d3y_dx3 = g.gradient(d2y_dx2, x) # d3y/dx3 := 6
x = 3
self.assertEqual(self.evaluate(y), x ** 3)
self.assertEqual(self.evaluate(dy_dx), 3 * x ** 2)
self.assertEqual(self.evaluate(d2y_dx2), 6 * x)
self.assertEqual(self.evaluate(d3y_dx3), 6)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testPersistentNestedTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
with backprop.GradientTape(persistent=True) as gg:
gg.watch(y)
z = 2 * y
for _ in range(2):
inner_grad = gg.gradient(z, [y])[0]
self.assertEqual(self.evaluate(inner_grad), 2.0)
y += inner_grad
del gg
grad = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(grad), 6.0)
grad = g.gradient(z, [x])[0]
self.assertEqual(self.evaluate(grad), 12.0)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeVariable(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
self.evaluate(v.initializer)
with backprop.GradientTape() as g:
y = v * v
grad = g.gradient(y, [v])[0]
self.assertAllEqual(self.evaluate(grad), 2.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testNestedGradients(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch(x)
y = x * x
z = y * y
dz_dx, dz_dy = g.gradient(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 108.0)
self.assertEqual(self.evaluate(dz_dy), 18.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsDefault(self):
x = constant_op.constant(1.0)
y = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
dz_dx = g.gradient(z, x)
self.assertEqual(dz_dx, None)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsZeros(self):
x = constant_op.constant(1.0, shape=[2, 2])
y = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
dz_dx = g.gradient(z, x, unconnected_gradients='zero')
self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx))
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsVariablesZeros(self):
x = resource_variable_ops.ResourceVariable(
constant_op.constant(1., shape=[2, 2]))
self.evaluate(x.initializer)
y = resource_variable_ops.ResourceVariable(constant_op.constant(3.))
self.evaluate(y.initializer)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
dz_dx = g.gradient(z, x, unconnected_gradients='zero')
self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx))
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnknownUnconnectedGradientsValueGiven(self):
x = constant_op.constant(1.0)
y = constant_op.constant(1.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
with self.assertRaisesRegexp(
ValueError, "Unknown value for unconnected_gradients: 'nonsense'"):
g.gradient(z, x, unconnected_gradients='nonsense')
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsNestedDefunZeros(self):
@function.defun
def f(x):
return x * x
@function.defun
def h(y):
z = f(y)
return array_ops.stop_gradient(z)
x = constant_op.constant(1.0)
with backprop.GradientTape() as g:
g.watch(x)
y = h(x)
dy_dx = g.gradient(y, x, unconnected_gradients='zero')
self.assertEqual(0.0, self.evaluate(dy_dx))
@test_util.assert_no_new_tensors
def testEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grads_fn = backprop.val_and_grad_function(fn)
x = 2.0
y = 3.0
val, (dx, dy) = val_and_grads_fn(x, y)
self.assertAllClose(val, x * y)
self.assertAllEqual(dx, y)
self.assertAllEqual(dy, x)
@test_util.assert_no_new_tensors
def testNonEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grad_fn = backprop.val_and_grad_function(fn, params=[1])
x = 2.0
y = 3.0
val, grads = val_and_grad_fn(x, y)
self.assertAllClose(val, x * y)
self.assertEqual(1, len(grads))
self.assertAllEqual(grads[0], x)
@test_util.run_gpu_only
@test_util.assert_no_new_tensors
def testTensorCopyCPU2GPU2CPU(self):
# forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu)
# back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu)
def f(a, b):
with context.device('/gpu:0'):
c = math_ops.add(a.gpu(0), b.gpu(0))
return math_ops.add(c.cpu(), constant_op.constant(3.0))
with context.device('/cpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
def testGetAttrType(self):
typ = backprop.op_attr_type('Add', 'T')
self.assertEqual(typ, pywrap_tensorflow.TF_ATTR_TYPE)
def testGetAttrList(self):
typ = backprop.op_attr_type('MaxPool', 'ksize')
self.assertEqual(typ, [pywrap_tensorflow.TF_ATTR_INT])
def testMakeAttrType(self):
self.assertEqual(dtypes.float32,
backprop.make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1))
def testMakeAttrTypeList(self):
self.assertEqual([dtypes.float32],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1]))
def testMulType(self):
def mul(x):
return math_ops._mul_dispatch(x, x) # pylint: disable=protected-access
self.assertAllEqual(
backprop.gradients_function(mul)(3.0)[0].numpy(),
6.0)
def testMakeAttrShape(self):
for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]):
expected = tensor_shape.TensorShape(s).as_proto()
actual = backprop.make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, s)
self.assertEqual(
expected,
actual,
msg=('For shape %r, expected %r != %r actual' % (s, expected,
actual)))
def testMakeAttrShapeList(self):
shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]]
self.assertEqual(
[tensor_shape.TensorShape(s).as_proto() for s in shape_list],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_SHAPE], shape_list))
def testArgsGradientFunction(self):
def f(*args):
return args[0] * args[0]
grad = backprop.gradients_function(f)
self.assertAllEqual(grad(1.0)[0], 2.0)
def testPartial(self):
def f(x, y):
return x * y
part = functools.partial(f, constant_op.constant(2.0))
self.assertAllEqual(
backprop.gradients_function(part)(constant_op.constant(1.0))[0],
2.0)
def testReturnSameThing(self):
def f(x):
return x, 2 * x
self.assertAllEqual(backprop.gradients_function(f)(1.0)[0], 3.0)
@test_util.assert_no_new_tensors
def testExceptionSafety(self):
def f(unused_x):
raise ValueError()
try:
backprop.gradients_function(f)(1.0)
except ValueError:
pass
def real_f(x):
return x * x
self.assertAllEqual(backprop.gradients_function(real_f)(1.0)[0], 2.0)
@test_util.assert_no_new_tensors
def testMultiValueConvertToTensor(self):
x = resource_variable_ops.ResourceVariable(
initial_value=array_ops.constant([1.0]), name='x')
def fn():
a = math_ops.add(x.value(), 1.0)
      # Make sure convert_to_tensor works correctly with a list of
      # TensorNodes.
b = array_ops.stack([a, a], axis=0)
return math_ops.reduce_mean(b)
grad = backprop.implicit_grad(fn)()[0][0]
self.assertAllEqual([1.0], grad)
def testOutput(self):
def multiout(x):
return x + 2, x * x
x = constant_op.constant([0.0, 1.0, 2.0])
grad = backprop.gradients_function(multiout)(x)[0]
self.assertAllEqual([1.0, 3.0, 5.0], grad)
def testMultiValuePreservesIfNotDiffedAgainst(self):
def tfe_conv2d(timage, tkernel, conv2dstrides):
return nn_ops.conv2d(timage, tkernel, conv2dstrides, 'SAME')
i = constant_op.constant([[[[1.0]]]])
k = constant_op.constant([[[[2.0]]]])
s = [1, 1, 1, 1]
grad = backprop.gradients_function(tfe_conv2d, params=(0,))(i, k, s)[0]
self.assertAllEqual([[[[2.0]]]], grad)
def testSameObjectForMultipleArguments(self):
def f(x, y):
return math_ops.multiply(x, y)
g = backprop.gradients_function(f)
def np_g(x, y):
dx, dy = g(x, y)
return [dx.numpy(), dy.numpy()]
x = constant_op.constant(1.)
self.assertAllEqual([1., 1.], np_g(x, x))
x = 1.
self.assertAllEqual([1., 1.], np_g(x, x))
x = constant_op.constant([[1.]])
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
x = [[1.]]
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
v = resource_variable_ops.ResourceVariable(
initial_value=1., name='testSameObjectForMultipleArguments.Variable')
self.assertAllEqual([1., 1.], np_g(v, v))
@test_util.assert_no_new_tensors
def testImplicitGradientsCustomGradientAndCachedVariableValue(self):
@custom_gradient.custom_gradient
def my_square(x):
result = math_ops.square(x)
def grad(dr):
return 2 * dr * x + 1
return result, grad
x = resource_variable_ops.ResourceVariable(
initial_value=3., name='X.' + self.id())
def f():
return my_square(x)
g = backprop.implicit_grad(f)
grads_and_vars = g()
self.assertEqual(1, len(grads_and_vars))
grad, var = grads_and_vars[0]
self.assertAllEqual(7, grad)
self.assertAllEqual(x, var)
def testJacobianCustomGradient(self):
class MyCallable(object):
def __init__(self):
self.a = variables.Variable(1.)
self.b = variables.Variable(2.)
self.c = variables.Variable(3.)
def __call__(self, x):
return self.a * x * x + self.b * x + self.c
@def_function.function
def call(c, x):
@custom_gradient.custom_gradient
def _call():
y = c(x)
def grad(dy, variables=None): # pylint: disable=redefined-outer-name
with backprop.GradientTape(persistent=True) as g:
g.watch(variables)
y = c(x)
grad_vars = [
2 * math_ops.reduce_sum(dy * g.jacobian(y, v)) for v in variables
]
del g
return (), grad_vars
return y, grad
return _call()
c = MyCallable()
x = constant_op.constant([1., 2., 3.])
with backprop.GradientTape(persistent=True) as g:
g.watch([c.a, c.b, c.c])
y = call(c, x)
self.assertAllEqual(g.gradient(y, x), None)
@test_util.assert_no_new_tensors
def testCustomGradient(self):
@custom_gradient.custom_gradient
def my_mul(x, y):
result = x*y
def grad(dr):
return [dr*y, dr*x]
return result, grad
lr = 0.25
x = resource_variable_ops.ResourceVariable(2., name='x')
def loss(x):
return my_mul(2., x.read_value())
loss_grads_fn = backprop.implicit_val_and_grad(loss)
losses = []
for _ in range(5):
loss, grads_and_vars = loss_grads_fn(x)
losses.append(loss.numpy())
for (grad, var) in grads_and_vars:
var.assign_sub(lr*grad)
self.assertAllEqual(losses, [4.0, 3., 2., 1., 0.])
@test_util.assert_no_new_tensors
def testCustomGradientIdentity(self):
@custom_gradient.custom_gradient
def my_identity(x):
def grad(dresult):
return [2 * dresult]
return x, grad
self.assertAllEqual(backprop.gradients_function(my_identity)(1.0)[0], 2.0)
def testDifferentiatingFunctionThatReturnsNone(self):
def fn(x, y):
result = x*y # pylint: disable=unused-variable
x = constant_op.constant(1)
y = constant_op.constant(2)
loss_grads_fn = backprop.implicit_val_and_grad(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
loss_grads_fn(x, y)
val_and_grads_fn = backprop.val_and_grad_function(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
val_and_grads_fn(x, y)
def testZerosCacheDoesntLeakAcrossGraphs(self):
with ops.Graph().as_default():
def get_grad():
with ops.Graph().as_default(), self.cached_session():
t = constant_op.constant(1, dtype=dtypes.float32, shape=(10, 4))
x = constant_op.constant(2, dtype=dtypes.float32, shape=(10, 4))
with backprop.GradientTape() as tape:
tape.watch(x)
x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
y1 = x1**2
y = array_ops.concat([y1, t], axis=1)
return self.evaluate(tape.gradient(y, x))
grad1 = get_grad()
grad2 = get_grad()
self.assertAllEqual(grad1, grad2)
@test_util.run_in_graph_and_eager_modes
def testSelectivelyWatchVariables(self):
x1 = resource_variable_ops.ResourceVariable(1.0)
x2 = resource_variable_ops.ResourceVariable(1.0)
with backprop.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(x2)
y = x1**2
z = x2**3
self.assertTupleEqual(tape.watched_variables(), (x2,))
dy, dz = tape.gradient([y, z], [x1, x2])
self.evaluate([x1.initializer, x2.initializer])
self.assertIsNone(dy)
self.assertEqual(self.evaluate(dz), 3.0)
@test_util.run_in_graph_and_eager_modes
def testDifferentiatingScalarCache(self):
    # In the following test, if x2 is x1 (i.e. the objects are exactly the
    # same), then y is essentially 2*x1, and dy/dx1 = 2.
# When we had a pure scalar cache in eager, this would be the case. This
# test prevents us from going back to that case.
with backprop.GradientTape(persistent=False) as g:
x1 = constant_op.constant(3.0)
x2 = constant_op.constant(3.0)
g.watch(x1)
g.watch(x2)
y = x1 + x2
grad = g.gradient(target=y, sources=[x1])
self.assertEqual(self.evaluate(grad), [1.0])
def testVariablesAndConstantsProduceTheSameGradients(self):
# In the following test, differentiating [y, z] against [a, b] gives:
# (dy/da + dz/da, dy/db + dz/db).
# If a and b are the same constant, dz/da will not be 0 (which it should
# be).
    # This is solved by using variables, since calling read_value on a
    # variable produces a new tensor and a corresponding TensorHandle rather
    # than reusing the same tensor (which would happen if we were using a
    # cache and reusing EagerTensor objects).
def get_grads(a, b):
with backprop.GradientTape() as tape:
tape.watch([a, b])
y = a**3
z = b**2
return tape.gradient([y, z], [a, b])
gradients_constants = get_grads(
constant_op.constant(2.0), constant_op.constant(2.0))
gradients_variables = get_grads(
resource_variable_ops.ResourceVariable(2.0),
resource_variable_ops.ResourceVariable(2.0))
self.assertAllEqual(gradients_constants, gradients_variables)
def testUnknownShapes(self):
with ops.Graph().as_default():
with backprop.GradientTape() as tape:
a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
tape.watch(a)
b = a**3
db_da = tape.gradient(b, a)
with self.cached_session() as sess:
self.assertEqual((8.0, 12.0), sess.run((b, db_da), feed_dict={a: 2.0}))
@test_util.run_in_graph_and_eager_modes
def testCustomGradientInEagerAndGraph(self):
@custom_gradient.custom_gradient
def f(x):
y = x * x
def grad(dy):
return [4 * dy]
return y, grad
with backprop.GradientTape() as t:
c = constant_op.constant(1.0)
t.watch(c)
g = f(c)
self.assertAllEqual(self.evaluate(t.gradient(g, c)), 4.0)
@test_util.run_in_graph_and_eager_modes
def testMaxPooling3DGradient(self):
def forward(a):
r = max_pooling3d(a, pool_size=pool_size, strides=strides, padding='SAME')
return r
input_sizes = [1, 3, 2, 4, 1]
pool_size = (2, 2, 1)
strides = (1, 1, 1)
total_size = np.prod(input_sizes)
x = np.arange(1, total_size + 1, dtype=np.float32)
aa = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
da = backprop.gradients_function(forward)(aa)
if not context.executing_eagerly():
tf_aa = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
tf_max = max_pooling3d(
tf_aa, pool_size=pool_size, strides=strides, padding='SAME')
tf_da = gradients.gradients(tf_max, [tf_aa])
self.assertAllEqual(da[0], tf_da[0].eval())
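
# Illustrative sketch (an addition, not part of the original test suite): the
# persistent-tape pattern exercised by the tests above, collected into one
# standalone helper. Assumes eager execution and this file's existing imports.
def _example_persistent_tape_usage():
  """Computes dz/dx and dy/dx from a single persistent tape."""
  x = constant_op.constant(3.0)
  with backprop.GradientTape(persistent=True) as tape:
    tape.watch(x)  # Constants are not watched automatically.
    y = x * x
    z = y * y
  dz_dx = tape.gradient(z, x)  # 4 * x**3 = 108.0
  dy_dx = tape.gradient(y, x)  # 2 * x = 6.0
  del tape  # Release tape resources once all gradients are computed.
  return dz_dx, dy_dx
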
class JacobianTest(test.TestCase):
def _jacobian(self, experimental_use_pfor):
    persistent = context.executing_eagerly() and not experimental_use_pfor
with backprop.GradientTape(persistent=persistent) as g:
x = constant_op.constant([1., 2.])
y = constant_op.constant([3., 4.])
g.watch(x)
g.watch(y)
z = x * x * y
jacobian = g.jacobian(z, [x, y],
experimental_use_pfor=experimental_use_pfor)
answer = [array_ops.diag(2 * x * y), array_ops.diag(x * x)]
return jacobian, answer
@test_util.run_v1_only('b/120545219')
def testPfor(self):
jacobian, answer = self._jacobian(experimental_use_pfor=True)
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testWhileLoop(self):
jacobian, answer = self._jacobian(experimental_use_pfor=False)
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testPforDefun(self):
@function.defun
def _f():
return self._jacobian(experimental_use_pfor=True)
jacobian, answer = _f()
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testWhileLoopDefun(self):
@function.defun
def _f():
return self._jacobian(experimental_use_pfor=False)
jacobian, answer = _f()
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testPersistentTape(self):
if not context.executing_eagerly():
return
with backprop.GradientTape() as g:
x = constant_op.constant([1.0, 2.0])
g.watch(x)
y = x * x
with self.assertRaisesRegexp(RuntimeError, 'persistent'):
g.jacobian(y, x, experimental_use_pfor=False)
@test_util.run_v1_only('b/120545219')
def testPforException(self):
var = variables.Variable([1.])
@custom_gradient.custom_gradient
def op(x):
def grad(_):
        # Note that we perform a stateful operation here that is not
        # compatible with the parallel-for construct.
with ops.control_dependencies(
[var.assign(random_ops.random_uniform([1]))]):
return constant_op.constant(1.)
return x, grad
with backprop.GradientTape() as g:
x = constant_op.constant([1., 2.])
g.watch(x)
y = op(x)
with self.assertRaisesRegexp(ValueError, 'No converter'):
g.jacobian(y, x, experimental_use_pfor=True)
@test_util.run_v1_only('b/120545219')
def test_parallel_iterations(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant([[1., 2], [3, 4]])
g.watch(x)
y = math_ops.matmul(x, x)
self.assertAllClose(g.jacobian(y, x, parallel_iterations=2),
g.jacobian(y, x, parallel_iterations=3))
@test_util.run_in_graph_and_eager_modes
def test_nested_jacobian(self):
if context.executing_eagerly():
# TODO(agarwal): b/128842926
self.skipTest('Conversion of function calls not implemented yet.')
x = array_ops.ones((10, 2))
with backprop.GradientTape(persistent=False) as g:
g.watch(x)
with backprop.GradientTape(persistent=False) as gg:
gg.watch(x)
y = math_ops.reduce_sum(math_ops.square(x))
dy_x = gg.jacobian(y, x)
dy_xx = g.batch_jacobian(dy_x, x)
dy_xx_answer = [[[2., 0], [0, 2.]]] * 10
self.assertAllClose(dy_xx_answer, self.evaluate(dy_xx))
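
# Minimal usage sketch (illustration only, not original code): GradientTape's
# jacobian stacks per-element gradients, so for z = x * x * y the Jacobian
# with respect to x is diag(2 * x * y). Assumes this file's existing imports.
def _example_jacobian_usage():
  x = constant_op.constant([1., 2.])
  y = constant_op.constant([3., 4.])
  with backprop.GradientTape() as tape:
    tape.watch(x)
    z = x * x * y
  return tape.jacobian(z, x)  # Expected: [[6., 0.], [0., 16.]]
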
@test_util.run_all_in_graph_and_eager_modes
class BatchJacobianTest(test.TestCase):
def _batch_jacobian(self, experimental_use_pfor):
    persistent = context.executing_eagerly() and not experimental_use_pfor
with backprop.GradientTape(persistent=persistent) as g:
x = constant_op.constant([[1., 2.], [3., 4.]])
y = constant_op.constant([[3., 4.], [5., 6.]])
g.watch(x)
z = x * x * y
batch_jacobian = g.batch_jacobian(
z, x, experimental_use_pfor=experimental_use_pfor)
answer = array_ops.stack([array_ops.diag(2 * x[0] * y[0]),
array_ops.diag(2 * x[1] * y[1])])
return batch_jacobian, answer
def testPfor(self):
batch_jacobian, answer = self._batch_jacobian(experimental_use_pfor=True)
self.assertAllEqual(answer, batch_jacobian)
def testWhileLoop(self):
batch_jacobian, answer = self._batch_jacobian(experimental_use_pfor=False)
self.assertAllEqual(answer, batch_jacobian)
def testPforDefun(self):
@function.defun
def _f():
return self._batch_jacobian(experimental_use_pfor=True)
batch_jacobian, answer = _f()
self.assertAllEqual(answer, batch_jacobian)
def testWhileLoopDefun(self):
@function.defun
def _f():
return self._batch_jacobian(experimental_use_pfor=False)
batch_jacobian, answer = _f()
self.assertAllEqual(answer, batch_jacobian)
def testPersistentTape(self):
if not context.executing_eagerly():
return
with backprop.GradientTape() as g:
x = constant_op.constant([[1.0, 2.0]])
g.watch(x)
y = x * x
with self.assertRaisesRegexp(RuntimeError, 'persistent'):
g.batch_jacobian(y, x, experimental_use_pfor=False)
def testBadShape(self):
x = random_ops.random_uniform([2, 3])
with backprop.GradientTape() as g:
y = array_ops.concat([x, x], axis=0)
with self.assertRaisesRegexp(ValueError, 'Need first dimension'):
g.batch_jacobian(y, x)
def testBadInputRank(self):
x = random_ops.random_uniform([2])
with backprop.GradientTape() as g:
y = random_ops.random_uniform([2, 2])
with self.assertRaisesRegexp(ValueError, 'must have rank at least 2'):
g.batch_jacobian(y, x)
def testBadOutputRank(self):
x = random_ops.random_uniform([2, 2])
with backprop.GradientTape() as g:
y = random_ops.random_uniform([2])
with self.assertRaisesRegexp(ValueError, 'must have rank at least 2'):
g.batch_jacobian(y, x)
def testPforException(self):
var = variables.Variable([1.])
@custom_gradient.custom_gradient
def op(x):
def grad(_):
        # Note that we perform a stateful operation here that is not
        # compatible with the parallel-for construct.
with ops.control_dependencies(
[var.assign(random_ops.random_uniform([1]))]):
return constant_op.constant(1.)
return x, grad
with backprop.GradientTape() as g:
x = constant_op.constant([[1.], [2.]])
g.watch(x)
y = op(x)
with self.assertRaisesRegexp(ValueError, 'No converter'):
g.batch_jacobian(y, x, experimental_use_pfor=True)
def test_parallel_iterations(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant([[1., 2], [3, 4]])
g.watch(x)
w = constant_op.constant([[1., 2, 3, 4], [5, 6, 7, 8]])
y = math_ops.matmul(x, w)
self.assertAllClose(g.batch_jacobian(y, x, parallel_iterations=2),
g.batch_jacobian(y, x, parallel_iterations=3))
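
# Hedged sketch (illustration only, not original code): batch_jacobian treats
# the first dimension as a batch and returns one Jacobian per batch element,
# with shape [batch, y_cols, x_cols]. Assumes this file's existing imports.
def _example_batch_jacobian_usage():
  x = constant_op.constant([[1., 2.], [3., 4.]])
  with backprop.GradientTape() as tape:
    tape.watch(x)
    y = x * x  # Elementwise, so each per-example Jacobian is diagonal.
  return tape.batch_jacobian(y, x)  # Shape [2, 2, 2], diag(2 * x[i]) per row.
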
class AggregateIndexedSlicesGradientsTest(test_util.TensorFlowTestCase):
def _assert_indexed_slices_equal(self, left, right):
self.assertAllEqual(
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
def testNoGradients(self):
self.assertIsNone(backprop.aggregate_indexed_slices_gradients([]))
def testOneGradient(self):
t = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
result = backprop.aggregate_indexed_slices_gradients([t])
self._assert_indexed_slices_equal(t, result)
def testMultipleGradients(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = backprop.aggregate_indexed_slices_gradients([t0, t1])
self._assert_indexed_slices_equal(total, result)
def testMultipleGradientsWithNones(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
t3 = None
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = backprop.aggregate_indexed_slices_gradients([t0, t1, t3])
self._assert_indexed_slices_equal(total, result)
def testMixedTensorAndIndexedSlices(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = constant_op.constant([[0., 0.], [5, 6], [7., 8.]])
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = backprop.aggregate_indexed_slices_gradients([t0, t1])
self._assert_indexed_slices_equal(total, result)
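
# Illustration (an addition, not original code): aggregating IndexedSlices
# gradients is equivalent to densifying each operand and summing, with None
# entries ignored, which is what the assertions above verify.
def _example_aggregate_indexed_slices():
  t0 = math_ops._as_indexed_slices(  # pylint: disable=protected-access
      constant_op.constant([[1., 2.], [3., 4.]]))
  t1 = constant_op.constant([[10., 20.], [30., 40.]])
  total = backprop.aggregate_indexed_slices_gradients([t0, t1, None])
  return ops.convert_to_tensor(total)  # [[11., 22.], [33., 44.]]
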
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/eager/backprop_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lift_to_graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import compat
class LiftToGraphTest(test.TestCase):
def testCaptureOrdering(self):
v1 = resource_variable_ops.ResourceVariable(1.0)
v2 = resource_variable_ops.ResourceVariable(2.0)
v3 = resource_variable_ops.ResourceVariable(3.0)
@def_function.function
def fn():
return v1 + v2 + v3
concrete_fn = fn.get_concrete_function()
original_captures = concrete_fn.graph.captures
outputs = concrete_fn.graph.outputs
for _ in range(100):
g = func_graph.FuncGraph('lifted')
lift_to_graph.lift_to_graph(
outputs, g, add_sources=True, handle_captures=True)
lifted_captures = g.captures
self.assertLen(lifted_captures, 3)
for original_capture, lifted_capture in zip(original_captures.values(),
lifted_captures.values()):
self.assertEqual(original_capture.name, lifted_capture.name)
def testClassAttrsRemoved(self):
"""Tests that _class attrs (from colocate_with()) are removed."""
@def_function.function
def fn():
two = constant_op.constant(2.0, name='two')
ten = constant_op.constant(10.0, name='ten')
twenty = math_ops.multiply(two, ten, name='twenty')
three = constant_op.constant(3.0, name='three')
with framework_ops.colocate_with(twenty):
thirty = math_ops.multiply(three, ten, name='thirty')
return ten, twenty, thirty
concrete_fn = fn.get_concrete_function()
self.assertItemsEqual( # Before lifting, 'fn' has colocation attrs.
concrete_fn.graph.get_operation_by_name('thirty').colocation_groups(),
[compat.as_bytes('loc:@twenty')])
thirty_out = concrete_fn.graph.outputs[2]
g = func_graph.FuncGraph('lifted')
lift_to_graph.lift_to_graph([thirty_out], g)
# After lifting, colocation attrs are gone.
ops = g.get_operations()
self.assertItemsEqual([op.name for op in ops],
['three', 'ten', 'thirty', # Lifted from `fn` body.
thirty_out.op.name]) # Wrapper for output.
for op in ops:
with self.assertRaises(ValueError):
class_attr = op.get_attr('_class') # Expected not to exist.
print('Unexpected class_attr', class_attr, 'on', op.name)
self.assertItemsEqual(op.colocation_groups(), # Expect default self-ref.
[compat.as_bytes('loc:@%s' % op.name)])
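
# Usage sketch (illustrative, not part of the original tests): lifting a
# concrete function's outputs into a fresh FuncGraph, mirroring the pattern
# exercised by testCaptureOrdering above. Assumes this file's imports.
def _example_lift_outputs():
  v = resource_variable_ops.ResourceVariable(1.0)

  @def_function.function
  def fn():
    return v + 1.0

  concrete_fn = fn.get_concrete_function()
  g = func_graph.FuncGraph('lifted_example')
  lift_to_graph.lift_to_graph(
      concrete_fn.graph.outputs, g, add_sources=True, handle_captures=True)
  return g
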
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/eager/lift_to_graph_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DefFunctionTests(xla_test.XLATestCase):
def testVarInitializedInFunction(self):
with self.test_scope():
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
self.assertAllClose([13., 14.], add_var(constant_op.constant(2.)))
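
# Illustrative sketch (an addition, not original code): ops.init_scope lifts
# operations out of the function being traced, which is why the assign above
# runs eagerly even though it appears inside a tf.function. Assumes eager
# execution and this file's existing imports.
def _example_init_scope():
  v = variables.Variable(1.)

  @def_function.function
  def bump():
    with ops.init_scope():
      v.assign_add(1.)  # Runs outside the traced graph, once per trace.
    return v.read_value()

  return bump()
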
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/eager/def_function_xla_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing tfe code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops as _ops
from tensorflow.python.platform import test as _test
from tensorflow.python.platform.test import * # pylint: disable=wildcard-import
# TODO(akshayka): Do away with this file.
def main(argv=None): # pylint: disable=function-redefined
_ops.enable_eager_execution()
_test.main(argv)
|
tensorflow-master
|
tensorflow/python/eager/test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import forwardprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
# TODO(allenl): Move this somewhere useful once forward gradients are stable.
def _jvp(f, primals, tangents):
"""Compute the jacobian of `f` at `primals` multiplied by `tangents`."""
with forwardprop.ForwardGradientAccumulator() as acc:
acc.watch(primals, tangents)
primals_out = f(*primals)
return primals_out, acc.jvp(primals_out)
def _jacfwd(f, primals):
"""Compute the jacobian of `f` at `primals` using forward-mode autodiff."""
jac_flat = []
flat_primals = nest.flatten(primals)
tangent_mask = [array_ops.zeros_like(primal) for primal in flat_primals]
for primal_index, primal in enumerate(flat_primals):
primal_vector = array_ops.reshape(primal, [-1])
primal_vector_length = array_ops.size(primal_vector)
jac_columns = []
for element_index in math_ops.range(primal_vector_length):
mask = array_ops.one_hot(element_index, primal_vector_length)
tangent_mask[primal_index] = array_ops.reshape(mask,
array_ops.shape(primal))
jac_columns.append(
nest.map_structure(
functools.partial(array_ops.reshape, shape=[-1]),
_jvp(f, primals, tangent_mask)[1]))
jac_flat.append(array_ops.stack(jac_columns, axis=1))
tangent_mask[primal_index] = array_ops.zeros_like(primal)
return nest.pack_sequence_as(primals, jac_flat)
def _grad(f, argnums=0):
"""Return a function which computes the gradient of `f`."""
def _f(*params):
with backprop.GradientTape() as tape:
tape.watch(params)
primals_out = f(*params)
return tape.gradient(primals_out, params[argnums])
return _f
def _hvp(f, primals, tangents):
"""Compute a forward-over-back Hessian-vector product."""
return _jvp(_grad(f), primals, tangents)[1]
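
# Worked example (illustration only, not original code): for f(x) = x * x the
# JVP at x with tangent t is 2 * x * t, so the call below yields primal 9.0
# and tangent 6.0. Assumes eager execution and this file's existing imports.
def _example_jvp_usage():
  return _jvp(lambda x: x * x,
              (constant_op.constant(3.),),
              (constant_op.constant(1.),))  # (9.0, 6.0)
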
def _test_gradients(testcase,
f,
primals,
order,
delta=1e-3,
rtol=1e-2,
atol=1e-6):
"""Tests forward/backward jacobians of `f`'s [0, `order`)-order gradients."""
if order < 1:
raise ValueError("`order` should be a positive integer, got '{}'."
.format(order))
if order > 1:
_test_gradients(
testcase=testcase,
f=_grad(f),
primals=primals,
order=order - 1,
delta=delta,
rtol=rtol,
atol=atol)
sym_jac_back, num_jac = gradient_checker_v2.compute_gradient(
f, primals, delta=delta)
testcase.assertAllClose(num_jac, sym_jac_back, rtol=rtol, atol=atol)
# TODO(b/134972215): compute_gradient should use the definition of a Jacobian
# matrix on Wikipedia, then this transpose can go away.
sym_jac_fwd = nest.map_structure(array_ops.transpose, _jacfwd(f, primals))
testcase.assertAllClose(num_jac, sym_jac_fwd, rtol=rtol, atol=atol)
# And the symbolic computations should be much closer.
testcase.assertAllClose(sym_jac_back, sym_jac_fwd)
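
# Worked check (an illustration, not original code): for
# f(x) = reduce_sum(x * x) the Hessian is 2 * I, so the forward-over-back HVP
# computed by _hvp equals 2 * tangents.
def _example_hvp_usage():
  primals = (constant_op.constant([1., 2.]),)
  tangents = (constant_op.constant([3., 4.]),)
  # Expected result: 2 * tangents = [6., 8.]
  return _hvp(lambda x: math_ops.reduce_sum(x * x), primals, tangents)
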
class ForwardpropTest(test.TestCase):
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMultipleWatchesAdd(self):
x = constant_op.constant(-2.)
with forwardprop.ForwardGradientAccumulator() as acc:
acc.watch(x, constant_op.constant(10.))
self.assertAllClose(10., acc.jvp(x))
acc.watch(x, constant_op.constant(11.))
self.assertAllClose(21., acc.jvp(x))
y = constant_op.constant(3.) * x
self.assertAllClose(21., acc.jvp(x))
self.assertAllClose(21. * 3., acc.jvp(y))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testDeadTensorsJVPCleared(self):
x = array_ops.ones([100])
x_weak = weakref.ref(x)
grad_tensor = constant_op.constant(array_ops.zeros([100]))
grad_tensor_weak = weakref.ref(grad_tensor)
with forwardprop.ForwardGradientAccumulator() as acc:
acc.watch(x, grad_tensor)
derived_tensor = constant_op.constant(2.) * x
del grad_tensor
self.assertAllClose(array_ops.zeros([100]), acc.jvp(x))
del x
self.assertIsNone(x_weak())
self.assertIsNone(grad_tensor_weak())
derived_tensor_weak = weakref.ref(derived_tensor)
derived_tensor_grad = acc.jvp(derived_tensor)
derived_tensor_grad_weak = weakref.ref(derived_tensor_grad)
del derived_tensor
del derived_tensor_grad
self.assertIsNone(derived_tensor_weak())
self.assertIsNone(derived_tensor_grad_weak())
@test_util.assert_no_new_tensors
def testJVPManual(self):
primal, tangent = _jvp(math_ops.sin, (constant_op.constant(0.1),),
(constant_op.constant(0.2),))
self.assertAllClose(math_ops.sin(0.1), primal)
self.assertAllClose(math_ops.cos(0.1) * 0.2, tangent)
@test_util.assert_no_new_tensors
def testNumericHigherOrder(self):
def f(x):
pointwise = math_ops.sin(x) * math_ops.tan(x)
return math_ops.reduce_prod(
pointwise + math_ops.reduce_sum(pointwise), axis=1)
_test_gradients(
self, f, [constant_op.constant([[2.0, 3.0], [1.0, 4.0]])], order=3)
@test_util.assert_no_new_tensors
def testCustomGradient(self):
@custom_gradient.custom_gradient
def f(x):
def grad(dy):
return dy * math_ops.cos(x)
return np.sin(x.numpy()), grad
_test_gradients(self, f, [constant_op.constant([1., 2.])], order=3)
@test_util.assert_no_new_tensors
def testCustomGradientRecomputeGrad(self):
@custom_gradient.recompute_grad
def f(x):
return math_ops.reduce_prod(math_ops.tanh(x)**2)
_test_gradients(self, f, [constant_op.constant([1.])], order=3)
def testFunctionGrad(self):
@def_function.function
def f(x):
return math_ops.reduce_prod(math_ops.tanh(x)**2)
_test_gradients(
self,
f,
[constant_op.constant([1., 2.])],
# TODO(allenl): figure out why functions aren't N times differentiable
order=1)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testHVPMemory(self):
def fun(x):
return math_ops.reduce_prod(math_ops.tanh(x)**2)
primals = constant_op.constant([1., 2., 3.])
tangents = constant_op.constant([3., 4., 5.])
_hvp(fun, (primals,), (tangents,))
@test_util.assert_no_new_tensors
def testHVPCorrectness(self):
def fun(x):
return math_ops.reduce_prod(math_ops.tanh(x)**2)
primals = constant_op.constant([1., 2., 3.])
tangents = constant_op.constant([3., 4., 5.])
forwardback_hvp_eager = _hvp(fun, (primals,), (tangents,))
forwardback_hvp_function = def_function.function(_hvp)(fun, (primals,),
(tangents,))
with backprop.GradientTape(persistent=True) as g:
g.watch(primals)
with backprop.GradientTape() as gg:
gg.watch(primals)
out = fun(primals)
grad = array_ops.unstack(gg.gradient(out, primals))
hessian = []
for i in range(3):
hessian.append(g.gradient(grad[i], primals))
hessian = array_ops.stack(hessian, axis=0)
backback_hvp = math_ops.tensordot(hessian, tangents, axes=1)
self.assertAllClose(backback_hvp, forwardback_hvp_eager)
self.assertAllClose(backback_hvp, forwardback_hvp_function)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/eager/forwardprop_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
def current_device():
return constant_op.constant(1.).device
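
# Usage sketch (illustrative, not original code): the execute() helper above
# drives low-level op execution directly; attrs are a flat tuple of
# alternating attribute names and values. This mirrors testExecuteBasic below.
def _example_execute_mul():
  three = constant_op.constant(3)
  five = constant_op.constant(5)
  return execute(
      b'Mul',
      num_outputs=1,
      inputs=[three, five],
      attrs=('T', three.dtype.as_datatype_enum))[0]  # EagerTensor 15
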
class TFETest(test_util.TensorFlowTestCase):
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertIsNone(ctx.summary_writer)
ctx.summary_writer = 'mock'
self.assertEqual('mock', ctx.summary_writer)
self.assertIsNone(ctx.summary_recording)
ctx.summary_recording = 'mock'
self.assertEqual('mock', ctx.summary_recording)
self.assertIsNone(ctx.summary_step)
ctx.summary_step = 'mock'
self.assertEqual('mock', ctx.summary_step)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testRunMetadata(self):
context.enable_run_metadata()
t = constant_op.constant(1.0)
_ = t + t # Runs an operation which will be in the RunMetadata
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
step_stats = run_metadata.step_stats
self.assertGreater(len(step_stats.dev_stats), 0)
cpu_stats = step_stats.dev_stats[0]
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
cpu_stats.device)
self.assertGreaterEqual(len(cpu_stats.node_stats), 1)
@test_util.run_gpu_only
def testShouldCopy(self):
with ops.device('gpu:0'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
    # The value we test y.device against depends on the behavior when no
    # device is explicitly specified in the context. This behavior is subject
    # to change (for example, in the future we may want to use GPUs, if
    # available, when no device is explicitly provided).
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.summary_writer,
ctx.summary_recording,
ctx.summary_step,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
    self.assertEqual(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegexp(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
@test_util.run_gpu_only
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
    # Add would fail if x and y were not both on the GPU.
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
      # Check that even though we specified the cpu device, we'll run the read
      # op on the device where the handle lives.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.async_wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.async_wait()
context.async_clear_error()
@test_util.run_gpu_only
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
test_var = variables.Variable([2., 3.])
self.assertAllEqual(test_fn(test_var), 1.0)
@test_util.run_gpu_only
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
context.set_execution_mode(context.ASYNC)
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))
context.async_wait()
context.async_clear_error()
context.context().execution_mode = context.SYNC
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
    self.assertEqual(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op CPU and copy result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
      self.assertEqual(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
      # Ensure that all threads set their mode simultaneously.
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
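
# Illustrative sketch (an addition, not original code): a matched _Send/_Recv
# pair on a single device, the pattern SendRecvTest below drives through its
# helpers. Assumes eager execution and this file's existing imports.
def _example_send_recv():
  cpu = '/job:localhost/replica:0/task:0/device:CPU:0'
  execute(
      b'_Send', num_outputs=0, inputs=[constant_op.constant(7.0)],
      attrs=('T', dtypes.float32.as_datatype_enum, 'tensor_name', 'example',
             'send_device', cpu, 'send_device_incarnation', 0,
             'recv_device', cpu, 'client_terminated', True))
  return execute(
      b'_Recv', num_outputs=1, inputs=[],
      attrs=('tensor_type', dtypes.float32.as_datatype_enum, 'tensor_name',
             'example', 'send_device', cpu, 'send_device_incarnation', 0,
             'recv_device', cpu, 'client_terminated', False))[0]  # 7.0
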
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = constant_op.constant(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
    self.assertIsNone(cache.get('1'))
    cache.put('2', array_ops.zeros((2)))
    self.assertIsNotNone(cache.get('2'))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/eager/core_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for eager profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import trace_events_pb2
from tensorflow.python.eager import profiler
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
class ProfilerTest(test_util.TensorFlowTestCase):
def test_profile(self):
profiler.start()
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
with self.assertRaises(profiler.ProfilerAlreadyRunningError):
profiler.start()
profile_result = profiler.stop()
profile_pb = trace_events_pb2.Trace()
profile_pb.ParseFromString(profile_result)
profile_pb_str = '%s' % profile_pb
    self.assertIn('Mul', profile_pb_str)
with self.assertRaises(profiler.ProfilerNotRunningError):
profiler.stop()
def test_save_profile(self):
logdir = self.get_temp_dir()
profile_pb = trace_events_pb2.Trace()
profile_result = profile_pb.SerializeToString()
profiler.save(logdir, profile_result)
file_list = gfile.ListDirectory(logdir)
self.assertEqual(len(file_list), 2)
for file_name in gfile.ListDirectory(logdir):
if gfile.IsDirectory(os.path.join(logdir, file_name)):
self.assertEqual(file_name, 'plugins')
else:
self.assertTrue(file_name.endswith('.profile-empty'))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/eager/profiler_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for low-level eager execution primitives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python import keras
class Tests(test.TestCase):
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_MatMulCorrectResponse(self):
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
a_100_by_784 = random_ops.random_uniform((100, 784))
b_100_by_784 = random_ops.random_uniform((100, 784))
ctx = context.context()
ctx.ensure_initialized()
self.assertAllClose(
math_ops.matmul(a_2_by_2, b_2_by_2),
pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2,
b_2_by_2, "transpose_a", False, "transpose_b", False))
self.assertAllClose(
math_ops.matmul(a_100_by_784, b_100_by_784, transpose_b=True),
pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_100_by_784,
b_100_by_784, "transpose_a", False, "transpose_b", True))
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_ResourceVariableMatMulCorrectResponse(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
x = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, m, m, "transpose_a",
False, "transpose_b", False)
y = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2, a_2_by_2,
"transpose_a", False, "transpose_b", False)
self.assertAllEqual(x, y)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_TapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
with backprop.GradientTape(persistent=True) as tape:
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
tape.watch(a_2_by_2)
z = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2,
a_2_by_2, "transpose_a", False, "transpose_b", False)
dz_dy = tape.gradient(z, [a_2_by_2])[0]
self.assertAllEqual(dz_dy.numpy(),
constant_op.constant(4.0, shape=[2, 2]).numpy())
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_ResourceVariableTapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
with backprop.GradientTape(persistent=True) as tape:
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
tape.watch(m)
z = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, m, m,
"transpose_a", False, "transpose_b", False)
dz_dy = tape.gradient(z, [m])[0]
self.assertAllEqual(dz_dy.numpy(),
constant_op.constant(4.0, shape=[2, 2]).numpy())
# Tests homogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_AddNCorrectResponse(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
self.assertAllClose(
math_ops.add_n([a_2_by_2, b_2_by_2]),
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"AddN", None, None,
[a_2_by_2, b_2_by_2]))
# Tests homogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_AddNTapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
tape.watch(b_2_by_2)
z1 = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "AddN", None, None,
[a_2_by_2, b_2_by_2])
z2 = math_ops.add_n([a_2_by_2, b_2_by_2])
dz1_dy = tape.gradient(z1, [a_2_by_2])[0]
dz2_dy = tape.gradient(z2, [a_2_by_2])[0]
self.assertAllEqual(dz1_dy.numpy(), dz2_dy.numpy())
# Tests heterogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_IdentityNCorrectResponse(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
self.assertAllClose(
array_ops.identity_n([a_2_by_2, b_2_by_2]),
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"IdentityN", None, None,
[a_2_by_2, b_2_by_2]))
# Tests heterogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_IdentityNTapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
tape.watch(b_2_by_2)
z1 = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "IdentityN", None, None,
[a_2_by_2, b_2_by_2])
z2 = array_ops.identity_n([a_2_by_2, b_2_by_2])
dz1_dy = tape.gradient(z1[0], [a_2_by_2])[0]
dz2_dy = tape.gradient(z2[0], [a_2_by_2])[0]
self.assertAllEqual(dz1_dy.numpy(), dz2_dy.numpy())
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_InvalidInputs(self):
a_2_by_2 = random_ops.random_uniform((2, 2))
ctx = context.context()
ctx.ensure_initialized()
assert ctx.executing_eagerly(
), "The prototype doesn't contain C code for graph construction"
ctx_handle = ctx._handle # pylint: disable=protected-access
# Not enough base params
with self.assertRaisesRegexp(ValueError,
"at least 5 items in the input tuple"):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name,
"Identity")
# Not enough inputs
with self.assertRaisesRegexp(ValueError,
"Expected to be at least 6, was 5"):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx_handle,
"Identity", None, [])
# Bad type
with self.assertRaisesRegexp(TypeError, "expected a string for op_name"):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name,
ctx_handle, None, [], a_2_by_2)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastPathExecute_InvalidAttributes(self):
split_dim = constant_op.constant(0, dtype=dtypes.int32)
value = constant_op.constant([0, 1, 2, 3], dtype=dtypes.float32)
ctx = context.context()
ctx.ensure_initialized()
ctx_handle = ctx._handle
with self.assertRaises(core._FallbackException):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name,
"Split", None, None, split_dim,
value, "num_split", -1)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testInvalidNumOutputs(self):
with self.assertRaisesRegexp(
Exception,
"Value for attr 'num_split' of -1 must be at least minimum 1"):
array_ops.split(value=[1, 2, 3], num_or_size_splits=-1)
with self.assertRaisesRegexp(
Exception,
"Value for attr 'num_split' of 0 must be at least minimum 1"):
array_ops.split(value=[1, 2, 3], num_or_size_splits=0)
def testIsFunction(self):
ctx = context.context()
self.assertFalse(ctx.has_function("not_a_function"))
@def_function.function
def f():
return 1.
self.assertTrue(ctx.has_function(f.get_concrete_function().name))
def testEagerExecute_InvalidType(self):
# Test case for GitHub issue 26879.
value = keras.layers.Input((128, 128, 1), dtype="float32")
with self.assertRaisesRegexp(TypeError,
"Expected list for 'values' argument"):
_ = array_ops.stack(value, axis=1)
def testGraphResourceVariableRaisesFallback(self):
with ops.Graph().as_default():
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
ctx = context.context()
ctx.ensure_initialized()
with self.assertRaises(core._FallbackException):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"MatMul", None, None, m, m,
"transpose_a", False,
"transpose_b", False)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/eager/pywrap_tfe_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for remote execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
class SingleWorkerTest(test.TestCase):
def setUp(self):
super(SingleWorkerTest, self).setUp()
workers, _ = test_util.create_local_cluster(1, 0)
remote.connect_to_remote_host(workers[0].target)
def testMultiDeviceFunctionBasic(self):
@def_function.function
def basic(i):
with ops.device('/job:localhost/replica:0/task:0/cpu:0'):
a = constant_op.constant([2]) + i
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
b = constant_op.constant([1])
return a + b
self.assertAllEqual(basic(constant_op.constant([2])).numpy(), [5])
self.assertAllEqual(basic(constant_op.constant([1])).numpy(), [4])
def testMultiDeviceFunctionVariable(self):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def with_variable(i):
return i + variable_b
self.assertAllEqual(with_variable(constant_op.constant([2])).numpy(), [3])
def testMultiDeviceFunctionRemoteOutput(self):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def remote_output(i):
return variable_b, i + variable_b
with self.assertRaises(errors.UnimplementedError) as cm:
remote_output(constant_op.constant([1]))
self.assertIn(
'Currently, outputting tensors on remote devices is not supported.',
cm.exception.message)
def testMultiDeviceFunctionAmbiguousDevice(self):
@def_function.function
def ambiguous_device(i):
with ops.device('cpu:0'):
return i + constant_op.constant([2])
with self.assertRaises(errors.InvalidArgumentError) as cm:
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
self.assertAllEqual(
ambiguous_device(constant_op.constant([2])).numpy(), [3])
self.assertIn('the output node must match exactly one device',
cm.exception.message)
class MultiWorkersTest(test.TestCase):
def setUp(self):
super(MultiWorkersTest, self).setUp()
workers, _ = test_util.create_local_cluster(3, 0)
remote.connect_to_remote_host(
[workers[0].target, workers[1].target, workers[2].target])
def testMultiDeviceFunctionOnRemoteDevice(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable(1.0)
@def_function.function
def remote_function(i):
with ops.device('/job:worker/replica:0/task:0'):
a = i + variable_b
c = a + 1.0
return c
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
def testSimpleParameterServer(self):
with ops.device('/job:worker/task:2/device:CPU:0'):
v1 = variables.Variable(initial_value=0)
v2 = variables.Variable(initial_value=10)
@def_function.function
def worker_fn():
v1.assign_add(1)
v2.assign_sub(2)
return v1.read_value() + v2.read_value()
with ops.device('/job:worker/task:0/device:CPU:0'):
self.assertAllEqual(worker_fn(), 9)
with ops.device('/job:worker/task:1/device:CPU:0'):
self.assertAllEqual(worker_fn(), 8)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/eager/remote_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""State management for eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import random
import threading
import numpy as np
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
GRAPH_MODE = 0
EAGER_MODE = 1
default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE
# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_starting_device_spec = pydev.DeviceSpec.from_string("")
_MAXINT32 = 2**31 - 1
DEVICE_PLACEMENT_EXPLICIT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
SYNC = 0
ASYNC = 1
_tf2_gauge = monitoring.BoolGauge("/tensorflow/api/tf2_enable",
                                  "Whether tf2.enable() has been called.")
_tf2_gauge.get_cell().set(tf2.enabled())
class _EagerTensorCache(object):
"""Simple cache which evicts items based on length in a FIFO manner."""
def __init__(self, max_items=256, max_tensor_size=10000):
self._data = collections.OrderedDict()
self._max_items = max_items
self._max_tensor_size = max_tensor_size
def put(self, key, value):
if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access
return
self._data[key] = value
if len(self._data) > self._max_items:
self._data.popitem(last=False)
def get(self, key):
return self._data.get(key, None)
def flush(self):
    # Reset to an OrderedDict so `put` can keep evicting in FIFO order via
    # popitem(last=False); a plain dict does not support that argument.
    self._data = collections.OrderedDict()
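# A minimal usage sketch of `_EagerTensorCache` (editorial example, not part of
# the original module). It assumes an initialized eager context so that
# `constant_op.constant` returns EagerTensors; the local import mirrors how
# this module avoids a circular dependency on constant_op.
def _demo_eager_tensor_cache():
  from tensorflow.python.framework import constant_op  # pylint: disable=g-import-not-at-top
  cache = _EagerTensorCache(max_items=2, max_tensor_size=4)
  cache.put("a", constant_op.constant([1.0, 2.0]))  # cached: 2 elements <= 4
  cache.put("b", constant_op.constant([[0.0] * 3] * 3))  # skipped: 9 > 4
  assert cache.get("a") is not None
  assert cache.get("b") is None
  cache.put("c", constant_op.constant(1.0))
  cache.put("d", constant_op.constant(2.0))  # evicts "a" (FIFO, max_items=2)
  assert cache.get("a") is None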
class FunctionCallOptions(object):
"""Options applied at call sites of eager functions.
Eager functions are functions decorated with tf.contrib.eager.defun.
"""
def __init__(self, executor_type=None, config_proto=None):
"""Constructor.
Args:
executor_type: (optional) name of the executor to be used to execute the
eager function. If None or an empty string, the default Tensorflow
executor will be used.
config_proto: (optional) a `config_pb2.ConfigProto` proto or
a serialized string of that proto.
The config used by Grappler when optimizing the function graph.
        Each concrete function is optimized the first time it is called. Changing
config_proto after the first call has no effect.
If config_proto is None, an empty RewriterConfig will be used.
"""
self.config_proto_serialized = config_proto
self.executor_type = executor_type
@property
def executor_type(self):
return self._executor_type
@executor_type.setter
def executor_type(self, executor_type):
self._executor_type = executor_type
@property
def config_proto_serialized(self):
return self._config_proto_serialized
@config_proto_serialized.setter
def config_proto_serialized(self, config):
if isinstance(config, config_pb2.ConfigProto):
self._config_proto_serialized = config.SerializeToString()
elif isinstance(config, str):
self._config_proto_serialized = config
elif config is None:
self._config_proto_serialized = (
config_pb2.ConfigProto().SerializeToString())
else:
raise ValueError("the rewriter config must be either a "
"config_pb2.ConfigProto, or a serialized string of that "
"proto or None. got: {}".format(type(config)))
class _ThreadLocalData(threading.local):
"""Thread local storage for the eager context."""
def __init__(self):
super(_ThreadLocalData, self).__init__()
self.device_spec = _starting_device_spec
self.device_name = ""
self.mode = default_execution_mode
self.is_eager = default_execution_mode == EAGER_MODE
self.scope_name = ""
self.summary_writer = None
self.summary_recording = None
self.summary_recording_distribution_strategy = True
self.summary_step = None
self.scalar_cache = {}
self._ones_rank_cache = None
self._zeros_cache = None
self.execution_mode = SYNC
self.function_call_options = None
@property
def ones_rank_cache(self):
if not self._ones_rank_cache:
self._ones_rank_cache = _EagerTensorCache()
return self._ones_rank_cache
@property
def zeros_cache(self):
if not self._zeros_cache:
self._zeros_cache = _EagerTensorCache()
return self._zeros_cache
ContextSwitch = collections.namedtuple(
"ContextSwitch", ["is_building_function", "enter_context_fn",
"device_stack"])
# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# `DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self, eager):
super(_ContextSwitchStack, self).__init__()
self.stack = []
if eager:
# Initialize the stack with a pointer to enter the eager context; this
# ensures that the fact that eager execution was enabled is propagated
# across threads, since (1) `enable_eager_execution` modifies a
# process-level flag (`default_execution_mode`) and (2) `__init__` is
# called each time a threading.local object is used in a separate thread.
self.push(is_building_function=False, enter_context_fn=eager_mode,
device_stack=None)
def push(self, is_building_function, enter_context_fn, device_stack):
"""Push metadata about a context switch onto the stack.
    A context switch can take one of two forms: installing a graph as
the default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
device_stack: If applicable, the device function stack for this
graph. When breaking out of graphs in init_scope, the innermost nonempty
device stack is used. Eager contexts put `None` here and the value is
never used.
"""
self.stack.append(
ContextSwitch(is_building_function, enter_context_fn, device_stack))
def pop(self):
"""Pop the stack."""
self.stack.pop()
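# Editorial sketch (not part of the original module): recording a
# graph-as-default context switch, simplified from what `Graph.as_default`
# would push; the lambda stands in for the real enter-context callable.
def _demo_context_switch_stack():
  switches = _ContextSwitchStack(eager=False)
  switches.push(is_building_function=False,
                enter_context_fn=lambda: None,
                device_stack=None)
  assert not switches.stack[-1].is_building_function
  switches.pop()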
class LogicalDevice(
collections.namedtuple("LogicalDevice", ["name", "device_type"])):
"""Abstraction for a device initialized by the runtime.
  A LogicalDevice corresponds to an initialized instance on a PhysicalDevice or a
remote device available in the cluster. Tensors and operations can be placed
on a specific LogicalDevice by calling `tf.device()` with the `name` of the
LogicalDevice.
Fields:
name: The fully qualified name of the device. Can be used for Op or function
placement.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
@tf_export("config.experimental.VirtualDeviceConfiguration")
class VirtualDeviceConfiguration(
collections.namedtuple("VirtualDeviceConfiguration", ["memory_limit"])):
"""Configuration class for virtual devices for a PhysicalDevice.
Fields:
memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual
device. Currently only supported for GPUs.
"""
def __new__(cls, memory_limit=None):
return super(VirtualDeviceConfiguration, cls).__new__(cls, memory_limit)
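# Editorial sketch (not part of the original module): splitting one physical
# GPU into two 1 GB logical devices. `context()` is the module-level accessor
# defined later in this file; GPU availability is an assumption, and this must
# run before the context handle is initialized.
def _demo_virtual_devices():
  ctx = context()
  gpus = ctx.list_physical_devices("GPU")
  if gpus:
    ctx.set_virtual_device_configuration(
        gpus[0],
        [VirtualDeviceConfiguration(memory_limit=1024),
         VirtualDeviceConfiguration(memory_limit=1024)])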
class PhysicalDevice(
collections.namedtuple("PhysicalDevice", ["name", "device_type"])):
"""Abstraction for a locally visible physical device.
TensorFlow can utilize various devices such as the CPU or multiple GPUs
for computation. Before initializing a local device for use, the user can
  customize certain properties of the device such as its visibility or memory
configuration.
Once a PhysicalDevice is initialized one or many LogicalDevice objects are
created. Use tf.config.set_virtual_device_configuration() to create multiple
LogicalDevice objects for a PhysicalDevice. This is useful when separation
between models is needed.
Fields:
name: Unique identifier for device.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
# TODO(agarwal): create and link in some documentation for `execution_mode`.
# pylint: disable=redefined-outer-name
def __init__(self,
config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Creates a new Context.
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
When set to None, an appropriate value will be picked automatically.
The value picked may change between TensorFlow releases.
Defaults to DEVICE_PLACEMENT_SILENT.
Valid values:
- DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is
not correct.
- DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
- DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
- DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched
are actually executed. When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- SYNC: executes each operation synchronously.
- ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
server_def: (Optional.) A tensorflow::ServerDef proto.
Enables execution on remote devices. GrpcServers need to be started by
creating an identical server_def to this, and setting the appropriate
task_indexes, so that the servers can communicate. It will then be
possible to execute operations on remote devices.
Raises:
ValueError: If execution_mode is not valid.
"""
self._config = config
self._thread_local_data = _ThreadLocalData()
self._context_switches = _ContextSwitchStack(self.executing_eagerly())
self._context_handle = None
self._context_devices = None
self._post_execution_callbacks = []
self._seed = None
self._initialize_lock = threading.Lock()
if device_policy is None:
device_policy = DEVICE_PLACEMENT_SILENT
self._device_policy = device_policy
if execution_mode not in (None, SYNC, ASYNC):
raise ValueError(
"execution_mode should be None/SYNC/ASYNC. Got %s" % execution_mode)
if execution_mode is None:
execution_mode = SYNC
self._execution_mode = execution_mode
self._server_def = server_def
self._collective_ops_server_def = None
self._collective_leader = None
self._collective_scoped_allocator_enabled_ops = None
self._collective_use_nccl_communication = None
self._collective_device_filters = None
self._device_lock = threading.Lock()
self._physical_devices = None
self._visible_device_list = []
self._memory_growth_map = None
self._virtual_device_map = {}
# Values set after construction
self._optimizer_jit = None
self._intra_op_parallelism_threads = None
self._inter_op_parallelism_threads = None
self._soft_device_placement = None
self._log_device_placement = None
self._optimizer_experimental_options = {}
# pylint: enable=redefined-outer-name
def _set_global_seed(self, seed):
"""Set a global eager mode seed for random ops."""
self._seed = seed
# `random.Random(seed)` needs `seed` to be hashable, while values of type
# e.g. `np.int64` or `np.ndarray` are not. We use `int(...)` to convert them
# to int.
try:
hash(seed)
except TypeError:
seed = int(np.array(seed))
self._rng = random.Random(seed)
# Also clear the kernel cache, to reset any existing seeds
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextClearCaches(self._context_handle)
def _internal_operation_seed(self):
"""Returns a fake operation seed.
    In eager mode, the user shouldn't set or depend on the operation seed.
    Here, we generate a random seed based on the global seed, so each
    operation's randomness differs while still being derived from it.
Returns:
A fake operation seed based on global seed.
"""
return self._rng.randint(0, _MAXINT32)
def _initialize_logical_devices(self):
"""Helper to initialize devices."""
# Store list of devices
self._logical_devices = []
self._context_devices = []
device_list = pywrap_tensorflow.TFE_ContextListDevices(
self._context_handle)
try:
self._num_gpus = 0
for i in range(pywrap_tensorflow.TF_DeviceListCount(device_list)):
dev_name = pywrap_tensorflow.TF_DeviceListName(device_list, i)
self._context_devices.append(pydev.canonical_name(dev_name))
spec = pydev.DeviceSpec.from_string(dev_name)
self._logical_devices.append(
LogicalDevice(name=dev_name, device_type=spec.device_type))
dev_type = pywrap_tensorflow.TF_DeviceListType(device_list, i)
if dev_type == "GPU":
self._num_gpus += 1
finally:
pywrap_tensorflow.TF_DeleteDeviceList(device_list)
def ensure_initialized(self):
"""Initialize handle and devices if not already done so."""
with self._initialize_lock:
if self._context_handle is not None:
return
assert self._context_devices is None
opts = pywrap_tensorflow.TFE_NewContextOptions()
try:
config_str = self.config.SerializeToString()
pywrap_tensorflow.TFE_ContextOptionsSetConfig(opts, config_str)
if self._device_policy is not None:
pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy(
opts, self._device_policy)
if self._execution_mode == ASYNC:
pywrap_tensorflow.TFE_ContextOptionsSetAsync(opts, True)
self._context_handle = pywrap_tensorflow.TFE_NewContext(opts)
finally:
pywrap_tensorflow.TFE_DeleteContextOptions(opts)
assert not (self._server_def and self._collective_ops_server_def), (
"Cannot enable remote execution as well as collective ops at the "
"moment. If this is important to you, please file an issue.")
if self._server_def is not None:
server_def_str = self._server_def.SerializeToString()
pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle, 600,
server_def_str)
elif self._collective_ops_server_def is not None:
server_def_str = self._collective_ops_server_def.SerializeToString()
pywrap_tensorflow.TFE_EnableCollectiveOps(self._context_handle,
server_def_str)
self._initialize_logical_devices()
def _clear_caches(self):
self.scalar_cache().clear()
self.ones_rank_cache().flush()
self.zeros_cache().flush()
def set_server_def(self, server_def, keep_alive_secs=600):
"""Allow setting a server_def on the context.
When a server def is replaced, it effectively clears a bunch of caches
within the context. If you attempt to use a tensor object that was pointing
to a tensor on the remote device, it will raise an error.
Args:
server_def: A tensorflow::ServerDef proto.
Enables execution on remote devices.
keep_alive_secs: Num. seconds after which the remote end will hang up.
As long as the client is still alive, the server state for the context
will be kept alive. If the client is killed (or there is some failure),
the server will clean up its context keep_alive_secs after the final RPC
it receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
self._server_def = server_def
if self._context_handle:
server_def_str = server_def.SerializeToString()
pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle,
keep_alive_secs, server_def_str)
# Clear all the caches in case there are remote tensors in them.
self._clear_caches()
self._initialize_logical_devices()
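  # Editorial sketch (comment only; the cluster topology is hypothetical):
  #   server_def = tensorflow_server_pb2.ServerDef(...)  # job/task layout
  #   context().set_server_def(server_def)
  # After the call, previously fetched remote tensor handles are invalid and
  # the device list is refreshed, as described in the docstring above.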
def enable_collective_ops(self, server_def):
"""Enable distributed collective ops with an appropriate server_def.
Args:
server_def: A tensorflow::ServerDef proto. Enables execution on remote
devices.
Raises:
ValueError: if server_def is None.
RuntimeError: if this method is not called at program startup.
"""
if not server_def:
raise ValueError("server_def is None.")
if self._context_handle is not None:
raise RuntimeError("Collective ops must be enabled at program startup")
self._collective_ops_server_def = server_def
def configure_collective_ops(
self,
collective_leader="",
scoped_allocator_enabled_ops=("CollectiveReduce",),
use_nccl_communication=False,
device_filters=None):
"""Configure collective ops.
Collective group leader is necessary for collective ops to run, other
configurations are mainly for the purpose of performance.
Args:
collective_leader: a device string for collective leader, e.g.
"/job:worker/replica:0/task:"; empty string means local execution of
collective ops.
scoped_allocator_enabled_ops: a tuple or a list of op names for scoped
allocator to run with.
use_nccl_communication: whether to use nccl communication for collective
ops.
device_filters: a tuple or a list of device strings. If set, corresponding
task can only see the devices filtered by these device filters.
Raises:
RuntimeError: if this method is not called at program startup.
"""
if self._collective_leader is not None:
if (self._collective_leader != collective_leader or
self._collective_scoped_allocator_enabled_ops !=
scoped_allocator_enabled_ops or
self._collective_use_nccl_communication != use_nccl_communication or
self._collective_device_filters != device_filters):
raise ValueError("Collective ops are already configured.")
else:
return
if self._context_handle is not None:
raise RuntimeError("Collective ops must be configured at program startup")
self._collective_leader = collective_leader
self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops
self._collective_use_nccl_communication = use_nccl_communication
self._collective_device_filters = device_filters
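  # Editorial usage sketch (comment only; all values are hypothetical).
  # Collective ops must be configured once, before the context handle exists:
  #   ctx = context()
  #   ctx.configure_collective_ops(
  #       collective_leader="/job:worker/replica:0/task:0",
  #       use_nccl_communication=True,
  #       device_filters=["/job:worker/task:0"])
  #   ctx.ensure_initialized()
  # Reconfiguring with different values afterwards raises ValueError.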
@property
def _handle(self):
if self._context_handle is None:
raise AssertionError("Context must be initialized first.")
return self._context_handle
@property
def _devices(self):
if self._context_devices is None:
raise AssertionError("Context must be initialized first.")
return self._context_devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
@tf_contextlib.contextmanager
def _mode(self, mode):
"""A context manager to allow setting the mode to EAGER/GRAPH."""
ctx = self._thread_local_data
old_mode = ctx.mode
old_is_eager = ctx.is_eager
ctx.mode = mode
ctx.is_eager = mode == EAGER_MODE
if mode == EAGER_MODE:
# Entering graph mode does not provide us with sufficient information to
# record a context switch; graph-based context switches are only logged
# when a graph is registered as the default graph.
self.context_switches.push(False, eager_mode, None)
try:
yield
finally:
ctx.is_eager = old_is_eager
ctx.mode = old_mode
if mode == EAGER_MODE:
self.context_switches.pop()
def executing_eagerly(self):
"""Returns True if current thread has eager executing enabled."""
return self._thread_local_data.is_eager
def scalar_cache(self):
"""Per-device cache for scalars."""
return self._thread_local_data.scalar_cache
  def ones_rank_cache(self):
    """Per-device cache of ones tensors, keyed by rank."""
    return self._thread_local_data.ones_rank_cache
  def zeros_cache(self):
    """Per-device cache of zeros tensors."""
    return self._thread_local_data.zeros_cache
@property
def scope_name(self):
"""Returns scope name for the current thread."""
return self._thread_local_data.scope_name
@scope_name.setter
def scope_name(self, s):
"""Sets scope name for the current thread."""
self._thread_local_data.scope_name = s
@property
def summary_writer(self):
"""Returns default summary writer for the current thread."""
return self._thread_local_data.summary_writer
@summary_writer.setter
def summary_writer(self, writer):
"""Sets default summary writer for the current thread."""
self._thread_local_data.summary_writer = writer
@property
def summary_recording(self):
"""Returns summary recording condition."""
return self._thread_local_data.summary_recording
@summary_recording.setter
def summary_recording(self, condition):
"""Sets summary recording condition."""
self._thread_local_data.summary_recording = condition
@property
def summary_recording_distribution_strategy(self):
"""Returns summary recording condition for distribution strategy."""
return self._thread_local_data.summary_recording_distribution_strategy
@summary_recording_distribution_strategy.setter
def summary_recording_distribution_strategy(self, condition):
"""Sets summary recording condition for distribution strategy."""
self._thread_local_data.summary_recording_distribution_strategy = condition
@property
def summary_step(self):
"""Returns summary step variable."""
return self._thread_local_data.summary_step
@summary_step.setter
def summary_step(self, step):
"""Sets summary step variable."""
self._thread_local_data.summary_step = step
@property
def device_name(self):
"""Returns the device name for the current thread."""
return self._thread_local_data.device_name
@property
def device_spec(self):
"""Returns the device spec for the current thread."""
return self._thread_local_data.device_spec
def _set_device(self, device_name, device_spec):
self._thread_local_data.device_name = device_name
self._thread_local_data.device_spec = device_spec
def device(self, name):
"""Context-manager to force placement of operations and Tensors on a device.
Args:
name: Name of the device or None to get default placement.
Returns:
Context manager that forces device placement.
Raises:
ValueError: If name is not a string or is an invalid device name.
RuntimeError: If device scopes are not properly nested.
"""
return _EagerDeviceContext(self, name)
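  # Editorial usage sketch (comment only; the device string is hypothetical):
  #   ctx = context()
  #   with ctx.device("/job:localhost/replica:0/task:0/device:CPU:0"):
  #     ...  # operations created here are pinned to the named device
  #   with ctx.device(None):
  #     ...  # restores default (automatic) placement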
def devices(self):
"""List of the names of devices available to execute operations."""
return self._devices
@property
def execution_mode(self):
"""Gets execution mode for current thread."""
# Only get the execution mode from the context if it has already been
# initialized
if self._context_handle is None:
return self._execution_mode
mode = self._thread_local_data.execution_mode
if mode is None:
mode = self._execution_mode
return mode
@execution_mode.setter
def execution_mode(self, mode):
"""Sets execution mode for current thread."""
if mode not in (None, SYNC, ASYNC):
raise ValueError(
"Execution mode should be None/SYNC/ASYNC. Got %s" % mode)
if mode is None:
mode = SYNC
if self._thread_local_data.execution_mode != mode:
self._thread_local_data.execution_mode = mode
# Only set the execution mode if the context has already been initialized
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextSetAsyncForThread(self._context_handle,
mode == ASYNC)
else:
self._execution_mode = mode
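  # Editorial sketch (comment only): switching the current thread to ASYNC
  # dispatch and draining pending work before going back to SYNC:
  #   ctx = context()
  #   ctx.execution_mode = ASYNC  # ops may return "non-ready" handles
  #   ...                         # enqueue operations
  #   ctx.async_wait()            # block until dispatched ops finish
  #   ctx.execution_mode = SYNC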
@property
def config(self):
"""Return the ConfigProto with all runtime deltas applied."""
# Ensure physical devices have been discovered and config has been imported
self._initialize_physical_devices()
config = config_pb2.ConfigProto()
if self._config is not None:
config.CopyFrom(self._config)
if self._optimizer_jit is not None:
config.graph_options.optimizer_options.global_jit_level = (
config_pb2.OptimizerOptions.ON_1
if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)
if self._intra_op_parallelism_threads is not None:
config.intra_op_parallelism_threads = self._intra_op_parallelism_threads
if self._inter_op_parallelism_threads is not None:
config.inter_op_parallelism_threads = self._inter_op_parallelism_threads
if self._soft_device_placement is not None:
config.allow_soft_placement = self._soft_device_placement
else:
config.allow_soft_placement = self.executing_eagerly()
if self._log_device_placement is not None:
config.log_device_placement = self._log_device_placement
def rewriter_toggle(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
(rewriter_config_pb2.RewriterConfig.ON
if toggle else rewriter_config_pb2.RewriterConfig.OFF))
def rewriter_bool(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
toggle)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
nodes = self._optimizer_experimental_options.get("min_graph_nodes", None)
if nodes is not None:
config.graph_options.rewrite_options.min_graph_nodes = nodes
# Compute device counts
config.device_count["CPU"] = 0
config.device_count["GPU"] = 0
for dev in self._physical_devices:
if dev not in self._visible_device_list:
continue
virtual_devices = self._virtual_device_map.get(dev)
if virtual_devices is None:
config.device_count[dev.device_type] += 1
else:
config.device_count[dev.device_type] += len(virtual_devices)
# Configure gpu_options
gpu_options = self._compute_gpu_options()
config.gpu_options.MergeFrom(gpu_options)
# Configure collective ops
if self._collective_leader:
config.experimental.collective_group_leader = self._collective_leader
if self._collective_scoped_allocator_enabled_ops:
rewrite_options = config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
for op in self._collective_scoped_allocator_enabled_ops:
rewrite_options.scoped_allocator_opts.enable_op.append(op)
if self._collective_use_nccl_communication:
config.experimental.collective_nccl = True
if self._collective_device_filters:
del config.device_filters[:]
for f in self._collective_device_filters:
config.device_filters.append(f)
return config
def _compute_gpu_options(self):
"""Build the GPUOptions proto."""
visible_device_list = []
virtual_devices = []
gpu_index = -1
memory_growths = set()
for dev in self.list_physical_devices("GPU"):
gpu_index += 1
if dev not in self._visible_device_list:
continue
growth = self._memory_growth_map[dev]
memory_growths.add(growth)
visible_device_list.append(str(gpu_index))
if self._virtual_device_map:
vdevs = self._virtual_device_map.get(dev, [])
device_limits = []
for virt_dev in vdevs:
device_limits.append(virt_dev.memory_limit)
virtual_devices.append(
config_pb2.GPUOptions.Experimental.VirtualDevices(
memory_limit_mb=device_limits))
# Only compute growth if virtual devices have not been configured and we
# have GPUs
if not virtual_devices and memory_growths:
if len(memory_growths) > 1:
raise ValueError("Memory growth cannot differ between GPU devices")
allow_growth = memory_growths.pop()
else:
allow_growth = None
return config_pb2.GPUOptions(
allow_growth=allow_growth,
visible_device_list=",".join(visible_device_list),
experimental=config_pb2.GPUOptions.Experimental(
virtual_devices=virtual_devices))
@property
def function_call_options(self):
"""Returns function call options for current thread.
Note that the returned object is still referenced by the eager context.
Returns: the FunctionCallOptions for current thread.
"""
if self._thread_local_data.function_call_options is None:
config = self.config
# Default to soft placement for functions unless specified
if self._soft_device_placement is None:
config.allow_soft_placement = True
self._thread_local_data.function_call_options = FunctionCallOptions(
config_proto=config)
return self._thread_local_data.function_call_options
@function_call_options.setter
def function_call_options(self, options):
"""Returns function call options for current thread."""
self._thread_local_data.function_call_options = options
def async_wait(self):
"""Waits for ops dispatched in ASYNC mode to finish."""
pywrap_tensorflow.TFE_ContextAsyncWait(self._handle)
def async_clear_error(self):
"""Clears errors raised during ASYNC execution."""
pywrap_tensorflow.TFE_ContextAsyncClearError(self._handle)
def num_gpus(self):
"""The number of GPUs available to execute operations."""
self.ensure_initialized()
return self._num_gpus
def add_function(self, fn):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
"""
self.ensure_initialized()
pywrap_tensorflow.TFE_ContextAddFunction(self._handle, fn)
def add_function_def(self, fdef):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fdef: A FunctionDef protocol buffer message.
"""
self.ensure_initialized()
fdef_string = fdef.SerializeToString()
pywrap_tensorflow.TFE_ContextAddFunctionDef(
self._handle, fdef_string, len(fdef_string))
def remove_function(self, name):
"""Remove a function from the context.
Once removed, the function cannot be executed anymore.
Args:
name: function signature name.
"""
self.ensure_initialized()
pywrap_tensorflow.TFE_ContextRemoveFunction(self._handle, name)
def has_function(self, name):
"""Check if a function `name` is registered."""
self.ensure_initialized()
return bool(pywrap_tensorflow.TFE_ContextHasFunction(self._handle, name))
def add_post_execution_callback(self, callback):
"""Add a post-execution callback to the context.
A post-execution callback is invoked immediately after an eager operation or
function has finished execution, providing access to the op's type, name
input and output tensors. Multiple execution callbacks can be added, in
which case the callbacks will be invoked in the order in which they are
added.
Args:
callback: a callable of the signature
`f(op_type, op_name, attrs, inputs, outputs)`.
`op_type` is the type of the operation that was just executed (e.g.,
`MatMul`).
      `op_name` is the name of the operation that was just executed. This
name is set by the client who created the operation and can be `None` if
it is unset.
`attrs` contains the attributes of the operation as a `tuple` of
alternating attribute names and attribute values.
`inputs` is the `list` of input `Tensor`(s) to the op.
`outputs` is the `list` of output `Tensor`(s) from the op.
Return value(s) from the callback are ignored.
"""
# TODO(cais): (b/64674139) Allow access to function-internal operations.
self._post_execution_callbacks.append(callback)
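  # Editorial sketch (comment only) of a conforming callback:
  #   def log_op(op_type, op_name, attrs, inputs, outputs):
  #     print("ran %s (%s): %d input(s), %d output(s)"
  #           % (op_type, op_name, len(inputs), len(outputs)))
  #   context().add_post_execution_callback(log_op)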
def clear_post_execution_callbacks(self):
"""Clear all post-execution callbacks added to the context."""
del self._post_execution_callbacks[:]
@property
def post_execution_callbacks(self):
"""Get the list of post-execution callbacks added to the context."""
return self._post_execution_callbacks
def _initialize_physical_devices(self):
"""Get local devices visible to the system."""
    # We lazily initialize self._physical_devices since we do not want to do
    # this in the constructor, as the backend may not be initialized yet.
with self._device_lock:
if self._physical_devices is not None:
return
devs = pywrap_tensorflow.TF_ListPhysicalDevices()
self._physical_devices = [
PhysicalDevice(name=d.decode(),
device_type=d.decode().split(":")[1]) for d in devs]
# Construct the visible device list from all physical devices but ignore
# XLA devices
self._visible_device_list = [
d for d in self._physical_devices
if not d.device_type.startswith("XLA")
]
self._memory_growth_map = {
d: None for d in self._physical_devices if d.device_type == "GPU"
}
# Import device settings that may have been passed into the constructor
self._import_config()
def list_physical_devices(self, device_type=None):
"""List local devices visible to the system.
This API allows a client to query the devices before they have been
initialized by the eager runtime. Additionally a user can filter by device
type, to get only CPUs or GPUs.
Args:
device_type: Optional device type to limit results to
Returns:
List of PhysicalDevice objects.
"""
self._initialize_physical_devices()
    if device_type is not None:
      return [
          d for d in self._physical_devices if d.device_type == device_type
      ]
return self._physical_devices
def _import_config(self):
"""Import config if passed in during construction.
If Context was created with a ConfigProto such as when calling
tf.compat.v1.enable_eager_execution(), then we need to pull out the
    various pieces we might be replacing and import them into our internal
class representation.
"""
if self._config is None:
return
num_cpus = self._config.device_count.get("CPU", 1)
if num_cpus != 1:
cpus = [d for d in self._physical_devices if d.device_type == "CPU"]
if num_cpus == 0:
self.set_visible_devices([], "CPU")
elif num_cpus > 1:
self.set_virtual_device_configuration(
cpus[0], [VirtualDeviceConfiguration() for _ in range(num_cpus)])
# Parse GPU options
gpus = [d for d in self._physical_devices if d.device_type == "GPU"]
# If there are no GPUs detected, simply ignore all the GPU options passed in
# rather than doing any validation checks.
if not gpus:
return
gpu_count = self._config.device_count.get("GPU", None)
visible_gpus = []
# TODO(gjn): Handle importing existing virtual GPU configuration
visible_indices = self._config.gpu_options.visible_device_list
if visible_indices:
for index in visible_indices.split(","):
if int(index) >= len(gpus):
raise ValueError("Invalid visible device index: %s" % index)
visible_gpus.append(gpus[int(index)])
else:
visible_gpus = gpus
if gpu_count is not None:
visible_gpus = visible_gpus[:gpu_count]
self.set_visible_devices(visible_gpus, "GPU")
def list_logical_devices(self, device_type=None):
"""Return logical devices."""
self.ensure_initialized()
devices = []
for dev in self._logical_devices:
if device_type is not None and device_type != dev.device_type:
continue
devices.append(dev)
return devices
def get_visible_devices(self, device_type=None):
"""Get the list of visible devices."""
self._initialize_physical_devices()
if device_type is None:
return self._visible_device_list
else:
return [
d for d in self._visible_device_list if d.device_type == device_type
]
def set_visible_devices(self, devices, device_type=None):
"""Set the list of visible devices."""
self._initialize_physical_devices()
if not isinstance(devices, list):
devices = [devices]
for d in devices:
if d not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(d))
if device_type is not None and d.device_type != device_type:
raise ValueError("Unrecognized device: %s" % repr(d))
visible_device_list = []
if device_type is not None:
visible_device_list = [
d for d in self._visible_device_list if d.device_type != device_type
]
visible_device_list += devices
if self._visible_device_list == visible_device_list:
return
if self._context_handle is not None:
raise RuntimeError(
"Visible devices cannot be modified after being initialized")
self._visible_device_list = visible_device_list
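  # Editorial usage sketch (comment only): restrict the runtime to the first
  # GPU while leaving CPU visibility unchanged:
  #   ctx = context()
  #   gpus = ctx.list_physical_devices("GPU")
  #   if gpus:
  #     ctx.set_visible_devices(gpus[0], "GPU")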
def get_memory_growth(self, dev):
"""Get if memory growth is enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._memory_growth_map[dev]
def set_memory_growth(self, dev, enable):
"""Set if memory growth should be enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev in self._virtual_device_map:
raise ValueError(
"Cannot set memory growth on device when virtual devices configured")
if dev.device_type != "GPU":
raise ValueError("Cannot set memory growth on non-GPU devices")
if self._memory_growth_map.get(dev) == enable:
return
if self._context_handle is not None:
raise RuntimeError(
"Physical devices cannot be modified after being initialized")
self._memory_growth_map[dev] = enable
def get_virtual_device_configuration(self, dev):
"""Get the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._virtual_device_map.get(dev)
def set_virtual_device_configuration(self, dev, virtual_devices):
"""Set the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev.device_type == "CPU":
for vdev in virtual_devices:
if vdev.memory_limit is not None:
raise ValueError("Setting memory limit on CPU virtual devices is "
"currently not supported")
elif dev.device_type == "GPU":
for vdev in virtual_devices:
if vdev.memory_limit is None:
raise ValueError(
"Setting memory limit is required for GPU virtual devices is")
else:
raise ValueError("Virtual devices are not supported for %s" %
                       dev.device_type)
if self._virtual_device_map.get(dev) == virtual_devices:
return
if self._context_handle is not None:
raise RuntimeError(
"Virtual devices cannot be modified after being initialized")
self._virtual_device_map[dev] = virtual_devices
@property
def optimizer_jit(self):
level = self.config.graph_options.optimizer_options.global_jit_level
return (level == config_pb2.OptimizerOptions.ON_1 or
level == config_pb2.OptimizerOptions.ON_2)
@optimizer_jit.setter
def optimizer_jit(self, enabled):
self._optimizer_jit = enabled
self._thread_local_data.function_call_options = None
def get_optimizer_experimental_options(self):
"""Get experimental options for the optimizer.
Returns:
Dictionary of current option values
"""
rewrite_options = self.config.graph_options.rewrite_options
options = {}
def rewriter_toggle(option):
attr = getattr(rewrite_options, option)
if attr != 0:
options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON)
def rewriter_bool(option):
options[option] = getattr(rewrite_options, option)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
if rewrite_options.min_graph_nodes != 0:
options["min_graph_nodes"] = rewrite_options.min_graph_nodes
return options
def set_optimizer_experimental_options(self, options):
"""Set experimental options for the optimizer.
Args:
options: Dictionary of options to modify
"""
self._optimizer_experimental_options.update(options)
self._thread_local_data.function_call_options = None
@property
def intra_op_parallelism_threads(self):
return self.config.intra_op_parallelism_threads
@intra_op_parallelism_threads.setter
def intra_op_parallelism_threads(self, num_threads):
if self._intra_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Intra op parallelism cannot be modified after initialization.")
self._intra_op_parallelism_threads = num_threads
@property
def inter_op_parallelism_threads(self):
return self.config.inter_op_parallelism_threads
@inter_op_parallelism_threads.setter
def inter_op_parallelism_threads(self, num_threads):
if self._inter_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Inter op parallelism cannot be modified after initialization.")
self._inter_op_parallelism_threads = num_threads
@property
def soft_device_placement(self):
return self.config.allow_soft_placement
@soft_device_placement.setter
def soft_device_placement(self, enabled):
self._soft_device_placement = enabled
self._thread_local_data.function_call_options = None
@property
def log_device_placement(self):
return self.config.log_device_placement
@log_device_placement.setter
def log_device_placement(self, enabled):
if self._log_device_placement == enabled:
return
if self._context_handle is not None:
raise RuntimeError(
"Device placement logging must be set at program startup")
self._log_device_placement = enabled
self._thread_local_data.function_call_options = None
@property
def device_policy(self):
# Only get the policy from the context if it has already been initialized
if self._context_handle is not None:
return pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(self._handle)
return self._device_policy
@device_policy.setter
def device_policy(self, policy):
if policy is None:
policy = DEVICE_PLACEMENT_SILENT
if self._device_policy != policy:
self._device_policy = policy
# Only set the policy if the context has already been initialized
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(
self._handle, self._device_policy)
def enable_run_metadata(self):
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
self.ensure_initialized()
pywrap_tensorflow.TFE_ContextEnableRunMetadata(self._handle)
def disable_run_metadata(self):
"""Disables tracing of op execution via RunMetadata."""
if not self._context_handle:
return
pywrap_tensorflow.TFE_ContextDisableRunMetadata(self._context_handle)
def enable_graph_collection(self):
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
self.ensure_initialized()
pywrap_tensorflow.TFE_ContextEnableGraphCollection(self._handle)
def disable_graph_collection(self):
"""Disables graph collection of executed functions."""
if not self._context_handle:
return
pywrap_tensorflow.TFE_ContextDisableGraphCollection(self._context_handle)
def export_run_metadata(self):
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer, or None if not enabled.
"""
if not self._context_handle:
return None
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TFE_ContextExportRunMetadata(
self._context_handle, buffer_)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
run_metadata = config_pb2.RunMetadata()
run_metadata.ParseFromString(compat.as_bytes(proto_data))
return run_metadata
@property
def context_switches(self):
"""Returns a stack of context switches."""
return self._context_switches
def start_step(self):
pywrap_tensorflow.TFE_ContextStartStep(self._handle)
def end_step(self):
pywrap_tensorflow.TFE_ContextEndStep(self._handle)
_context = None
_context_lock = threading.Lock()
class _EagerDeviceContext(object):
"""Context-manager forcing placement of ops and Tensors on a device."""
def __init__(self, ctx, device_name):
self._device_name = device_name
self._ctx = ctx
self._stack = []
def __enter__(self):
ctx = self._ctx
old_device_name = ctx.device_name
old_device_spec = ctx.device_spec
new_device_name = self._device_name
cache_key = (old_device_name, new_device_name)
try:
new_device_name, new_device_spec = _device_parsing_cache[cache_key]
except TypeError:
# Error while trying to compute the cache key.
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
except KeyError:
# Handle a cache miss.
if new_device_name is not None:
if not isinstance(new_device_name, six.string_types):
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
device_spec = pydev.DeviceSpec.from_string(new_device_name)
if old_device_name:
new_device_spec = copy.copy(old_device_spec)
else:
ctx.ensure_initialized()
new_device_spec = pydev.DeviceSpec.from_string(
ctx._context_devices[0]) # pylint: disable=protected-access
new_device_spec = new_device_spec.make_merged_spec(device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string("")
new_device_name = new_device_spec.to_string()
_device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access
self._stack.append((old_device_name, old_device_spec, new_device_spec))
def __exit__(self, *ex_info):
ctx = self._ctx
old_device_name, old_device_spec, new_device_spec = self._stack[-1]
if ctx.device_spec is not new_device_spec:
raise RuntimeError(
"Exiting device scope without proper scope nesting")
del self._stack[-1]
ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access
def _create_context():
global _context
with _context_lock:
if _context is None:
_context = Context()
def context():
"""Returns a singleton context object."""
if _context is None:
_create_context()
return _context
def context_safe():
"""Returns current context (or None if one hasn't been initialized)."""
return _context
def ensure_initialized():
"""Initialize the context."""
context().ensure_initialized()
def set_global_seed(seed):
"""Sets the eager mode seed."""
context()._set_global_seed(seed) # pylint: disable=protected-access
def global_seed():
"""Returns the eager mode seed."""
return context()._seed # pylint: disable=protected-access
def internal_operation_seed():
"""Returns the operation seed generated based on global seed."""
return context()._internal_operation_seed() # pylint: disable=protected-access
@tf_export("executing_eagerly")
def executing_eagerly():
"""Returns True if the current thread has eager execution enabled.
Eager execution is typically enabled via
`tf.compat.v1.enable_eager_execution`, but may also be enabled within the
context of a Python function via tf.contrib.eager.py_func.
"""
if context_safe() is None:
return default_execution_mode == EAGER_MODE
return context().executing_eagerly()
def in_eager_mode():
"""Use executing_eagerly() instead. This function will be removed."""
return executing_eagerly()
def shared_name(name=None):
"""Returns the anonymous shared name GUID if no shared name is specified.
In eager mode we need to use a unique shared name to avoid spurious sharing
issues. The runtime generates a unique name on our behalf when the reserved
GUID is used as a shared name.
Args:
name: Optional shared name
Returns:
Eager compatible shared name.
"""
if name or not executing_eagerly():
return name
# Ensure a unique name when eager execution is enabled to avoid spurious
# sharing issues.
return "cd2c89b7-88b7-44c8-ad83-06c2a9158347"
def graph_mode():
"""Context-manager to disable eager execution for the current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
def eager_mode():
"""Context-manager to enable eager execution for the current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
# TODO(agarwal): get rid of this and use ops.name_scope instead.
@contextlib.contextmanager
def namescope(name):
"""ContextManager for creating hierarchical name scopes."""
ctx = context()
old_name = ctx.scope_name
ctx.scope_name = "%s/%s" % (old_name, name) if old_name else name
try:
yield
finally:
ctx.scope_name = old_name
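# Usage sketch (illustrative): scopes nest hierarchically and the previous
# name is restored on exit.
#
#   with namescope('outer'):
#     with namescope('inner'):
#       assert scope_name() == 'outer/inner'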
def scope_name():
"""Name of the current scope."""
return context().scope_name
def device(name):
"""Context-manager to force placement of operations and Tensors on a device.
Example:
```python
with tf.device('gpu:0'):
with tf.device('cpu:0'):
shape = tf.constant([], dtype=tf.int32)
x = tf.random.truncated_normal(shape, dtype=tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to
perform automatic placement.
Returns:
Context manager for setting the device.
"""
ensure_initialized()
return context().device(name)
@tf_export("config.experimental_list_devices")
def list_devices():
"""List the names of the available devices.
Returns:
Names of the available devices, as a `list`.
"""
ensure_initialized()
return context().devices()
@tf_export("debugging.get_log_device_placement")
def get_log_device_placement():
"""Get if device placements are logged.
Returns:
If device placements are logged.
"""
return context().log_device_placement
@tf_export("debugging.set_log_device_placement")
def set_log_device_placement(enabled):
"""Set if device placements should be logged.
Args:
enabled: Whether to enable device placement logging.
"""
context().log_device_placement = enabled
@tf_contextlib.contextmanager
def device_policy(policy):
"""Context manager for setting device placement policy for current thread."""
ctx = context()
old_policy = ctx.device_policy
try:
ctx.device_policy = policy
yield
finally:
ctx.device_policy = old_policy
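# Usage sketch (illustrative): temporarily loosen the placement policy so
# that ops with inputs on another device copy them silently instead of
# raising.
#
#   with device_policy(DEVICE_PLACEMENT_SILENT):
#     pass  # cross-device inputs are copied transparently here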
def set_execution_mode(mode):
"""Sets execution mode for the current thread."""
context().execution_mode = mode
@tf_contextlib.contextmanager
def execution_mode(mode):
"""Context manager for setting execution mode for current thread."""
ctx = context()
old_mode = ctx.execution_mode
try:
ctx.execution_mode = mode
yield
finally:
ctx.execution_mode = old_mode
@tf_export("experimental.function_executor_type")
@tf_contextlib.contextmanager
def function_executor_type(executor_type):
"""Context manager for setting the executor of eager defined functions.
Eager defined functions are functions decorated by tf.contrib.eager.defun.
Args:
executor_type: a string for the name of the executor to be used to execute
functions defined by tf.contrib.eager.defun.
Yields:
Context manager for setting the executor of eager defined functions.
"""
current_options = context().function_call_options
old_options = copy.copy(current_options)
try:
current_options.executor_type = executor_type
yield
finally:
context().function_call_options = old_options
def async_wait():
"""Waits for ops dispatched in ASYNC mode to finish."""
return context().async_wait()
def async_clear_error():
"""Clears errors raised during ASYNC execution mode."""
return context().async_clear_error()
def num_gpus():
"""Get the number of available GPU devices.
Returns:
The number of available GPU devices.
"""
return context().num_gpus()
def enable_run_metadata():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_run_metadata()
def disable_run_metadata():
"""Disables tracing of op execution via RunMetadata."""
context().disable_run_metadata()
def enable_graph_collection():
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
context().enable_graph_collection()
def disable_graph_collection():
"""Disables graph collection of executed functions."""
context().disable_graph_collection()
def export_run_metadata():
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer.
"""
return context().export_run_metadata()
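# Usage sketch (illustrative): bracket the ops of interest with the tracing
# toggles above, then pull the accumulated proto.
#
#   enable_run_metadata()
#   # ... run some eager ops ...
#   metadata = export_run_metadata()
#   disable_run_metadata()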
def set_server_def(server_def):
context().set_server_def(server_def)
def add_function(fdef):
"""Add a function definition to the context."""
context().add_function(fdef)
def remove_function(name):
"""Remove a function from the context."""
context().remove_function(name)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
if context_safe() is None:
# Context not yet initialized. Assume graph mode following the
# default implementation in `is_in_graph_mode`.
return True
return not executing_eagerly()
is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
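# Usage sketch (illustrative): the thread-local mode switches above compose
# with executing_eagerly().
#
#   with eager_mode():
#     assert executing_eagerly()
#   with graph_mode():
#     assert not executing_eagerly()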
|
tensorflow-master
|
tensorflow/python/eager/context.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic tests for gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
# Importing nn_grad for the registration functions.
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
@custom_gradient.custom_gradient
def two_outputs(a, b):
mm = math_ops.matmul(a, b)
r = math_ops.reduce_sum(mm)
def grad(dmm, dr):
return [
math_ops.matmul(dmm, b, transpose_b=True) +
math_ops.matmul(array_ops.ones_like(b * dr), b, transpose_b=True),
math_ops.matmul(a, dmm, transpose_b=True) +
math_ops.matmul(a, array_ops.ones_like(a) * dr, transpose_b=True)
]
return [mm, r], grad
@custom_gradient.custom_gradient
def gradient_is_constant(x):
result = x * x
def grad(dr):
return [dr]
return result, grad
class TapeTest(test.TestCase):
def testMultiOutput(self):
def fn(x, y):
c = x + y
# Multiple outputs from split.
d, f = array_ops.split(c, 2)
return d + f
a = constant_op.constant([[1., 0.], [0., 1.]])
b = constant_op.constant([[1., 2.], [3., 4.]])
da, db = backprop.gradients_function(fn, [0, 1])(a, b)
with context.graph_mode(), self.cached_session():
tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)
tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
tf_c = tf_a + tf_b
tf_d, tf_f = array_ops.split(tf_c, 2, axis=1)
tf_e = tf_d + tf_f
tf_da, tf_db = gradients_impl.gradients(tf_e, [tf_a, tf_b])
self.assertAllEqual(da, self.evaluate(tf_da))
self.assertAllEqual(db, self.evaluate(tf_db))
def testBasicFunctional(self):
def forward(a, b):
mm = math_ops.matmul(a, b)
return math_ops.reduce_sum(mm)
aa = constant_op.constant([[1., 0.], [0., 1.]])
bb = constant_op.constant([[1., 2.], [3., 4.]])
da, = backprop.gradients_function(forward, ['a'])(aa, bb)
self.assertAllEqual(da,
math_ops.matmul(
array_ops.ones_like(aa),
array_ops.transpose(bb)).numpy())
def testBasicFunctionalPositionalArg(self):
def forward(a, b):
mm = math_ops.matmul(a, b)
return math_ops.reduce_sum(mm)
aa = constant_op.constant([[1., 0.], [0., 1.]])
bb = constant_op.constant([[1., 2.], [3., 4.]])
da, = backprop.gradients_function(forward, [0])(aa, bb)
self.assertAllEqual(da,
math_ops.matmul(
array_ops.ones_like(aa),
array_ops.transpose(bb)).numpy())
def testBasicFunctionalWithValue(self):
def forward(a, b):
mm = math_ops.matmul(a, b)
return math_ops.reduce_sum(mm)
aa = constant_op.constant([[1., 0.], [0., 1.]])
bb = constant_op.constant([[1., 2.], [3., 4.]])
val, (da,) = backprop.val_and_grad_function(forward, ['a'])(aa, bb)
self.assertAllEqual(da,
math_ops.matmul(
array_ops.ones_like(aa),
array_ops.transpose(bb)))
self.assertAllEqual(val, forward(aa, bb))
def testTwoOutputs(self):
def fn(x, y):
mm, r = two_outputs(x, y)
return r + math_ops.reduce_sum(mm)
a = constant_op.constant([[1., 0.], [0., 1.]])
b = constant_op.constant([[1., 2.], [3., 4.]])
da, db = backprop.gradients_function(fn, [0, 1])(a, b)
with context.graph_mode(), self.cached_session():
tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)
tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
tf_mm = math_ops.matmul(tf_a, tf_b)
tf_rr = 2 * math_ops.reduce_sum(tf_mm)
tf_da, tf_db = gradients_impl.gradients(tf_rr, [tf_a, tf_b])
self.assertAllEqual(da, self.evaluate(tf_da))
self.assertAllEqual(db, self.evaluate(tf_db))
def testGcTwoOutputs(self):
def fn(x, y):
return nn_ops.sparse_softmax_cross_entropy_with_logits(logits=x,
labels=y)[0]
labels = constant_op.constant([0])
logits = constant_op.constant([[0.0]])
grad, = backprop.gradients_function(fn, [0])(logits, labels)
self.assertAllEqual(grad, [[0.0]])
def testTfTensor(self):
def fn(x):
return x
t = constant_op.constant(1.0)
g, = backprop.gradients_function(fn, [0])(t)
self.assertAllEqual(g, 1.0)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/eager/tape_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for forward-mode automatic differentiation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
# TODO(allenl): Special-case op gradients and tf.functions to avoid unnecessary
# evaluation of gradient functions.
class ForwardGradientAccumulator(object):
"""Computes Jacobian-vector products using forward-mode autodiff.
Example:
```
with ForwardGradientAccumulator() as acc:
x = tf.constant([[2.0, 3.0], [1.0, 4.0]])
acc.watch(x, tf.constant([[5., 6.], [7., 8.]]))
y = tf.reduce_sum(tf.sin(x) * tf.tan(x), axis=1)
jvp = acc.jvp(y)
```
"""
def __init__(self):
self._accumulator = None
self._recording = False
def __enter__(self):
self._push_accumulator()
return self
def __exit__(self, typ, value, traceback):
if self._recording:
self._pop_accumulator()
def _push_accumulator(self):
if self._recording:
raise ValueError("Accumulator is already recording.")
if self._accumulator is None:
self._accumulator = pywrap_tensorflow.TFE_Py_ForwardAccumulatorNew()
else:
# TODO(allenl): Allow reuse
raise NotImplementedError("Accumulator reuse isn't implemented yet.")
self._recording = True
def _pop_accumulator(self):
if not self._recording:
raise ValueError("Tape is not recording.")
pywrap_tensorflow.TFE_Py_ForwardAccumulatorSetRemove(self._accumulator)
self._recording = False
def watch(self, tensor, tangents):
"""Ensures that `tensor` is being traced by this tape.
Mathematically, `tangents` is part of a vector right-multiplying the
Jacobian matrix (a Jacobian-vector product) for the function computed while
the tape is active. Since JVPs are computed in forward mode as the
computation happens, this vector must be supplied before the computation
takes place.
Watching a single Tensor multiple times sums the supplied `tangents`. An
un-watched Tensor has zeros for its tangent vector.
Args:
tensor: A Tensor or list of Tensors.
tangents: A Tensor or list of Tensors matching `tensor`.
"""
nest.assert_same_structure(tensor, tangents)
for t, g in zip(nest.flatten(tensor), nest.flatten(tangents)):
if not t.dtype.is_floating:
logging.log_first_n(
logging.WARN, "The dtype of the watched tensor must be "
"floating (e.g. tf.float32), got %r", 5, t.dtype)
if hasattr(t, "handle"):
# TODO(allenl): Handle watching variables.
raise NotImplementedError("Currently only Tensors may be watched.")
g = ops.convert_to_tensor(g, dtype=t.dtype)
pywrap_tensorflow.TFE_Py_ForwardAccumulatorWatch(self._accumulator, t, g)
def jvp(self, target):
"""Fetches the Jacobian-vector product computed for `target`.
Note that this function performs no computation, and simply looks up a
JVP that was already computed (unlike backprop using a
`tf.GradientTape`, where the computation happens on the call to
`tape.gradient`).
Args:
target: A watched Tensor or structure of Tensors to fetch the JVPs for.
Returns:
Tensors with the same shapes and dtypes as `target`, or None if no JVP
is available.
"""
if self._accumulator is None:
raise ValueError("Called jvp() without first tracing anything.")
return nest.map_structure(
functools.partial(pywrap_tensorflow.TFE_Py_ForwardAccumulatorJVP,
self._accumulator), target)
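# A numeric sketch of the accumulator, assuming eager execution and
# `import tensorflow as tf`: for y = x * x with tangent t, the JVP is
# 2 * x * t.
#
#   with ForwardGradientAccumulator() as acc:
#     x = tf.constant(3.0)
#     acc.watch(x, tf.constant(1.0))
#     y = x * x
#     print(acc.jvp(y))  # expected: 6.0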
|
tensorflow-master
|
tensorflow/python/eager/forwardprop.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Profiler client APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import errors
def start_tracing(service_addr,
logdir,
duration_ms,
worker_list='',
include_dataset_ops=True,
num_tracing_attempts=3):
"""Sends grpc requests to profiler server to perform on-demand profiling.
This method blocks the caller thread until it receives the tracing result.
Args:
service_addr: Address of profiler service e.g. localhost:6009.
logdir: Path of TensorBoard log directory e.g. /tmp/tb_log.
duration_ms: Duration of tracing or monitoring in ms.
worker_list: The list of worker TPUs that we are about to profile in the
current session. (TPU only)
include_dataset_ops: Set to false to profile longer traces.
num_tracing_attempts: Automatically retry N times when no trace event is
collected.
Raises:
UnavailableError: If no trace event is collected.
"""
if not pywrap_tensorflow.TFE_ProfilerClientStartTracing(
service_addr, logdir, worker_list, include_dataset_ops, duration_ms,
num_tracing_attempts):
raise errors.UnavailableError(None, None, 'No trace event is collected.')
def monitor(service_addr,
duration_ms,
monitoring_level=1,
display_timestamp=False):
"""Sends grpc requests to profiler server to perform on-demand monitoring.
This method blocks the caller thread until it receives the monitoring result.
Args:
service_addr: Address of profiler service e.g. localhost:6009.
duration_ms: Duration of tracing or monitoring in ms.
monitoring_level: Choose a monitoring level between 1 and 2 to monitor your
job. Level 2 is more verbose than level 1 and shows more metrics.
display_timestamp: Set to true to display timestamp in monitoring result.
Returns:
A string of monitoring output.
"""
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TFE_ProfilerClientMonitor(service_addr, duration_ms,
monitoring_level,
display_timestamp, buffer_)
return pywrap_tensorflow.TF_GetBuffer(buffer_)
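# Usage sketch (illustrative; assumes a profiler server is already listening
# on the given address):
#
#   start_tracing('localhost:6009', '/tmp/tb_log', duration_ms=2000)
#   print(monitor('localhost:6009', 1000, monitoring_level=2))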
|
tensorflow-master
|
tensorflow/python/eager/profiler_client.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Utility to lift subgraphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
def _graph_inputs(op):
return [x.op for x in op.inputs] + list(op.control_inputs)
def _as_operation(op_or_tensor):
if isinstance(op_or_tensor, ops.Tensor):
return op_or_tensor.op
return op_or_tensor
class UnliftableError(Exception):
"""Raised if a Tensor cannot be lifted from the graph."""
# Prevent autograph from rewriting this error.
ag_pass_through = True
def _constant_inputs(op_or_tensor):
return all(_as_operation(i).type == u"Const"
and not _as_operation(i).control_inputs
for i in _graph_inputs(_as_operation(op_or_tensor)))
def _path_from(from_op, tensor, sources):
"""Find one path from `from_op` to `tensor`, ignoring `sources`.
Args:
from_op: A `tf.Operation`.
tensor: A `tf.Operation` or `tf.Tensor`.
sources: A list of `tf.Tensor`.
Returns:
A python string containing the path, or "??" if none is found.
"""
visited_ops = set([x.op for x in sources])
ops_to_visit = [_as_operation(tensor)]
some_op_output = {}
while ops_to_visit:
op = ops_to_visit.pop()
if op in visited_ops:
continue
visited_ops.add(op)
if op == from_op:
path_op = op
path = [path_op]
final_op = _as_operation(tensor)
while path_op != final_op:
path_op = some_op_output[path_op]
path.append(path_op)
return " <- ".join(["%s (%s)" % (x.name, x.type) for x in reversed(path)])
else:
for inp in _graph_inputs(op):
if inp not in visited_ops and inp not in sources:
some_op_output[inp] = op
ops_to_visit.append(inp)
return "??"
def _map_subgraph(init_tensor, sources, disallowed_placeholders, visited_ops,
op_outputs, add_sources):
"""Walk a Graph and capture the subgraph between init_tensor and sources.
Note: This function mutates visited_ops and op_outputs.
Arguments:
init_tensor: A Tensor or Operation where the subgraph terminates.
sources: A set of Tensors where subgraph extraction should stop.
disallowed_placeholders: An optional set of ops which may not appear in the
lifted graph. Defaults to all placeholders.
visited_ops: A set of operations which were visited in a prior pass.
op_outputs: A defaultdict containing the outputs of an op which are to be
copied into the new subgraph.
add_sources: A boolean indicating whether placeholders which are not in
sources should be allowed.
Returns:
The set of placeholders upon which init_tensor depends and are not in
sources.
Raises:
UnliftableError: if init_tensor depends on a placeholder which is not in
sources and add_sources is False.
"""
ops_to_visit = [_as_operation(init_tensor)]
extra_sources = set()
while ops_to_visit:
op = ops_to_visit.pop()
if op in visited_ops:
continue
visited_ops.add(op)
should_raise = False
if disallowed_placeholders is not None and op in disallowed_placeholders:
should_raise = True
elif op.type == "Placeholder":
if disallowed_placeholders is None and not add_sources:
should_raise = True
extra_sources.update(op.outputs)
if should_raise:
raise UnliftableError(
"Unable to lift tensor %s because it depends transitively on "
"placeholder %s via at least one path, e.g.: %s"
% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))
for inp in _graph_inputs(op):
op_outputs[inp].add(op)
if inp not in visited_ops and inp not in (sources or extra_sources):
ops_to_visit.append(inp)
return extra_sources
# Represents an input to `copied_op` which must be updated once
# `old_graph_tensor` has been copied.
_InputMutation = collections.namedtuple(
"_InputMutation",
["copied_op", "input_index", "old_graph_tensor"])
# Represents a control input to `copied_op` which must be added once
# `old_graph_op` has been copied.
_ControlMutation = collections.namedtuple(
"_ControlMutation",
["copied_op", "old_graph_op"])
def _copy_non_source(op, graph, op_map):
"""Copy an op directly to a given graph.
Generally `op`'s inputs should already have been copied. If this is not the
case, for example with v1 while_loops, then `_copy_non_source` inserts
placeholders for the unavailable Tensors and returns a list of required
mutations.
Args:
op: The op to be copied.
graph: The destination graph.
op_map: A dict mapping ops and tensors in the old graph to the new one.
Returns:
A tuple of (required_inputs, required_control_inputs):
required_inputs:
A list of `_InputMutation` tuples containing inputs to `copied_op` which
must be updated once `old_graph_tensor` has been copied.
required_control_inputs:
A list of `_ControlMutation` tuples containing control inputs to
`copied_op` which must be added once `old_graph_op` has been copied.
"""
input_mutations = []
control_mutations = []
copied_inputs = []
for input_index, original_input in enumerate(op.inputs):
copied_input = op_map.get(original_input, None)
if copied_input is None:
# An input for this op is missing due to a loop in the graph. We'll insert
# a placeholder for now and return information about the required post-hoc
# mutation.
copied_input = array_ops.placeholder(
name="unused_control_flow_input",
shape=original_input.shape,
dtype=original_input.dtype)
input_mutations.append(
# `copied_op` is filled in below, after we've created it.
_InputMutation(copied_op=None,
input_index=input_index,
old_graph_tensor=original_input))
copied_inputs.append(copied_input)
copied_control_inputs = []
for original_control_input in op.control_inputs:
copied_control_input = op_map.get(original_control_input, None)
if copied_control_input is None:
control_mutations.append(
_ControlMutation(copied_op=None,
old_graph_op=original_control_input))
else:
copied_control_inputs.append(copied_control_input)
# Don't copy over nodes with the _tpu_replicate attribute. This attribute is
# used to signal that the op was built inside a tpu_replicate context; if
# we're lifting it to another graph, we're similarly lifting it into another
# context.
with ops.control_dependencies(copied_control_inputs), ops.device(op.device):
copied_op = graph.create_op(
op_type=op.type,
inputs=copied_inputs,
dtypes=[x.dtype for x in op.outputs],
attrs={
key: value for key, value in op.node_def.attr.items()
if not key.startswith("_class") and
not key.startswith("_tpu_replicate")
}, # b/128981532.
name=op.name)
op_map[op] = copied_op
for i, o in enumerate(op.outputs):
op_map[o] = copied_op.outputs[i]
return ([mutation._replace(copied_op=copied_op)
for mutation in input_mutations],
[mutation._replace(copied_op=copied_op)
for mutation in control_mutations])
def _copy_source(s, graph, op_map, handle_captures, inverse_captures):
"""Create a source in a graph based on a Tensor from a different graph.
This function creates a placeholder analog of `s` in a graph with the
following behavior:
1) If s is a captured Tensor or Variable and handle_captures is set to True,
simply capture it in the new graph as well.
2) If s is a PlaceholderWithDefault whose default is a constant, preserve
said default in the new graph.
3) When applicable, copy resource variable metadata from `s` to the newly
created placeholder.
Args:
s: The source of interest.
graph: The destination graph.
op_map: A dict mapping ops and tensors in the old graph to the new one.
handle_captures: A boolean indicating whether to re-capture s in the new
graph or simply create a vanilla placeholder.
inverse_captures: A dict mapping s back to the Tensor or Variable that it
captures.
"""
if handle_captures and s in inverse_captures:
copied_placeholder = graph.capture(inverse_captures[s], name=s.op.name)
elif s.op.type == "PlaceholderWithDefault" and _constant_inputs(s):
# Copy the default value to the graph.
default_value = s.op.inputs[0]
unavailable_inputs, unavailable_control_inputs = _copy_non_source(
op=default_value.op, graph=graph, op_map=op_map)
if unavailable_inputs or unavailable_control_inputs:
raise AssertionError(
"Could not copy source node {} because it has inputs."
.format(default_value))
with ops.device(s.op.device):
copied_placeholder = array_ops.placeholder_with_default(
input=op_map[default_value], shape=s.shape, name=s.op.name)
else:
with ops.device(s.op.device):
copied_placeholder = array_ops.placeholder(
dtype=s.dtype, shape=s.shape, name=s.op.name)
base_handle = resource_variable_ops.get_resource_handle_data(s)
if base_handle.shape_and_type:
resource_variable_ops._set_handle_shapes_and_types( # pylint: disable=protected-access
copied_placeholder,
base_handle,
graph_mode=True)
op_map[s] = copied_placeholder
# Add an entry for the op of the source tensor so that if there are any nodes
# depending on that op via control dependencies it can work correctly.
op_map[s.op] = copied_placeholder.op
def lift_to_graph(init_tensors, graph, sources=None,
disallowed_placeholders=None, add_sources=False,
handle_captures=False, base_graph=None):
"""Copies the tensor and all its inputs recursively to the outer graph.
Args:
init_tensors: The Tensors to lift.
graph: The graph to lift to.
sources: Optional sequence of nodes to start from. If omitted, the whole
subgraph which feeds into `init_tensors` is lifted.
disallowed_placeholders: An optional set of ops which may not appear in the
lifted graph. Defaults to all placeholders.
add_sources: A boolean indicating whether placeholders which are not in
sources should be allowed.
handle_captures: A boolean indicating whether to re-capture the source
tensors in the new graph or simply create vanilla placeholders.
base_graph: The graph from which to lift ops. This will be inferred if not
specified.
Returns:
A mapping from ops in the current default graph to ops in `graph`.
Raises:
UnliftableError: If a placeholder blocks lifting.
"""
variable_init_tensors = {i for i in init_tensors if isinstance(
i, resource_variable_ops.ResourceVariable)}
init_tensors = set(init_tensors).difference(variable_init_tensors)
base_graph = base_graph or list(init_tensors)[0].graph
# Check that the initializer does not depend on any placeholders.
sources = set(sources or [])
visited_ops = set([x.op for x in sources])
op_outputs = collections.defaultdict(set)
# First we extract the subgraph between init_tensors and sources.
for init_tensor in init_tensors:
sources.update(_map_subgraph(
init_tensor=init_tensor,
sources=sources,
disallowed_placeholders=disallowed_placeholders,
visited_ops=visited_ops,
op_outputs=op_outputs,
add_sources=add_sources))
# Try to topologically sort the nodes we've extracted. Now we know how many of
# their outputs are part of this subgraph.
ops_to_copy = []
marked_ops = set([])
ops_to_visit = [_as_operation(t) for t in init_tensors
if not op_outputs[_as_operation(t)]]
unvisited_ops = set(ops_to_visit)
while unvisited_ops:
while ops_to_visit:
op = ops_to_visit.pop()
if op in marked_ops:
continue
marked_ops.add(op)
ops_to_copy.append(op)
for inp in _graph_inputs(op):
# Don't lift TPUReplicateMetadata nodes out of the function, because they
# have no registered kernels.
if inp.name == "TPUReplicateMetadata":
continue
unvisited_ops.add(inp)
if (all(x in marked_ops for x in op_outputs[inp]) and
inp not in sources):
ops_to_visit.append(inp)
unvisited_ops.difference_update(marked_ops)
if unvisited_ops:
# `unvisited_ops` should only have elements if the graph has a loop. In
# this case we want to keep copying and there's no topological ordering;
# we'll do ugly post-hoc mutations instead.
ops_to_visit.append(next(iter(unvisited_ops)))
# When lifting from one FuncGraph to another, we will need to capture the
# relevant tensors as well.
captures = collections.OrderedDict()
if (isinstance(base_graph, func_graph.FuncGraph) and
isinstance(graph, func_graph.FuncGraph)):
captures = base_graph.captures
inverse_captures = {v: k for k, v in captures.items()}
# ops_to_copy now holds a reverse topologically sorted list of ops which
# ends in the initializer. We copy those to the outermost graph and
# build the initialization op there.
with graph.as_default():
op_map = {i: i for i in variable_init_tensors} # Pass through variables.
source_ops = set()
# Add the sources in the same order as the original graph.
for s in six.itervalues(captures):
if s in sources:
sources.remove(s)
source_ops.add(s.op)
_copy_source(
s=s,
graph=graph,
op_map=op_map,
handle_captures=handle_captures,
inverse_captures=inverse_captures)
for s in sources:
source_ops.add(s.op)
_copy_source(
s=s,
graph=graph,
op_map=op_map,
handle_captures=handle_captures,
inverse_captures=inverse_captures)
input_mutations = []
control_mutations = []
for op in reversed(ops_to_copy):
if op in source_ops:
continue
new_input_mutations, new_control_mutations = _copy_non_source(
op=op, graph=graph, op_map=op_map)
input_mutations.extend(new_input_mutations)
control_mutations.extend(new_control_mutations)
# Mutate the new graph to insert any loops which existed in the source
# graph due to v1 while_loops.
#
# pylint: disable=protected-access
with graph._mutation_lock():
for mutation in input_mutations:
mutation.copied_op._update_input(
mutation.input_index, op_map[mutation.old_graph_tensor])
for mutation in control_mutations:
# Don't lift TPUReplicateMetadata nodes out of the function, because they
# have no registered kernels.
if mutation.old_graph_op.name == "TPUReplicateMetadata":
continue
mutation.copied_op._add_control_input(op_map[mutation.old_graph_op])
# pylint: enable=protected-access
return op_map
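# Usage sketch (illustrative), assuming `t` is a Tensor whose feeding
# subgraph contains no placeholders:
#
#   target_graph = ops.Graph()
#   op_map = lift_to_graph([t], target_graph, sources=[])
#   lifted_t = op_map[t]  # the copy of `t` now living in `target_graph`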
|
tensorflow-master
|
tensorflow/python/eager/lift_to_graph.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for operations in eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import weakref
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
class OpsTest(test_util.TensorFlowTestCase):
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = math_ops.matmul(three, five)
self.assertEqual([[15.0]], product.numpy())
def testExecuteStringAttr(self):
three = constant_op.constant(3.0)
checked_three = array_ops.check_numerics(three,
message='just checking')
self.assertEqual([[3]], checked_three.numpy())
def testExecuteFloatAttr(self):
three = constant_op.constant(3.0)
almost_three = constant_op.constant(2.8)
almost_equal = math_ops.approximate_equal(
three, almost_three, tolerance=0.3)
self.assertTrue(almost_equal)
def testExecuteIntAttr(self):
three = constant_op.constant(3)
four = constant_op.constant(4)
total = math_ops.add_n([three, four])
self.assertAllEqual(7, total)
def testExecuteBoolAttr(self):
three = constant_op.constant([[3]])
five = constant_op.constant([[5]])
product = math_ops.matmul(three, five, transpose_a=True)
self.assertAllEqual([[15]], product)
def testExecuteOneListOutput(self):
split_dim = constant_op.constant(1)
value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
x1, x2, x3 = array_ops.split(value, 3, axis=split_dim)
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testGraphMode(self):
graph = ops.Graph()
with graph.as_default(), context.graph_mode():
array_ops.placeholder(dtypes.int32)
self.assertEqual(1, len(graph.get_operations()))
# See comments on handling of int32 tensors on GPU in
# EagerTensor.__init__.
@test_util.run_gpu_only
def testInt32CPUDefault(self):
with context.device('/gpu:0'):
r = constant_op.constant(1) + constant_op.constant(2)
self.assertAllEqual(r, 3)
def testExecuteListOutputLen1(self):
split_dim = constant_op.constant(1)
value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
result = array_ops.split(value, 1, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])
def testExecuteListOutputLen0(self):
empty = constant_op.constant([], dtype=dtypes.int32)
result = array_ops.unstack(empty, 0)
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def testExecuteMultipleNonListOutput(self):
x = constant_op.constant([1, 2, 3, 4, 5, 6])
y = constant_op.constant([1, 3, 5])
result = array_ops.listdiff(x, y)
out, idx = result
self.assertTrue(out is result.out)
self.assertTrue(idx is result.idx)
self.assertAllEqual([2, 4, 6], out)
self.assertAllEqual([1, 3, 5], idx)
def testExecuteMultipleListOutput(self):
split_dim = constant_op.constant(1, dtype=dtypes.int64)
indices = constant_op.constant([[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
dtype=dtypes.int64)
values = constant_op.constant([2, 3, 5, 7, 11])
shape = constant_op.constant([2, 7], dtype=dtypes.int64)
result = sparse_ops.gen_sparse_ops.sparse_split(
split_dim,
indices,
values,
shape,
num_split=2)
output_indices, output_values, output_shape = result
self.assertEqual(2, len(output_indices))
self.assertEqual(2, len(output_values))
self.assertEqual(2, len(output_shape))
self.assertEqual(output_indices, result.output_indices)
self.assertEqual(output_values, result.output_values)
self.assertEqual(output_shape, result.output_shape)
self.assertAllEqual([[0, 2], [1, 0], [1, 1]], output_indices[0])
self.assertAllEqual([[0, 0], [0, 1]], output_indices[1])
self.assertAllEqual([2, 7, 11], output_values[0])
self.assertAllEqual([3, 5], output_values[1])
self.assertAllEqual([2, 4], output_shape[0])
self.assertAllEqual([2, 3], output_shape[1])
# TODO(josh11b): Test an op that has multiple outputs, some but not
# all of which are lists. Examples: barrier_take_many (currently
# unsupported since it uses a type list) or sdca_optimizer (I don't
# have an example of legal inputs & outputs).
def testComposition(self):
x = constant_op.constant(1, dtype=dtypes.int32)
three_x = x + x + x
self.assertEquals(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
def testOperatorOverrides(self):
def ops_test(v1, v2):
a = constant_op.constant(v1)
b = constant_op.constant(v2)
self.assertAllEqual((-a), np.negative(v1))
self.assertAllEqual(abs(b), np.absolute(v2))
self.assertAllEqual((a + b), np.add(v1, v2))
self.assertAllEqual((a - b), np.subtract(v1, v2))
self.assertAllEqual((a * b), np.multiply(v1, v2))
self.assertAllEqual((a * a), np.multiply(v1, v1))
if all(x >= 0 for x in v2):
self.assertAllEqual((a**b), np.power(v1, v2))
self.assertAllEqual((a / b), np.true_divide(v1, v2))
self.assertAllEqual((a / a), np.true_divide(v1, v1))
self.assertAllEqual((a % b), np.mod(v1, v2))
self.assertAllEqual((a < b), np.less(v1, v2))
self.assertAllEqual((a <= b), np.less_equal(v1, v2))
self.assertAllEqual((a > b), np.greater(v1, v2))
self.assertAllEqual((a >= b), np.greater_equal(v1, v2))
self.assertAllEqual((a == b), np.equal(v1, v2)[0])
self.assertAllEqual((a != b), np.not_equal(v1, v2)[0])
self.assertAllEqual(v1[0], a[constant_op.constant(0)])
ops_test([1, 4, 8], [2, 3, 5])
ops_test([1, -4, -5], [-2, 3, -6])
def test_basic_slice(self):
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
self.assertAllEqual(npt[:, :, :], t[:, :, :])
self.assertAllEqual(npt[::, ::, ::], t[::, ::, ::])
self.assertAllEqual(npt[::1, ::1, ::1], t[::1, ::1, ::1])
self.assertAllEqual(npt[::1, ::5, ::2], t[::1, ::5, ::2])
self.assertAllEqual(npt[::-1, :, :], t[::-1, :, :])
self.assertAllEqual(npt[:, ::-1, :], t[:, ::-1, :])
self.assertAllEqual(npt[:, :, ::-1], t[:, :, ::-1])
self.assertAllEqual(npt[-2::-1, :, ::1], t[-2::-1, :, ::1])
self.assertAllEqual(npt[-2::-1, :, ::2], t[-2::-1, :, ::2])
def testDegenerateSlices(self):
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
# degenerate by offering a forward interval with a negative stride
self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
# degenerate with a reverse interval with a positive stride
self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
# empty interval in every dimension
self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])
def testEllipsis(self):
npt = np.array(
[[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]])
t = constant_op.constant(npt)
self.assertAllEqual(npt[0:], t[0:])
# implicit ellipsis
self.assertAllEqual(npt[0:, ...], t[0:, ...])
# ellipsis alone
self.assertAllEqual(npt[...], t[...])
# ellipsis at end
self.assertAllEqual(npt[0:1, ...], t[0:1, ...])
# ellipsis at begin
self.assertAllEqual(npt[..., 0:1], t[..., 0:1])
# ellipsis at middle
self.assertAllEqual(npt[0:1, ..., 0:1], t[0:1, ..., 0:1])
def testShrink(self):
npt = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]])
t = constant_op.constant(npt)
self.assertAllEqual(npt[:, :, :, :, 3], t[:, :, :, :, 3])
self.assertAllEqual(npt[..., 3], t[..., 3])
self.assertAllEqual(npt[:, 0], t[:, 0])
self.assertAllEqual(npt[:, :, 0], t[:, :, 0])
@test_util.run_gpu_only
def testOpWithInputsOnDifferentDevices(self):
# The GPU kernel for the Reshape op requires that the
# shape input be on CPU.
value = constant_op.constant([1., 2.]).gpu()
shape = constant_op.constant([2, 1])
reshaped = array_ops.reshape(value, shape)
self.assertAllEqual([[1], [2]], reshaped.cpu())
def testInt64(self):
# Fill requires the first input to be an int32 tensor.
self.assertAllEqual(
[1.0, 1.0],
array_ops.fill(constant_op.constant([2], dtype=dtypes.int64),
constant_op.constant(1)))
@test_util.run_gpu_only
def testOutputOnHostMemory(self):
# The Shape op kernel on GPU places the output in host memory.
value = constant_op.constant([1.]).gpu()
shape = array_ops.shape(value)
self.assertEqual([1], shape.numpy())
@test_util.run_gpu_only
def testSilentCopy(self):
# Temporarily replace the context
# pylint: disable=protected-access
del context._context
context._context = context.Context()
try:
config.set_device_policy('silent')
cpu_tensor = constant_op.constant(1.0)
gpu_tensor = cpu_tensor.gpu()
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
finally:
del context._context
context._context = context.Context()
# pylint: enable=protected-access
@test_util.run_gpu_only
def testSoftPlacement(self):
# Temporarily replace the context
# pylint: disable=protected-access
del context._context
context._context = context.Context()
try:
config.set_device_policy('silent')
config.set_soft_device_placement(True)
cpu_tensor = constant_op.constant(1.0)
result = cpu_tensor + cpu_tensor
self.assertEqual(result.device,
'/job:localhost/replica:0/task:0/device:GPU:0')
finally:
del context._context
context._context = context.Context()
# pylint: enable=protected-access
def testRandomUniform(self):
scalar_shape = constant_op.constant([], dtype=dtypes.int32)
x = random_ops.random_uniform(scalar_shape)
self.assertEquals(0, x.shape.ndims)
self.assertEquals(dtypes.float32, x.dtype)
x = random_ops.random_uniform(
scalar_shape, minval=constant_op.constant(5.),
maxval=constant_op.constant(6.))
self.assertLess(x, 6)
self.assertGreaterEqual(x, 5)
def testArgsToMatchingEagerDefault(self):
# Uses default
ctx = context.context()
t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int32)
self.assertEquals(t, dtypes.int32)
self.assertEquals(r[0].dtype, dtypes.int32)
t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int64)
self.assertEquals(t, dtypes.int64)
self.assertEquals(r[0].dtype, dtypes.int64)
# Doesn't use default
t, r = execute.args_to_matching_eager(
[['string', 'arg']], ctx, dtypes.int32)
self.assertEquals(t, dtypes.string)
self.assertEquals(r[0].dtype, dtypes.string)
def testFlattenLayer(self):
flatten_layer = core.Flatten()
x = constant_op.constant([[[-10, -20], [-30, -40]], [[10, 20], [30, 40]]])
y = flatten_layer(x)
self.assertAllEqual([[-10, -20, -30, -40], [10, 20, 30, 40]], y)
def testIdentity(self):
self.assertAllEqual(2, array_ops.identity(2))
@test_util.run_gpu_only
def testIdentityOnVariable(self):
with context.device('/gpu:0'):
v = resource_variable_ops.ResourceVariable(True)
self.assertAllEqual(True, array_ops.identity(v))
def testIncompatibleSetShape(self):
x = constant_op.constant(1)
with self.assertRaises(ValueError):
x.set_shape((1, 2))
def testCompatibleSetShape(self):
x = constant_op.constant([[1, 2]])
x.set_shape(tensor_shape.TensorShape([None, 2]))
self.assertEqual(x.get_shape(), (1, 2))
def testCastScalarToPrimitiveTypes(self):
x = constant_op.constant(1.3)
self.assertIsInstance(int(x), int)
self.assertEqual(int(x), 1)
self.assertIsInstance(float(x), float)
self.assertAllClose(float(x), 1.3)
def testCastNonScalarToPrimitiveTypesFails(self):
x = constant_op.constant([1.3, 2])
with self.assertRaises(TypeError):
int(x)
with self.assertRaises(TypeError):
float(x)
def testRange(self):
x = constant_op.constant(2)
self.assertEqual([0, 1], list(range(x)))
def testFormatString(self):
x = constant_op.constant(3.1415)
self.assertEqual('3.14', '{:.2f}'.format(x))
def testNoOpIsNone(self):
self.assertTrue(control_flow_ops.no_op() is None)
def testEagerContextPreservedAcrossThreads(self):
def init_fn():
self.assertTrue(context.executing_eagerly())
with ops.init_scope():
self.assertTrue(context.executing_eagerly())
context_switches = context.context().context_switches
self.assertEqual(len(context_switches.stack), 1)
self.assertFalse(context_switches.stack[0].is_building_function)
self.assertEqual(context_switches.stack[0].enter_context_fn,
context.eager_mode)
self.assertTrue(context.executing_eagerly())
t1 = threading.Thread(target=init_fn)
t1.start()
t1.join()
def testWeakrefEagerTensor(self):
x = constant_op.constant([[1.]])
x.at1 = constant_op.constant([[2.]])
x.at2 = 3.
weak_x = weakref.ref(x)
weak_xat1 = weakref.ref(x.at1)
del x
self.assertIs(weak_x(), None)
self.assertIs(weak_xat1(), None)
def testWeakKeyDictionaryTensor(self):
weak_key_dict = weakref.WeakKeyDictionary()
strong_x = constant_op.constant([[1.]])
strong_y = constant_op.constant([[2.]])
weak_key_dict[strong_x] = constant_op.constant([[3.]])
weak_key_dict[strong_y] = constant_op.constant([[4.]])
strong_y.a = constant_op.constant([[5.]])
weak_x = weakref.ref(strong_x)
del strong_x
self.assertIs(weak_x(), None)
self.assertEqual([strong_y], list(weak_key_dict))
self.assertEqual(1, len(list(weak_key_dict)))
self.assertEqual(1, len(weak_key_dict))
del strong_y
self.assertEqual([], list(weak_key_dict))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/eager/ops_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class ContextTest(test.TestCase):
def testSetGlobalSeed(self):
c = context.Context()
c._set_global_seed(123)
for t in [np.int32, np.int64, np.uint32, np.uint64]:
c._set_global_seed(t(123))
c._set_global_seed(np.array(123, dtype=t))
c._set_global_seed(ops.convert_to_tensor(123, dtype=t))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/eager/context_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_only_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import graph_only_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class GraphOnlyOpsTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testGraphZerosLike(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
z_tf = graph_only_ops.graph_zeros_like(x)
with self.cached_session():
self.assertAllClose(np.zeros((2, 3)), self.evaluate(z_tf))
@test_util.run_deprecated_v1
def testGraphPlaceholder(self):
x_tf = graph_only_ops.graph_placeholder(dtypes.int32, shape=(1,))
y_tf = math_ops.square(x_tf)
with self.cached_session() as sess:
x = np.array([42])
y = sess.run(y_tf, feed_dict={x_tf: np.array([42])})
self.assertAllClose(np.square(x), y)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/eager/graph_only_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmarks for low-level eager execution primitives.
To run CPU benchmarks:
bazel run -c opt benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
--benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import keras
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import profiler
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import gradient_descent
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
def c_tfe_py_fastpath_execute(a,
b,
transpose_a=False,
transpose_b=False,
name=None):
ctx = context.context()
assert ctx.executing_eagerly(
), "The prototype doesn't contain C code for graph construction"
try:
return pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", name,
ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a,
"transpose_b", transpose_b)
except core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
six.raise_from(core._status_to_exception(e.code, message), None)
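# Illustrative usage of the fastpath helper above (a sketch; assumes eager
# execution, and note the op name "MatMul" is hardcoded in the helper):
#   a = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
#   product = c_tfe_py_fastpath_execute(a, a, transpose_b=True)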
class SubclassedKerasModel(keras.Model):
def __init__(self, initializer="ones"):
super(SubclassedKerasModel, self).__init__()
self.layer_a = keras.layers.Dense(
64, kernel_initializer=initializer, bias_initializer="zeros")
self.layer_b = keras.layers.Dense(
128, kernel_initializer=initializer, bias_initializer="zeros")
self.layer_c = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros")
self.layer_d = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros")
self.layer_e = keras.layers.Dense(
10, kernel_initializer=initializer, bias_initializer="zeros")
def call(self, x):
x = self.layer_a(x)
x = self.layer_b(x)
x = self.layer_c(x)
x = self.layer_d(x)
return self.layer_e(x)
def make_keras_model(initializer="ones"):
model_input = keras.Input(shape=(10,))
x = keras.layers.Dense(
64, kernel_initializer=initializer, bias_initializer="zeros")(model_input)
x = keras.layers.Dense(
128, kernel_initializer=initializer, bias_initializer="zeros")(x)
x = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros")(x)
x = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros")(x)
x = keras.layers.Dense(
10, kernel_initializer=initializer, bias_initializer="zeros")(x)
return keras.Model(inputs=model_input, outputs=x)
def make_sequential_keras_model(initializer="ones"):
model = keras.models.Sequential()
model.add(keras.layers.Dense(
64, kernel_initializer=initializer, bias_initializer="zeros",
input_shape=(10,)))
model.add(keras.layers.Dense(
128, kernel_initializer=initializer, bias_initializer="zeros"))
model.add(keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros"))
model.add(keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros"))
model.add(keras.layers.Dense(
10, kernel_initializer=initializer, bias_initializer="zeros"))
return model
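# The three builders above define the same five-layer MLP (64, 128, 256, 256,
# 10 units) via the subclassing, functional, and Sequential Keras APIs, so
# differences measured by the benchmarks below reflect API overhead rather
# than model architecture.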
class MicroBenchmarks(test.Benchmark):
def __init__(self):
# used for multiply benchmarks
self._m_2 = random_ops.random_uniform([2])
# used for matmul benchmarks
self._m_2_by_2 = random_ops.random_uniform((2, 2))
self._m_100_by_784 = random_ops.random_uniform((100, 784))
self._num_iters_2_by_2 = 30000
self._num_iters_100_by_784 = 30000
def _run(self, func, num_iters, execution_mode=None):
# Call func once first so that one-time setup (e.g. GPU warmup) is not timed.
ctx = context.context()
with context.execution_mode(execution_mode):
func()
if execution_mode == context.ASYNC:
ctx.async_wait()
start = time.time()
for _ in xrange(num_iters):
func()
if execution_mode == context.ASYNC:
ctx.async_wait()
end = time.time()
mean_us = (end - start) * 1e6 / num_iters
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
extras={"examples_per_sec": num_iters / (end - start)})
def benchmark_create_np_array(self):
func = lambda: np.array([3.0])
self._run(func, 30000)
def _benchmark_create_tensor(self, value, dtype, device):
"""Benchmark overheads of creating a Tensor object."""
ctx = context.context()
handle = ctx._handle
if device == GPU:
# Warm up the GPU.
ops.EagerTensor(value, context=handle, device=device)
def func():
ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
self._run(func, 30000)
def benchmark_create_constant(self):
func = lambda: constant_op.constant(3.0)
self._run(func, 30000)
def benchmark_create_float_tensor_from_list_CPU(self):
self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU)
def benchmark_create_float_tensor_from_np_array_CPU(self):
self._benchmark_create_tensor(
np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
CPU)
def benchmark_create_int32_tensor_from_list_CPU(self):
self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU)
def benchmark_create_int32_tensor_from_np_array_CPU(self):
self._benchmark_create_tensor(
np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU)
def benchmark_create_float_tensor_from_list_GPU(self):
if not context.num_gpus():
return
self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)
def benchmark_create_float_tensor_from_np_array_GPU(self):
if not context.num_gpus():
return
self._benchmark_create_tensor(
np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
GPU)
def benchmark_create_int32_tensor_from_list_GPU(self):
# int32 tensors are kept in host memory even when executing on GPU.
if not context.num_gpus():
return
self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)
def benchmark_create_int32_tensor_from_np_array_GPU(self):
# int32 tensors are kept in host memory even when executing on GPU.
if not context.num_gpus():
return
self._benchmark_create_tensor(
np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)
def benchmark_index_tensor_with_literal(self):
func = lambda: constant_op.constant([3.0])[0]
self._run(func, 30000)
def benchmark_index_tensor_with_tensor(self):
func = lambda idx=constant_op.constant(0): constant_op.constant([3.0])[idx]
self._run(func, 30000)
def benchmark_index_tensor_with_np_array(self):
func = lambda idx=np.array(0): constant_op.constant([3.0])[idx]
self._run(func, 30000)
def _benchmark_np_multiply(self, m, num_iters):
a = m.cpu().numpy()
func = lambda: a * a
self._run(func, num_iters)
def _benchmark_tf_multiply(self, m, num_iters):
func = lambda: m * m
self._run(func, num_iters)
def _benchmark_tf_multiply_op(self, m, num_iters):
func = lambda: math_ops.multiply(m, m)
self._run(func, num_iters)
def benchmark_np_multiply(self):
self._benchmark_np_multiply(self._m_2, 30000)
def benchmark_tf_multiply_CPU(self):
with context.device(CPU):
m = self._m_2.cpu()
self._benchmark_tf_multiply(m, 30000)
def benchmark_tf_multiply_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2.gpu()
self._benchmark_tf_multiply(m, 30000)
def benchmark_tf_multiply_op_CPU(self):
with context.device(CPU):
m = self._m_2.cpu()
self._benchmark_tf_multiply_op(m, 30000)
def benchmark_tf_multiply_op_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2.gpu()
self._benchmark_tf_multiply_op(m, 30000)
def benchmark_tf_identity(self):
m = self._m_2
self._run(lambda: gen_array_ops.identity(m), 30000)
def benchmark_slowpath_tf_identity(self):
self._run(lambda: gen_array_ops.identity(1), 30000)
def benchmark_tfe_py_execute_identity(self):
m = self._m_2
ctx_handle = context.context()._handle
attrs = ("T", self._m_2.dtype.as_datatype_enum)
inputs = [m]
def f():
pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "Identity", inputs,
attrs, 1)
self._run(f, 30000)
def benchmark_tf_gradient_function_identity(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
self._run(
lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m),
30000)
def benchmark_tf_gradient_forward_identity(self):
with backprop.GradientTape() as tape:
m = self._m_2
tape.watch(m)
self._run(lambda: gen_array_ops.identity(m), 30000)
def benchmark_tf_gradient_tape_push_pop(self):
def f():
with backprop.GradientTape():
pass
self._run(f, 30000)
def benchmark_tf_gradient_function_no_op(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000)
def _benchmark_np_matmul(self, m, transpose_b, num_iters):
a = m.cpu().numpy()
b = a.T if transpose_b else a
func = lambda: np.dot(a, b)
self._run(func, num_iters)
def _benchmark_tf_matmul(self, m, transpose_b, num_iters,
execution_mode=None):
func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):
def func():
gen_math_ops.mat_mul(m, m, transpose_b=transpose_b)
self._run(func, num_iters)
def _benchmark_tfe_py_fastpath_execute_matmul(self, m, transpose_b,
num_iters):
def func():
c_tfe_py_fastpath_execute(m, m, transpose_b=transpose_b)
self._run(func, num_iters)
def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters):
inputs = [m, m]
# pylint: disable=protected-access
ctx_handle = context.context()._handle
# pylint: enable=protected-access
device = context.context().device_name
attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
m.dtype.as_datatype_enum)
def func():
pywrap_tensorflow.TFE_Py_Execute(ctx_handle, device, "MatMul", inputs,
attrs, 1)
self._run(func, num_iters)
def _benchmark_defun_matmul(self,
m,
transpose_b,
num_iters,
execution_mode=None):
f = function.defun(math_ops.matmul)
func = lambda: f(m, m, transpose_b=transpose_b)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_nested_defun_matmul(self, m, transpose_b, num_iters):
inner = function.defun(math_ops.matmul)
@function.defun
def outer(a, b, c, transpose_b):
return math_ops.matmul(inner(a, b, transpose_b=transpose_b), c)
func = lambda: outer(m, m, m, transpose_b=transpose_b)
# Warm up before benchmarking.
for _ in range(1000):
func()
self._run(func, num_iters)
def _benchmark_defun_matmul_forward_backward(self,
m,
transpose_b,
num_iters,
execution_mode=None):
f = function.defun(math_ops.matmul)
def func():
with backprop.GradientTape() as gt:
gt.watch(m)
y = f(m, m, transpose_b=transpose_b)
_ = gt.gradient(y, m)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_read_variable(self, m, num_iters):
self._run(m.value, num_iters)
def _benchmark_matmul_read_variable(self, m, num_iters):
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=num_iters)
def _benchmark_matmul_read_variable_with_tape(self, m, num_iters):
with backprop.GradientTape() as tape:
tape.watch(m)
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=num_iters)
def _benchmark_read_variable_with_tape(self, m, num_iters):
with backprop.GradientTape() as tape:
tape.watch(m)
self._run(m.value, num_iters)
# Benchmarks for A^2, A of dimension 2 by 2.
def benchmark_np_matmul_2_by_2(self):
self._benchmark_np_matmul(
self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tf_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_fastpath_execute_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_defun_matmul_forward_backward_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul_forward_backward(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_forward_backward_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul_forward_backward(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_tf_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tf_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_nested_defun_matmul_2_by_2(self):
m = self._m_2_by_2.cpu()
self._benchmark_nested_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
# Benchmarks for AA.T, A of dimension 100 by 784.
def benchmark_np_matmul_100_by_784(self):
self._benchmark_np_matmul(
self._m_100_by_784,
transpose_b=True,
num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_CPU_async(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tf_matmul(
m,
transpose_b=True,
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_fastpath_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_defun_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tf_matmul(
m,
transpose_b=True,
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_defun_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_nested_defun_matmul_100_by_784(self):
# Guard like the other GPU benchmarks; .gpu() fails on CPU-only machines.
if not context.num_gpus():
return
m = self._m_100_by_784.gpu()
self._benchmark_nested_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_defun_without_signature(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(func)
t = constant_op.constant(0.0)
cache_computation = lambda: defined(t, t, t, t, t, t, t, t)
self._run(cache_computation, 30000)
def benchmark_defun_without_signature_and_with_kwargs(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(func)
t = constant_op.constant(0.0)
def cache_computation():
return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
self._run(cache_computation, 30000)
def benchmark_defun_with_signature(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(
func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
t = constant_op.constant(0.0)
signature_computation = lambda: defined(t, t, t, t, t, t, t, t)
self._run(signature_computation, 30000)
def benchmark_defun_with_signature_and_kwargs(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(
func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
t = constant_op.constant(0.0)
def signature_computation():
return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
self._run(signature_computation, 30000)
def benchmark_matmul_read_variable_op_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_matmul_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_matmul_read_variable_op_with_tape_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_matmul_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_with_tape_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_with_tape_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_keras_model_subclassed(self):
model = SubclassedKerasModel()
data = random_ops.random_uniform((10, 10))
func = lambda: model(data)
# First call is more expensive (creates variables etc.), discount that.
func()
# The whole point of this test is to contrast subclassing with
# the functional style of keras model building, so validate that
# the models are equivalent.
assert np.equal(func(), make_keras_model()(data)).all()
self._run(func, 30000)
def benchmark_keras_model_functional(self):
model = make_keras_model()
data = random_ops.random_uniform((10, 10))
func = lambda: model(data)
# Symmetry with benchmark_keras_model_subclassed
func()
assert np.equal(func(), SubclassedKerasModel()(data)).all()
self._run(func, 30000)
def benchmark_keras_model_sequential(self):
model = make_sequential_keras_model()
data = random_ops.random_uniform((10, 10))
func = lambda: model(data)
# Symmetry with benchmark_keras_model_functional
func()
assert np.equal(func(), make_keras_model()(data)).all()
self._run(func, 30000)
def _benchmark_keras_model_fit(self, model, run_eagerly=False):
data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
model.compile(
gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
loss="mse", run_eagerly=run_eagerly)
func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0)
# First call is more expensive (creates variables etc.), discount that.
model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0)
self._run(func, 1)
def _benchmark_keras_model_evaluate(self, model, run_eagerly=False):
data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
model.compile(
gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
loss="mse", run_eagerly=run_eagerly)
func = lambda: model.evaluate(dataset, steps=1000, verbose=0)
# First call is more expensive (creates variables etc.), discount that.
model.evaluate(dataset, steps=1, verbose=0)
self._run(func, 1)
def _benchmark_keras_model_predict(self, model, run_eagerly=False):
data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
dataset = dataset_ops.Dataset.from_tensors(tuple([data])).repeat()
model.compile(
gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
loss="mse", run_eagerly=run_eagerly)
func = lambda: model.predict(dataset, steps=1000, verbose=0)
# First call is more expensive (creates variables etc.), discount that.
model.predict(dataset, steps=1, verbose=0)
self._run(func, 1)
def benchmark_keras_model_subclassed_fit(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_subclassed_fit_graph_mode(self):
with context.graph_mode():
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_subclassed_fit_run_model_eagerly(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model, run_eagerly=True)
def benchmark_keras_model_functional_fit(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_functional_fit_graph_mode(self):
with context.graph_mode():
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_functional_fit_graph_mode_with_profiler(self):
profiler.start()
with context.graph_mode():
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
result = profiler.stop()
assert result is not None
def benchmark_keras_model_functional_fit_run_model_eagerly(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model, run_eagerly=True)
def benchmark_keras_model_functional_fit_run_model_eagerly_with_profiler(
self):
profiler.start()
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model, run_eagerly=True)
result = profiler.stop()
assert result is not None
def benchmark_keras_model_sequential_fit(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_sequential_fit_graph_mode(self):
with context.graph_mode():
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_sequential_fit_run_model_eagerly(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model, run_eagerly=True)
def benchmark_keras_model_subclassed_evaluate(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_subclassed_evaluate_run_model_eagerly(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model, run_eagerly=True)
def benchmark_keras_model_functional_evaluate(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_functional_evaluate_run_model_eagerly(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model, run_eagerly=True)
def benchmark_keras_model_sequential_evaluate(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_sequential_evaluate_run_model_eagerly(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model, run_eagerly=True)
def benchmark_keras_model_subclassed_predict(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_subclassed_predict_run_model_eagerly(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model, run_eagerly=True)
def benchmark_keras_model_functional_predict(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_functional_predict_run_model_eagerly(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model, run_eagerly=True)
def benchmark_keras_model_sequential_predict(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_sequential_predict_run_model_eagerly(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model, run_eagerly=True)
def benchmarkScan(self):
elems = math_ops.range(1600)
def scan():
return functional_ops.scan(
lambda a, x: a + x, elems, parallel_iterations=1)
self._run(scan, 100)
def benchmarkScanDefun(self):
elems = math_ops.range(1600)
@function.defun
def scan():
return functional_ops.scan(
lambda a, x: a + x, elems, parallel_iterations=1)
self._run(scan, 100)
def benchmark_fastpath_conversion_type_inference(self):
c = constant_op.constant(1., dtype=dtypes.float32)
def fn():
return gen_math_ops.add(c, 1)
self._run(fn, 10000)
def _benchmarkFunctionWithResourceInputs(self, num_resources, num_iters):
@def_function.function
def add_all(*args):
return math_ops.add_n(*args)
with context.device(CPU):
resources = []
for _ in range(num_resources):
resources.append(resource_variable_ops.ResourceVariable(self._m_2))
self._run(lambda: add_all(resources), num_iters)
def benchmarkFunctionWithFiveResourceInputs(self):
self._benchmarkFunctionWithResourceInputs(5, 1000)
def benchmarkFunctionWithFiveHundredResourceInputs(self):
self._benchmarkFunctionWithResourceInputs(500, 100)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/eager/benchmarks_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining graph functions with eager semantics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
from tensorflow.python.eager import context
from tensorflow.python.eager import function as function_lib
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import tf_export
class UnliftedInitializerVariable(resource_variable_ops.UninitializedVariable):
"""Variable which does not lift its initializer out of function context.
Instances of this variable, when created, build a graph which runs their
initializer inside a tf.cond(is_initialized) block.
This can only be created inside a defun called from (eventually) eager
mode. That is, non-function-building graphs are not supported.
"""
def __init__(self,
initial_value=None,
trainable=None,
caching_device=None,
name=None,
dtype=None,
constraint=None,
add_initializers_to=None,
lifted_initializer_graph=None,
synchronization=None,
aggregation=None,
shape=None,
**unused_kwargs):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound
to a shape before being used here.)
trainable: If `True`, GradientTapes automatically watch uses of this
Variable.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If None, either the datatype will be kept (if initial_value is
a Tensor) or float32 will be used (if it is a Python object convertible
to a Tensor).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
add_initializers_to: if not None and not in legacy graph mode, the
initializer tensor will be added to this map in addition to adding the
assignment to the function.
lifted_initializer_graph: FuncGraph to try to lift initializers to.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
shape: (optional) The shape of this variable. If None, the shape of
`initial_value` will be used. When setting this argument to
`tf.TensorShape(None)` (representing an unspecified shape), the variable
can be assigned with values of different shapes.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
RuntimeError: If called outside of a function definition.
"""
with ops.init_scope():
self._in_graph_mode = not context.executing_eagerly()
if not ops.inside_function():
# If we've been init_scope()d out of the function definition, there is
# nothing to do here: we can't do the capturing or conditional logic.
resource_variable_ops.ResourceVariable.__init__(
self, initial_value=initial_value, trainable=trainable,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint)
return
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if constraint is not None and not callable(constraint):
raise ValueError("The `constraint` argument must be a callable.")
if isinstance(initial_value, trackable.CheckpointInitialValue):
self._maybe_initialize_trackable()
self._update_uid = initial_value.checkpoint_position.restore_uid
initial_value = initial_value.wrapped_value
with ops.name_scope(name, "Variable", []
if init_from_fn else [initial_value]) as scope_name:
with ops.name_scope("Initializer"), ops.device(None):
initial_value = ops.convert_to_tensor(
initial_value() if init_from_fn else initial_value,
name="initial_value", dtype=dtype)
assert initial_value is not None
# Don't use `shape or initial_value.shape` since TensorShape has
# overridden `__bool__`.
if shape is None:
shape = initial_value.shape
# Use the constructor for UninitializedVariable to start. Outside the name
# scope so we don't double up the prefix.
super(UnliftedInitializerVariable, self).__init__(
trainable=trainable,
caching_device=caching_device,
name=name,
shape=shape,
dtype=initial_value.dtype,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation,
extra_handle_data=initial_value,
**unused_kwargs)
with ops.name_scope(scope_name):
if self._in_graph_mode:
with ops.init_scope():
outer_graph = ops.get_default_graph()
func_graph = ops.get_default_graph()
function_placeholders = (
func_graph.inputs + func_graph.internal_captures)
placeholder_ops = set(
[tensor.op for tensor in function_placeholders])
lifted_initializer = lift_to_graph.lift_to_graph(
[initial_value], outer_graph,
disallowed_placeholders=placeholder_ops)[initial_value]
with ops.init_scope():
self._initial_value = lifted_initializer
with ops.name_scope("IsInitialized"):
self._is_initialized_op = (
resource_variable_ops.var_is_initialized_op(self._handle))
if initial_value is not None:
with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
self._initializer_op = resource_variable_ops.assign_variable_op(
self._handle, lifted_initializer, name=n)
else:
if add_initializers_to is not None:
add_initializers_to[self] = initial_value
def assign_fn():
with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
resource_variable_ops.assign_variable_op(
self._handle,
initial_value,
name=n)
# Returning values to keep tf.cond happy.
return ops.convert_to_tensor(1)
def not_assign_fn():
return ops.convert_to_tensor(0)
# Note: this cond is guaranteed to run because we're inside a defun,
# which will insert automatic control dependencies.
control_flow_ops.cond(
resource_variable_ops.var_is_initialized_op(self._handle),
not_assign_fn, assign_fn)
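# The cond above is conceptually equivalent to the following eager sketch:
#   if not var_is_initialized(handle):
#     assign(handle, initial_value)
# It is expressed as a tf.cond so that it can live inside the traced function
# graph, where a Python `if` on a symbolic predicate is unavailable.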
RUN_FUNCTIONS_EAGERLY = False
@tf_export("config.experimental_run_functions_eagerly")
def run_functions_eagerly(run_eagerly):
"""Enables / disables eager execution of `tf.function`s.
After calling `tf.config.experimental_run_functions_eagerly(True)` all
invocations of tf.function will run eagerly instead of running through a graph
function.
This can be useful for debugging or profiling.
Similarly, calling `tf.config.experimental_run_functions_eagerly(False)` will
revert the behavior of all functions to graph functions.
Args:
run_eagerly: Boolean. Whether to run functions eagerly.
"""
global RUN_FUNCTIONS_EAGERLY
RUN_FUNCTIONS_EAGERLY = bool(run_eagerly)
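# Illustrative usage (a sketch): with the flag enabled, a tf.function body
# runs as plain Python, so tools such as pdb breakpoints work inside it.
#   tf.config.experimental_run_functions_eagerly(True)
#   @tf.function
#   def square(x):
#     return x * x  # Executed eagerly on every call while the flag is set.
#   square(tf.constant(2.0))
#   tf.config.experimental_run_functions_eagerly(False)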
class FunctionDeleter(object):
def __init__(self, func_graph):
self.func_graph = func_graph
def __del__(self):
try:
func_graph_module.dismantle_func_graph(self.func_graph)
except: # pylint: disable=bare-except
# Note: bare except here because this can be noisy at shutdown time.
pass
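# FunctionDeleter exists so that when the owning object is garbage-collected,
# dismantle_func_graph breaks the reference cycles a FuncGraph participates
# in, letting the graph itself be reclaimed.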
class Function(object):
"""Wrapper class for the graph functions defined for a Python function.
See the documentation for `tf.function` for more information on the semantics
of defined functions.
`Function` is thread-compatible.
"""
def __init__(self,
python_function,
name,
input_signature=None,
autograph=True,
experimental_autograph_options=None,
experimental_relax_shapes=False):
"""Initializes a `Function`.
Args:
python_function: the function to be wrapped.
name: the name given to it.
input_signature: a possibly nested sequence of `TensorSpec` objects
specifying the input signature of this function. If `None`, a separate
function is instantiated for each inferred input signature.
autograph: whether `python_function` should be converted to graph mode.
See https://www.tensorflow.org/guide/autograph for more information.
experimental_autograph_options: optional tuple of
tensorflow.autograph.Feature values. Allows enabling additional
conversion options when autograph is set to True.
experimental_relax_shapes: When true, argument shapes may be relaxed to
avoid unnecessary retracing.
Raises:
ValueError: if `input_signature` is not None and the `python_function`'s
argspec has keyword arguments.
"""
self._python_function = python_function
self._function_spec = function_lib.FunctionSpec.from_function_and_signature(
python_function, input_signature)
self._autograph = autograph
self._experimental_autograph_options = experimental_autograph_options
self.experimental_relax_shapes = experimental_relax_shapes
self._created_variables = None
self._stateful_fn = None
self._stateless_fn = None
self._descriptor_cache = weakref.WeakKeyDictionary()
self._name = name
def _defun_with_scope(self, scope):
"""Creates a defun wrapped inside a variable creator scope."""
weak_wrapped_fn = None
def wrapped_fn(*args, **kwds):
"""Wraps `self._python_function` in a variable creator scope."""
# We register a variable creator with reduced priority. If an outer
# variable creator is just modifying keyword arguments to the variable
# constructor, this will work harmoniously. Since the `scope` registered
# here actually creates the variable, it taking priority would otherwise
# ignore the outer creator.
#
# If an outer variable creator calls the variable constructor manually,
# for example creating a MirroredVariable, then they won't call our
# creator. This means we won't be able to trace the initialization graph,
# and so variable initializers can't depend on function arguments. This is
# better than the alternative, tracing the initialization graph but giving
# the user a variable type they didn't want.
with ops.get_default_graph()._variable_creator_scope(scope, priority=50): # pylint: disable=protected-access
# __wrapped__ allows AutoGraph to swap in a converted function. We give
# the function a weak reference to itself to avoid a reference cycle.
return weak_wrapped_fn().__wrapped__(*args, **kwds)
weak_wrapped_fn = weakref.ref(wrapped_fn)
return self._defun(tf_decorator.make_decorator(
self._python_function,
wrapped_fn))
def _defun(self, fn):
"""Returns a defun generated from the input function."""
return function_lib.defun(
fn,
input_signature=self.input_signature,
autograph=self._autograph,
experimental_autograph_options=self._experimental_autograph_options,
experimental_relax_shapes=self.experimental_relax_shapes)
def _initialize(self, args, kwds, add_initializers_to=None):
"""Initializes, on the first call.
Creates two `Function`s, one that will allow creation of variables
and one that won't.
Additionally runs a trace for the `Function` that allows creation
of variables.
Args:
args: Arguments to the underlying python callable.
kwds: Keyword arguments to the python callable.
add_initializers_to: Where to collect variable initializers, if not None.
"""
created_variables = []
lifted_initializer_graph = func_graph_module.FuncGraph("initializer")
def variable_capturing_scope(unused_next_creator, **kwds):
"""Creates UnliftedInitializerVariables and saves references to them."""
v = UnliftedInitializerVariable(
add_initializers_to=add_initializers_to,
lifted_initializer_graph=lifted_initializer_graph, **kwds)
created_variables.append(weakref.ref(v))
return v
self._created_variables = created_variables
self._stateful_fn = self._defun_with_scope(variable_capturing_scope)
self._stateful_fn._name = self._name # pylint: disable=protected-access
# Force the definition of the function for these arguments
self._lifted_initializer_graph = lifted_initializer_graph
self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
self._concrete_stateful_fn = (
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
*args, **kwds))
def invalid_creator_scope(*unused_args, **unused_kwds):
"""Disables variable creation."""
raise ValueError(
"tf.function-decorated function tried to create "
"variables on non-first call.")
self._stateless_fn = self._defun_with_scope(invalid_creator_scope)
self._stateless_fn._name = self._name # pylint: disable=protected-access
def _decorate(self, decorator):
"""Allows the captured Python function to be decorated in place.
This method is only safe to call when the Function has not been called by a
user. It makes sense to use this method to push a decorator into the
function rather than wrapping the function in the decorator.
We use this in tf.Module to allow user annotated `tf.functions` to remain as
`Function` objects but still automatically enter the Module name_scope
when they are evaluated like all other methods.
Args:
decorator: A callable accepting a single argument which is the function
to decorate and returning a callable result.
Raises:
ValueError: If the function has been called a ValueError is raised.
"""
if self._stateful_fn is not None or self._stateless_fn is not None:
raise ValueError(
"Functions cannot be decorated after they have been traced.")
self._python_function = decorator(self._python_function)
self._function_spec = function_lib.FunctionSpec.from_function_and_signature(
self._python_function, self.input_signature)
def __call__(self, *args, **kwds):
"""Calls the graph function."""
context.ensure_initialized()
if RUN_FUNCTIONS_EAGERLY:
return self._python_function(*args, **kwds)
if self._created_variables:
# In this case we have created variables on the first call, so we run the
# defunned version which is guaranteed to never create variables.
return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
elif self._stateful_fn is not None:
# In this case we have not created variables on the first call. So we can
# run the first trace but we should fail if variables are created.
results = self._stateful_fn(*args, **kwds)
if self._created_variables:
raise ValueError("Creating variables on a non-first call to a function"
" decorated with tf.function.")
return results
# This is the first call of __call__, so we have to initialize.
initializer_map = {}
self._initialize(args, kwds, add_initializers_to=initializer_map)
if self._created_variables:
try:
# Attempt to initialize variables eagerly and without conds by lifting
# out initialization graphs. This is the only initialization strategy
# compatible with XLA at the moment.
self._initialize_uninitialized_variables(initializer_map)
except lift_to_graph.UnliftableError:
pass # Fall through to cond-based initialization.
else:
# Lifting succeeded, so variables are initialized and we can run the
# stateless function.
return self._stateless_fn(*args, **kwds)
else:
canon_args, canon_kwds = \
self._stateful_fn._function_spec.canonicalize_function_inputs( # pylint: disable=protected-access
*args, **kwds)
# If we did not create any variables the trace we have is good enough.
return self._concrete_stateful_fn._filtered_call(canon_args, canon_kwds) # pylint: disable=protected-access
def fn_with_cond(*inner_args, **inner_kwds):
"""Conditionally runs initialization if it's needed."""
condition = True
for wr in self._created_variables:
variable = wr()
if variable is None:
raise ValueError(
"A tf.Variable created inside your tf.function has been"
" garbage-collected. Your code needs to keep Python references"
" to variables created inside `tf.function`s.\n"
"\n"
"A common way to raise this error is to create and return a"
" variable only referenced inside your function:\n"
"\n"
"@tf.function\n"
"def f():\n"
" v = tf.Variable(1.0)\n"
" return v\n"
"\n"
"v = f() # Crashes with this error message!\n"
"\n"
"The reason this crashes is that @tf.function annotated"
" function returns a **`tf.Tensor`** with the **value** of the"
" variable when the function is called rather than the"
" variable instance itself. As such there is no code holding a"
" reference to the `v` created inside the function and Python"
" garbage collects it.\n"
"\n"
"The simplest way to fix this issue is to create variables"
" outside the function and capture them:\n"
"\n"
"v = tf.Variable(1.0)\n"
"\n"
"@tf.function\n"
"def f():\n"
" return v\n"
"\n"
"f() # <tf.Tensor: ... numpy=1.>\n"
"v.assign_add(1.)\n"
"f() # <tf.Tensor: ... numpy=2.>")
condition = math_ops.logical_and(
condition, resource_variable_ops.var_is_initialized_op(
variable.handle))
# We want to call stateless_fn if possible because it avoids recomputing
# potentially expensive initializers.
return control_flow_ops.cond(
condition,
lambda: self._stateless_fn(*inner_args, **inner_kwds),
functools.partial(self._concrete_stateful_fn._filtered_call, # pylint: disable=protected-access
inner_args, inner_kwds))
# We've created variables and are unable to lift the initialization graphs,
# so we fall back to initializing with conds while running the function.
canon_args, canon_kwds = \
self._stateful_fn._function_spec.canonicalize_function_inputs( # pylint: disable=protected-access
*args, **kwds)
return function_lib.defun(fn_with_cond)(*canon_args, **canon_kwds)
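# To summarize __call__'s dispatch: the first call traces the function and
# tries to lift variable initializers out and run them eagerly; once
# variables exist, later calls use the variable-free (stateless) trace; and
# if lifting fails, fn_with_cond wraps the body so initialization happens,
# guarded by a cond, the first time the function actually runs.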
@property
def python_function(self):
"""The python function wrapped in this tf.function."""
return self._python_function
@property
def input_signature(self):
return self._function_spec.input_signature
@property
def function_spec(self):
return self._function_spec
def _initialize_uninitialized_variables(self, initializer_map):
"""Make and call a `ConcreteFunction` which initializes variables."""
# Note: using defun here avoids an infinite recursion.
@function_lib.defun
def initialize_variables():
for v, init in initializer_map.items():
with ops.init_scope():
if resource_variable_ops.var_is_initialized_op(v.handle):
# Ignore variables which are already initialized at trace time.
continue
v.assign(lift_to_graph.lift_to_graph(
[init], ops.get_default_graph())[init])
with ops.init_scope():
return initialize_variables.get_concrete_function()()
def get_initialization_function(self, *args, **kwargs):
"""Returns a `ConcreteFunction` which initializes this function's variables.
Requires that this function hasn't been accessed yet through either calling
it or calling get_concrete_function. Fails if we cannot build an initializer
function which does not depend on the concrete values of the inputs to this
function.
Note that running this function will overwrite any values currently assigned
to variables, for example restores from a checkpoint.
Args:
*args: arguments to the underlying python callable.
**kwargs: keyword arguments to the python callable.
Returns:
A `ConcreteFunction` object which initializes the variables of this
function.
Raises:
RuntimeError: if called after the variables have been initialized.
"""
if self._stateful_fn is not None:
raise RuntimeError(
"get_initialization_function cannot be called after the function "
"has been used")
# Here we trace the function, collect the initializers, and attempt to
# extract them and run them eagerly. Fail only if we cannot do so.
initializer_map = {}
self._initialize(args, kwargs, add_initializers_to=initializer_map)
# Note: using defun here avoids an infinite recursion.
@function_lib.defun
def initialize_variables():
for v, init in initializer_map.items():
v.assign(lift_to_graph.lift_to_graph(
[init], ops.get_default_graph())[init])
return initialize_variables.get_concrete_function()
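# Illustrative usage (a sketch; `f` is a hypothetical @tf.function-decorated
# callable that has not yet been called or traced):
#   init_fn = f.get_initialization_function(tf.constant(1.0))
#   init_fn()              # Initializes f's variables.
#   f(tf.constant(1.0))    # Now safe to call normally.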
def _list_all_concrete_functions_for_serialization(self):
"""Returns all concrete functions for serialization.
Returns:
A list of instances of `Function`.
"""
if self.input_signature is not None:
self.get_concrete_function()
concrete_functions = []
# pylint: disable=protected-access
if self._stateful_fn:
concrete_functions.extend(
self._stateful_fn._function_cache.all_values())
if self._stateless_fn:
concrete_functions.extend(
self._stateless_fn._function_cache.all_values())
# pylint: enable=protected-access
deduplicated_concrete_functions = []
seen_signatures = []
# We are using a list so that:
# - the returned collection is deterministic, and
# - we can use a custom equality operator (is_same_structure).
# This is run only at serialization time on likely very small inputs so we
# are not concerned about O(n^2) runtime.
for concrete_function in concrete_functions:
signature = concrete_function.structured_input_signature
flattened = nest.flatten(signature)
if any(
isinstance(arg, func_graph_module.UnknownArgument)
for arg in flattened):
logging.info("Unsupported signature for serialization: %s.", signature)
continue
equal_to_signature = functools.partial(
function_lib.is_same_structure, signature, check_values=True)
if not any(equal_to_signature(s) for s in seen_signatures):
deduplicated_concrete_functions.append(concrete_function)
seen_signatures.append(signature)
return deduplicated_concrete_functions
def get_concrete_function(self, *args, **kwargs):
"""Returns a `ConcreteFunction` specialized to inputs and execution context.
If this `Function` was created with an `input_signature`, `args` and
`kwargs` may be omitted. With an input signature there is only one
concrete function associated with this `Function`.
If there is no fixed `input_signature` associated with this
`Function`, positional and keyword arguments to `get_concrete_function`
follow the same rules as input signature specification, with `tf.TensorSpec`
objects describing `tf.Tensor`s which will be passed to the concrete
function.
Each `tf.Tensor` argument to the concrete function must have a unique name,
either because it is the only one associated with a named argument of the
Python function or because an explicit `name=` was passed to its
`tf.TensorSpec` object. These names become the argument names for the
concrete function.
Arguments to the concrete function may always be specified as keyword
arguments, naming the Tensor input. Positional arguments may be used instead
when each preceding argument to the Python function is a Tensor.
```python
@tf.function
def f(x):
return x
f_concrete = f.get_concrete_function(tf.TensorSpec([], tf.float64))
f_concrete(tf.constant(1.))
f_concrete(x=tf.constant(1.))
```
Nested structures containing Tensors may be specified when retrieving
concrete functions. Structures with multiple Tensors are expanded into
multiple arguments of the concrete function. Since multiple concrete
function arguments are associated with one argument to the original
function, these Tensors must be named explicitly. Tensors in nested
structures may not be passed using positional arguments when calling the
concrete function.
```python
f_concrete2 = f.get_concrete_function(
(tf.TensorSpec(None, tf.float64, name="first"),
tf.TensorSpec([], tf.float32, name="second")))
# Keyword arguments are required when identifying Tensors in nested
# structures.
f_concrete2(first=tf.constant([1.]), second=tf.constant(0.))
```
Functions with fixed input signatures have only one concrete function
associated with them, which can be retrieved without specifying any
arguments. As before, Tensors must have unique names, either inferred from
the argument names in the original Python function or specified
explicitly.
```python
@tf.function(input_signature=(tf.TensorSpec(None, tf.float32),))
def f_sig(y):
return y
f_sig_concrete = f_sig.get_concrete_function()
f_sig_concrete(tf.constant(1.))
f_sig_concrete(y=tf.constant(1.))
```
Args:
*args: inputs to specialize on.
**kwargs: inputs to specialize on.
Returns:
A TensorFlow function which takes exactly one `tf.Tensor` per argument.
Raises:
ValueError: if this object has not yet been called on concrete values.
"""
if self._stateful_fn is None:
initializer_map = {}
self._initialize(args, kwargs, add_initializers_to=initializer_map)
self._initialize_uninitialized_variables(initializer_map)
if self._created_variables:
# In this case we have created variables on the first call, so we run the
# defunned version which is guaranteed to never create variables.
return self._stateless_fn.get_concrete_function(*args, **kwargs)
elif self._stateful_fn is not None:
# In this case we have not created variables on the first call. So we can
# run the first trace but we should fail if variables are created.
concrete = self._stateful_fn.get_concrete_function(*args, **kwargs)
if self._created_variables:
raise ValueError("Creating variables on a non-first call to a function"
" decorated with tf.function.")
return concrete
def __get__(self, instance, owner):
"""Makes it possible to defun instance methods."""
del owner
# `instance` here is the instance that this `Function` was accessed through
# e.g., for
#
# class Foo(object):
#
# @function.defun
# def bar(self):
# ...
#
# foo = Foo()
# foo.bar() # `foo.bar` is a `Function` instance
#
# then `instance` will be `foo` (and `owner` will be `Foo`). We create a
# new instance of `Function` here to allow different instances each
# to create variables once, thereby allowing methods to be decorated with
# tf.function. Keeps a cache to avoid retracing the function every time the
# descriptor is accessed.
if instance not in self._descriptor_cache:
if instance is None:
return self
self._descriptor_cache[instance] = (
function_lib.class_method_to_instance_method(self, instance))
return self._descriptor_cache[instance]
@tf_export("function")
def function(func=None,
input_signature=None,
autograph=True,
experimental_autograph_options=None,
experimental_relax_shapes=False):
"""Creates a callable TensorFlow graph from a Python function.
`function` constructs a callable that executes a TensorFlow graph
(`tf.Graph`) created by tracing the TensorFlow operations in `func`.
This allows the TensorFlow runtime to apply optimizations and exploit
parallelism in the computation defined by `func`.
_Example Usage_
```python
def f(x, y):
return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)
g = tf.function(f)
x = tf.constant([[2.0, 3.0]])
y = tf.constant([[3.0, -2.0]])
# `f` and `g` will return the same value, but `g` will be executed as a
# TensorFlow graph.
assert f(x, y).numpy() == g(x, y).numpy()
# Tensors and tf.Variables used by the Python function are captured in the
# graph.
@tf.function
def h():
return f(x, y)
assert (h().numpy() == f(x, y).numpy()).all()
# Data-dependent control flow is also captured in the graph. Supported
# control flow statements include `if`, `for`, `while`, `break`, `continue`,
# `return`.
@tf.function
def g(x):
if tf.reduce_sum(x) > 0:
return x * x
else:
return -x // 2
# print and TensorFlow side effects are supported, but exercise caution when
# using Python side effects like mutating objects, saving to files, etc.
l = []
@tf.function
def g(x):
for i in x:
print(i) # Works
tf.compat.v1.assign(v, i) # Works
tf.compat.v1.py_func(lambda i: l.append(i), [i], [])  # Works
l.append(i) # Caution! Doesn't work.
```
Note that unlike other TensorFlow operations, we don't convert Python
numerical inputs to tensors. Moreover, a new graph is generated for each
distinct Python numerical value; for example, calling `g(2)` and `g(3)` will
generate two new graphs (while only one is generated if you call
`g(tf.constant(2))` and `g(tf.constant(3))`). Therefore, Python numerical
inputs should be restricted to arguments that will have few distinct values,
such as hyperparameters like the number of layers in a neural network. This
allows TensorFlow to optimize each variant of the neural network.
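For instance, a minimal sketch of this behavior (the function `g` here is
illustrative):
```python
@tf.function
def g(x):
  return x + 1
g(2)               # Traces a new graph specialized on the Python value 2.
g(3)               # Traces another new graph, specialized on 3.
g(tf.constant(2))  # Traces one graph keyed on the input's dtype and shape.
g(tf.constant(3))  # Reuses the graph traced for `tf.constant(2)`.
```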
_Referencing `tf.Variable`s_
The Python function `func` may reference stateful objects (such as
`tf.Variable`).
These are captured as implicit inputs to the callable returned by `function`.
For example:
```python
c = tf.Variable(0)
@tf.function
def f(x):
c.assign_add(1)
return x + tf.compat.v1.to_float(c)
assert int(c) == 0
assert f(1.0) == 2.0
assert int(c) == 1
assert f(1.0) == 3.0
assert int(c) == 2
```
`function` can be applied to methods of an object. For example:
```python
class Dense(object):
def __init__(self):
self.W = tf.Variable(tf.compat.v1.glorot_uniform_initializer()((10, 10)))
self.b = tf.Variable(tf.zeros(10))
@tf.function
def compute(self, x):
return tf.matmul(x, self.W) + self.b
d1 = Dense()
d2 = Dense()
x = tf.random.uniform((10, 10))
# d1 and d2 are using distinct variables
assert not (d1.compute(x).numpy() == d2.compute(x).numpy()).all()
```
_Usage with `tf.keras`_
The `call` methods of a `tf.keras.Model` subclass can be decorated with
`function` in order to apply graph execution optimizations on it.
For example:
```python
class MyModel(tf.keras.Model):
def __init__(self, keep_probability=0.2):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4)
self.dense2 = tf.keras.layers.Dense(5)
self.keep_probability = keep_probability
@tf.function
def call(self, inputs, training=True):
y = self.dense2(self.dense1(inputs))
if training:
return tf.nn.dropout(y, self.keep_probability)
else:
return y
model = MyModel()
model(x, training=True) # executes a graph, with dropout
model(x, training=False) # executes a graph, without dropout
```
_Input Signatures_
`function` instantiates a separate graph for every unique set of input
shapes and datatypes. For example, the following code snippet will result
in three distinct graphs being traced, as each input has a different
shape.
```python
@tf.function
def f(x): return tf.add(x, 1.)
scalar = tf.constant(1.0)
vector = tf.constant([1.0, 1.0])
matrix = tf.constant([[3.0]])
f(scalar)
f(vector)
f(matrix)
```
An "input signature" can be optionally provided to `function` to control
the graphs traced. The input signature specifies the shape and type of each
`Tensor` argument to the function using a `tf.TensorSpec` object. For example,
the following code snippet ensures that a single graph is created where the
input `Tensor` is required to be a floating point tensor with no restrictions
on shape.
```python
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def f(x): return tf.add(x, 1.)
```
When an `input_signature` is specified, the callable will convert the inputs
to the specified TensorSpecs.
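For instance (a minimal sketch using the `f` defined above), inputs that are
compatible with the signature are converted rather than triggering new traces:
```python
f(tf.constant([1., 2.]))  # Uses the single traced graph.
f([1., 2.])               # The Python list is converted to a float32 Tensor.
```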
_Tracing and staging_
When `autograph` is `True`, all Python control flow that depends on `Tensor`
values is staged into a TensorFlow graph. When `autograph` is `False`, the
function is traced and control flow is not allowed to depend on data.
Note that `function` only stages TensorFlow operations; all Python code that
`func` executes and that does not depend on data will shape the _construction_
of the graph.
For example, consider the following:
```python
import numpy as np
def add_noise():
return tf.eye(5) + np.random.randn(5, 5)
traced = tf.function(add_noise)
```
`add_noise()` will return a different output every time it is invoked.
However, `traced()` will return the same value every time it is called,
since a particular random value generated by the `np.random.randn` call will
be inserted in the traced/staged TensorFlow graph as a constant. In this
particular example, replacing `np.random.randn(5, 5)` with
`tf.random.normal((5, 5))` will result in the same behavior for `add_noise()`
and `traced()`.
_Python Side-Effects_
A corollary of the previous discussion on tracing is the following: If a
Python function `func` has Python side-effects, then executing `func` multiple
times may not be semantically equivalent to executing `F = tf.function(func)`
multiple times; this difference is due to the fact that `function` only
captures the subgraph of TensorFlow operations that is constructed when `func`
is invoked to trace a graph.
The same is true if code with Python side effects is used inside control flow,
such as a loop. If your code uses side effects that are not intended to
control graph construction, wrap them inside `tf.compat.v1.py_func`.
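A minimal sketch of this pattern (the helper name `_record` and the list
`log` are illustrative):
```python
log = []
def _record(v):
  log.append(v)  # Python side effect, executed at graph *run* time.
  return v
@tf.function
def f(x):
  x = tf.compat.v1.py_func(_record, [x], x.dtype)
  return x + 1
```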
_Retracing_
A single tf.function object might need to map to multiple computation graphs
under the hood. This should be visible only as a performance effect (tracing
graphs has a nonzero computational and memory cost) but should not affect the correctness
of the program. A traced function should return the same result as it would
when run eagerly, assuming no unintended Python side-effects.
Calling a `tf.function` with tensor arguments of different dtypes should lead
to at least one computational graph per distinct set of dtypes. Alternatively,
always calling a `tf.function` with tensor arguments of the same shapes and
dtypes and the same non-tensor arguments should not lead to additional
retracings of your function.
Other than that, TensorFlow reserves the right to retrace functions as many
times as needed, to ensure that traced functions behave as they would when run
eagerly and to provide the best end-to-end performance. For example, the
behavior of how many traces TensorFlow will do when the function is repeatedly
called with different python scalars as arguments is left undefined to allow
for future optimizations.
To control the tracing behavior, use the following tools:
- different `tf.function` objects are guaranteed to not share traces; and
- specifying a signature or using concrete function objects returned from
get_concrete_function() guarantees that only one function graph will be
built.
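For example, a minimal sketch of the second approach:
```python
@tf.function
def f(x):
  return x + 1
f_concrete = f.get_concrete_function(tf.TensorSpec([], tf.float32))
f_concrete(tf.constant(1.))  # Only this one graph is ever built.
f_concrete(tf.constant(2.))  # Reuses the same graph.
```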
Args:
func: function to be compiled. If `func` is None, returns a decorator that
can be invoked with a single argument - `func`. The end result is
equivalent to providing all the arguments up front. In other words,
`tf.function(input_signature=...)(func)` is equivalent to
`tf.function(func, input_signature=...)`. The former can be used to
decorate Python functions, for example:
@tf.function(input_signature=...)
def foo(...): ...
input_signature: A possibly nested sequence of `tf.TensorSpec` objects
specifying the shapes and dtypes of the Tensors that will be supplied to
this function. If `None`, a separate function is instantiated for each
inferred input signature. If input_signature is specified, every input to
`func` must be a `Tensor`, and `func` cannot accept `**kwargs`.
autograph: Whether autograph should be applied on `func` before tracing a
graph. This allows for dynamic control flow (Python if's, loops etc.)
in the traced graph. See https://www.tensorflow.org/guide/autograph for
more information.
experimental_autograph_options: Experimental knobs (in the form of a tuple
of tensorflow.autograph.Feature values) to control behavior when
autograph=True.
experimental_relax_shapes: When true, argument shapes may be relaxed to
avoid unnecessary retracing.
Returns:
If `func` is not None, returns a callable that will execute the compiled
function (and return zero or more `tf.Tensor` objects).
If `func` is None, returns a decorator that, when invoked with a single
`func` argument, returns a callable equivalent to the case above.
Raises:
TypeError: If `input_signature` is neither `None` nor a sequence of
`TensorSpec` objects.
"""
if input_signature is not None:
function_lib.validate_signature(input_signature)
def decorated(inner_function):
try:
name = inner_function.__name__
except AttributeError:
name = "function"
return tf_decorator.make_decorator(
inner_function,
Function(
inner_function,
name,
input_signature=input_signature,
autograph=autograph,
experimental_autograph_options=experimental_autograph_options,
experimental_relax_shapes=experimental_relax_shapes))
# This code path is for the `foo = tf.function(foo, ...)` use case
if func is not None:
return decorated(func)
# This code path is for the
#
# @tf.function(...)
# def foo(...):
# ...
#
# use case, which is equivalent to `foo = tf.function(...)(foo)`
return decorated
|
tensorflow-master
|
tensorflow/python/eager/def_function.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class VariableHolder(object):
"""Holds variables for a python function."""
def __init__(self, fn=None, share_variables=False):
self._fn = fn
self._share_variables = share_variables
self._variables_by_name = data_structures.Mapping()
@property
def variables(self):
return self._variables_by_name
def variable_creator_scope(self, next_creator, **kwargs):
"""Creates variables & adds them to collections to match legacy code."""
collections = kwargs.pop("collections", None)
v = None
# Get expected variable name.
with ops.name_scope(kwargs.get("name", None), "Variable") as name:
variable_name = ops.name_from_scope_name(name)
kwargs["name"] = name
if self._share_variables:
v = self._variables_by_name.get(variable_name, None)
if v is None:
v = next_creator(**kwargs)
self._variables_by_name[variable_name] = v
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
ops.add_to_collections(collections, v)
return v
def __call__(self, *args, **kwargs):
return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)
def call_with_variable_creator_scope(self, fn):
def wrapped(*args, **kwargs):
with variable_scope.variable_creator_scope(self.variable_creator_scope):
return fn(*args, **kwargs)
return wrapped
def _get_element_from_tensor_info(tensor_info, graph):
"""Simplified copy of the deprecated `get_tensor_from_tensor_info`."""
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
# We may get operations here in some cases. TensorInfo is a bit of a
# misnomer if so.
return graph.as_graph_element(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name),
graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name),
graph.get_tensor_by_name(
tensor_info.coo_sparse.dense_shape_tensor_name))
else:
raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
def _lift_single_variable(old_variable, graph, variable_holder):
"""Lifts `old_variable` out of the `FuncGraph` `graph`."""
new_variable = resource_variable_ops.UninitializedVariable(
shape=old_variable.shape,
dtype=old_variable.dtype,
name=old_variable.op.name,
trainable=old_variable.trainable,
extra_handle_data=old_variable.handle)
new_variable._initializer_op = old_variable._initializer_op # pylint: disable=protected-access
graph.inputs.append(old_variable.handle)
graph.captures[new_variable.handle] = old_variable.handle
# Now that we've added the new variable to graph.captures,
# graph.capture will use that cached value and do some post-processing
# on the capture like recording it on the tape.
graph.capture(new_variable.handle)
# pylint: disable=protected-access
variable_name = new_variable.name.split(":")[0]
variable_holder._variables_by_name[variable_name] = new_variable
graph._weak_variables.append(weakref.ref(new_variable))
# pylint: enable=protected-access
graph.watch_variable(new_variable)
return new_variable
def _lift_unlifted_variables(graph, variable_holder):
"""Finds resource variables and lifts them into the outer context.
When we import a GraphDef inside a wrap_function, no Python graph building
code runs. This means we get VarHandleOps which create variable resources,
but no corresponding Python objects. Leaving them like this works but gives
the user no way to interact with or modify the variables outside the graph.
This method searches for variables and lifts them out as regular variable
objects when possible, indicating to the FuncGraph that they are captures.
Args:
graph: The FuncGraph to lift variables from.
variable_holder: A VariableHolder to record the lifted variables in.
"""
with graph.as_default():
global_collection_variables = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES)
local_collection_variables = ops.get_collection(
ops.GraphKeys.LOCAL_VARIABLES)
existing_captures = set(graph.internal_captures)
lifted_variables = {}
def _should_lift_variable(v):
return ((v._in_graph_mode # pylint: disable=protected-access
and v.graph.building_function)
and isinstance(v, resource_variable_ops.BaseResourceVariable)
and v.handle not in existing_captures)
for old_variable in global_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[old_variable] = new_variable
existing_captures.add(old_variable.handle)
for old_variable in local_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[old_variable] = new_variable
existing_captures.add(old_variable.handle)
if new_variable._in_graph_mode: # pylint: disable=protected-access
outer_graph = new_variable.graph
# Variables are added to the global collection by default. In this
# case we only want the variable in the local collection, so we'll pop
# it out.
global_collection = outer_graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES)
global_collection.remove(new_variable)
outer_graph.add_to_collection(
ops.GraphKeys.LOCAL_VARIABLES, new_variable)
# Update the FuncGraph's collections, partly for the user and partly so this
# function is idempotent when it runs again in prune() calls.
for collection_name in [
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES
]:
mutable_collection = ops.get_collection_ref(collection_name)
for index, current in enumerate(mutable_collection):
mutable_collection[index] = lifted_variables.get(current, current)
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
"""Wraps a tf V1 piece of code in a function."""
def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
self._variable_holder = variable_holder
_lift_unlifted_variables(fn_graph, variable_holder)
# We call __init__ after lifting variables so that the function's signature
# properly reflects the new captured inputs.
for f in fn_graph.as_graph_def().library.function:
context.context().add_function_def(f)
super(WrappedFunction, self).__init__(
fn_graph, attrs=attrs, signature=signature)
def prune(self, feeds, fetches, name=None, input_signature=None):
"""Extract a subgraph of this function's underlying graph.
Wraps the subgraph in a new `WrappedFunction` object.
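A minimal sketch (assuming `f_wrapped` was produced by
`tf.compat.v1.wrap_function` and its graph contains a tensor named
`'fetch:0'`):
```python
pruned = f_wrapped.prune(
    feeds=f_wrapped.inputs,
    fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))
pruned()  # Runs only the subgraph needed to compute 'fetch:0'.
```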
Args:
feeds: Input tensors to the subgraph to extract, as `Tensor` objects.
fetches: Possibly-nested Python data structure containing information
about outputs of the target subgraph. Each entry can either be a
`Tensor` object (for data outputs), an `Operation` object (for control
outputs), or a `TensorInfo` proto. Any additional shape/dtype
information provided in a `TensorInfo` and not present in the original
graph will be added to the returned subgraph.
name: (optional) Name to give to the underlying `FuncGraph` of the
returned object. If no name is provided, the graph's name will be
`"pruned"`.
input_signature: (optional) possibly-nested Python data structure
containing `TensorSpec` objects, with which to populate the returned
functions's `FuncGraph`'s `structured_input_signature` field.
Returns:
A new `WrappedFunction` object containing a copy of the portion of this
object's graph that goes from `feeds` to `fetches`.
"""
# TODO(b/129646028): Add support for CompositeTensors.
name = name or "pruned"
feeds = nest.map_structure(self.graph.as_graph_element, feeds)
flat_feeds = nest.flatten(feeds)
for f in flat_feeds:
if not isinstance(f, ops.Tensor):
raise ValueError("Feeds must be tensors.")
# Ignoring all feeds that are captures allows prune to be called
# using wrapped_func.inputs even when it uses variables
internal_captures = self.graph.internal_captures
flat_feeds = [f for f in flat_feeds if f not in internal_captures]
operation_fetches = []
tensor_fetches = []
tensor_infos = []
def _fetch_preprocessing_callback(fetch):
"""Extract out lists of ops, tensors, and tensor type info.
Turns TensorInfos into Tensors in the original `fetches` structure.
Also extracts ops from `fetches`.
Args:
fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or
string identifying a Tensor or Operation.
Returns:
`fetch` converted to a Tensor.
"""
if isinstance(fetch, ops.Operation):
operation_fetches.append(fetch)
return fetch
elif isinstance(fetch, meta_graph_pb2.TensorInfo):
tensor_infos.append(fetch)
decoded = _get_element_from_tensor_info(fetch, self._func_graph)
if tensor_util.is_tensor(decoded):
tensor_fetches.append(decoded)
else:
operation_fetches.append(decoded)
return decoded
elif isinstance(fetch, ops.Tensor):
tensor_fetches.append(fetch)
return fetch
else:
graph_element = self.graph.as_graph_element(fetch)
return _fetch_preprocessing_callback(graph_element)
fetches = nest.map_structure(_fetch_preprocessing_callback, fetches)
# Expand composite tensors into their component dense Tensors.
tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True)
for f in (flat_feeds + tensor_fetches + operation_fetches):
if f.graph is not self._func_graph:
raise ValueError("Can only prune function whose feeds and fetches "
"are from this graph (%s). Input %s is from graph %s" %
(self._func_graph, f, f.graph))
with self._func_graph.as_default():
pruned_graph = func_graph.FuncGraph(name)
lift_map = lift_to_graph.lift_to_graph(
operation_fetches + tensor_fetches,
pruned_graph,
sources=flat_feeds + internal_captures)
# Note that we add the component tensors of any composite tensors to the
# returned function's outputs list; the list must contain these component
# tensors, or the function's sparse outputs won't work properly.
pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches)
pruned_graph.control_outputs.extend(
[lift_map[operation] for operation in operation_fetches])
for external_capture, internal_capture in self.graph.captures.items():
pruned_graph.captures[external_capture] = lift_map[internal_capture]
pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
pruned_graph.inputs.extend(pruned_graph.captures.values())
for ti in tensor_infos:
if ti.WhichOneof("encoding") == "name": # Dense tensors only
t = pruned_graph.as_graph_element(ti.name)
if tensor_util.is_tensor(t):
t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
# pylint: disable=protected-access
for f in self.graph._functions.values():
pruned_graph._add_function(f)
# pylint: enable=protected-access
pruned_graph.variables = self.graph.variables
def _structured_output_mapping(fetched):
"""callback for `nest.map_structure()`"""
lifted = lift_map[fetched]
if isinstance(lifted, ops.Operation):
return None
return lifted
# expand_composites=True here causes composite tensors to be expanded
# into their component dense Tensors, mapped to the new graph, and then
# reconstituted into their original composite form.
pruned_graph.structured_outputs = nest.map_structure(
_structured_output_mapping, fetches, expand_composites=True)
pruned_graph.structured_input_signature = input_signature
pruned_fn = WrappedFunction(
pruned_graph, variable_holder=self._variable_holder)
pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access
# TODO(kathywu): Enable keyword arguments if an input signature is specified
pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access
return pruned_fn
def _filter_returned_ops(fn):
"""Filtering out any ops returned by function.
Args:
fn: a function
Returns:
A tuple of (
Wrapped function that returns `None` in place of any ops,
dict that maps the index in the flat output structure to the returned op
)
"""
returned_ops = {}
def wrap_and_filter_returned_ops(*args, **kwargs):
outputs = fn(*args, **kwargs)
flat_outputs = nest.flatten(outputs)
for n in range(len(flat_outputs)):
output = flat_outputs[n]
if isinstance(output, ops.Operation):
returned_ops[n] = output
flat_outputs[n] = None
return nest.pack_sequence_as(outputs, flat_outputs)
return wrap_and_filter_returned_ops, returned_ops
class WrappedGraph(object):
"""Class for wrapping multiple TF 1.X functions in a single graph.
Maintains a dictionary mapping names to wrapped functions. See
`tf.compat.v1.wrap_function` to learn more about wrapping V1 functions.
Functions wrapped using this class have access to variables and collections
created in other wrapped functions, using the standard TF 1.X API (
`tf.compat.v1.get_variable` or
`tf.compat.v1.get_default_graph().get_collection(...)`)
Outside a function, variables and collections may be accessed using the
`variables` and `graph` properties.
Example:
```
def add_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v + x
def increment_var_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v.assign_add(x)
g = WrappedGraph()
add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)])
increment_var = g.wrap_function(increment_var_v1,
[tf.TensorSpec([], tf.int32)])
assert len(g.variables) == 1
assert g.variables[0].numpy() == 0
increment_var(tf.constant(5))
assert g.variables[0].numpy() == 5
```
"""
def __init__(self, variable_holder=None, **kwargs):
self._variable_holder = (
variable_holder or VariableHolder(share_variables=True))
name = kwargs.pop("name", "wrapped_function_graph")
# Always start with empty collections, unless otherwise specified. Setting
# `collections=None` will copy the collections from the outer graph.
collections = kwargs.pop("collections", {})
self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
self._functions = {}
@property
def functions(self):
return self._functions
@property
def variables(self):
return self._variable_holder.variables
def wrap_function(self, fn, signature, name=None):
"""Wraps a TF 1.X function and returns an eager-compatible function.
All functions wrapped in the same `WrappedGraph` will have access to the
same graph (`tf.compat.v1.get_default_graph` to get the graph object
within a function, or `WrappedGraph.graph` to get the graph outside a
function). Variables created within the function will be added to the
`variables` list.
Function inputs: All inputs to the function must be tensors (nested ok),
with their shapes and dtypes defined in the `signature` argument.
Function outputs:
* The 1.X function may return tensors, variables, and ops. The wrapped
eager-compatible function will always return tensors in the same nested
structure.
* Variables are replaced with a tensor containing the latest read values.
* Returned ops are executed, and replaced with None.
* The order of op execution and variable reads in the return is
nondeterministic. For example:
```
def update_var(x):
v = tf.Variable(0)
op = tf.compat.v1.assign(v, x).op
return v, op
g = WrappedGraph()
fn = g.wrap_function(update_var, [tf.TensorSpec([], tf.int32)])
read_value, _ = fn(tf.constant(3))
print(read_value.numpy()) # could be 0 or 3
print(g.variables[0].numpy()) # always 3
```
To ensure that ops in the function are executed (e.g. ops added to the
`tf.GraphKeys.UPDATE_OPS` collection), include them in the function returns.
Args:
fn: a 1.X tensorflow function.
signature: a possibly nested sequence of `TensorSpecs` specifying the
shapes and dtypes of the arguments.
name: an optional string name for the function. The function will be saved
with key `name` in the `functions` dictionary.
Returns:
An eager-compatible function.
"""
return self._wrap_function(fn, signature=signature, name=name)
def _wrap_function(self,
fn,
args=None,
kwargs=None,
signature=None,
name=None):
"""Internal wrap function method with extended func_graph arguments."""
fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
self._variable_holder.call_with_variable_creator_scope(fn))
func_graph.func_graph_from_py_func(
None, # Name is unused.
fn_with_filter_and_scope,
args=args,
kwargs=kwargs,
signature=signature,
add_control_dependencies=False,
func_graph=self.graph)
# This code relies on questionable behavior from `func_graph_from_py_func`.
# If an existing FuncGraph is passed into the `func_graph` arg, the inputs
# and structured outputs are overwritten. This is likely a bug, because the
# structured outputs don't match up with the outputs...
fn_inputs = self.graph.inputs[:-len(self.graph.captures)]
# Return filtered ops to the flattened outputs.
flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
for index, op in returned_ops.items():
flat_fn_outputs[index] = op
fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
flat_fn_outputs)
name = name or fn.__name__
wrapped_function = self._wrapped_function.prune(
fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
self._functions[name] = wrapped_function
return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
"""Wraps the TF 1.x function fn into a graph function.
The python function `fn` will be called once with symbolic arguments specified
in the `signature`, traced, and turned into a graph function. Any variables
created by `fn` will be owned by the object returned by `wrap_function`. The
resulting graph function can be called with tensors which match the
signature.
```python
def f(x, do_add):
v = tf.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with tf.control_dependencies([op]):
return v.read_value()
f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])
assert float(f_add(1.0)) == 6.0
assert float(f_add(1.0)) == 7.0
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False])
assert float(f_sub(1.0)) == 4.0
assert float(f_sub(1.0)) == 3.0
```
Both `tf.compat.v1.wrap_function` and `tf.function` create a callable
TensorFlow graph. But while `tf.function` runs all stateful operations
(e.g. `tf.print`) and sequences operations to provide the same semantics as
eager execution, `wrap_function` is closer to the behavior of `session.run` in
TensorFlow 1.x. It will not run any operations unless they are required to
compute the function's outputs, either through a data dependency or a control
dependency. Nor will it sequence operations.
Unlike `tf.function`, `wrap_function` will only trace the Python function
once. As with placeholders in TF 1.x, shapes and dtypes must be provided to
`wrap_function`'s `signature` argument.
Since it is only traced once, variables and state may be created inside the
function and owned by the function wrapper object.
Args:
fn: the Python function to be wrapped.
signature: the placeholder and Python arguments to be passed to the wrapped
function.
name: Optional. The name of the function.
Returns:
The wrapped graph function.
"""
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None,
kwargs=None,
signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
"""Creates a ConcreteFunction from a GraphDef.
Args:
graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which
should be inputs to the function.
outputs: A Tensor name or nested structure of names in `graph_def` which
should be outputs of the function.
Returns:
A ConcreteFunction.
"""
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
|
tensorflow-master
|
tensorflow/python/eager/wrap_function.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow 2.0 Profiler for both Eager Mode and Graph Mode.
The profiler has two modes:
- Programmatic Mode: start(), stop(), and the Profiler class. Profiling begins
  when start() is called or a Profiler instance is created, and stops when
  stop() is called or the Profiler instance is destroyed.
- On-demand Mode: start_profiler_server(). Profiling is performed whenever a
  profiling request is received.
NOTE: Only one active profiler session is allowed. Use of simultaneous
Programmatic Mode and On-demand Mode is undefined and will likely fail.
NOTE: The Keras TensorBoard callback will automatically perform sampled
profiling. Before enabling customized profiling, set the callback flag
"profile_batches=[]" to disable automatic sampled profiling.
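A minimal sketch of Programmatic Mode (the logdir path is illustrative):
```python
from tensorflow.python.eager import profiler
profiler.start()
# ... run the TensorFlow computation to be profiled ...
result = profiler.stop()
profiler.save('/tmp/profiler_logdir', result)
```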
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import threading
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import c_api_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
_profiler = None
_profiler_lock = threading.Lock()
_run_num = 0
# This suffix should be kept in sync with kProfileEmptySuffix in
# tensorflow/core/profiler/rpc/client/capture_profile.cc.
_EVENT_FILE_SUFFIX = '.profile-empty'
class ProfilerAlreadyRunningError(Exception):
pass
class ProfilerNotRunningError(Exception):
pass
def start():
"""Start profiling.
Raises:
ProfilerAlreadyRunningError: If another profiling session is running.
"""
global _profiler
with _profiler_lock:
if _profiler is not None:
raise ProfilerAlreadyRunningError('Another profiler is running.')
profiler_context = pywrap_tensorflow.TFE_NewProfilerContext()
if context.default_execution_mode == context.EAGER_MODE:
context.ensure_initialized()
pywrap_tensorflow.TFE_ProfilerContextSetEagerContext(
profiler_context,
context.context()._handle) # pylint: disable=protected-access
_profiler = pywrap_tensorflow.TFE_NewProfiler(profiler_context)
pywrap_tensorflow.TFE_DeleteProfilerContext(profiler_context)
if not pywrap_tensorflow.TFE_ProfilerIsOk(_profiler):
logging.warning('Another profiler session is running which is probably '
'created by profiler server. Please avoid using profiler '
'server and profiler APIs at the same time.')
def stop():
"""Stop current profiling session and return its result.
Returns:
A binary string of tensorflow.tpu.Trace. The user can write the string
to a file for offline analysis by TensorBoard.
Raises:
ProfilerNotRunningError: If there is no active profiling session.
"""
global _profiler
global _run_num
with _profiler_lock:
if _profiler is None:
raise ProfilerNotRunningError(
'Cannot stop profiling. No profiler is running.')
if context.default_execution_mode == context.EAGER_MODE:
context.async_wait()
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TFE_ProfilerSerializeToString(
_profiler,
buffer_)
result = pywrap_tensorflow.TF_GetBuffer(buffer_)
pywrap_tensorflow.TFE_DeleteProfiler(_profiler)
_profiler = None
_run_num += 1
return result
def maybe_create_event_file(logdir):
"""Create an empty event file if not already exists.
This event file indicates that we have a plugins/profile/ directory in the
current logdir.
Args:
logdir: log directory.
"""
for file_name in gfile.ListDirectory(logdir):
if file_name.endswith(_EVENT_FILE_SUFFIX):
return
# TODO(b/127330388): Use summary_ops_v2.create_file_writer instead.
event_writer = pywrap_tensorflow.EventsWriter(
compat.as_bytes(os.path.join(logdir, 'events')))
event_writer.InitWithSuffix(compat.as_bytes(_EVENT_FILE_SUFFIX))
def save(logdir, result):
"""Save profile result to TensorBoard logdir.
Args:
logdir: log directory read by TensorBoard.
result: profiling result returned by stop().
"""
plugin_dir = os.path.join(
logdir, 'plugins', 'profile',
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
gfile.MakeDirs(plugin_dir)
maybe_create_event_file(logdir)
with gfile.Open(os.path.join(plugin_dir, 'local.trace'), 'wb') as f:
f.write(result)
def start_profiler_server(port):
"""Start a profiler grpc server that listens to given port.
The profiler server will keep the program running even the training finishes.
Please shutdown the server with CTRL-C. It can be used in both eager mode and
graph mode. The service defined in
tensorflow/core/profiler/profiler_service.proto. Please use
tensorflow/contrib/tpu/profiler/capture_tpu_profile to capture tracable
file following https://cloud.google.com/tpu/docs/cloud-tpu-tools#capture_trace
Args:
port: the port the profiler server listens on.
"""
profiler_context = pywrap_tensorflow.TFE_NewProfilerContext()
if context.default_execution_mode == context.EAGER_MODE:
context.ensure_initialized()
pywrap_tensorflow.TFE_ProfilerContextSetEagerContext(
profiler_context,
context.context()._handle) # pylint: disable=protected-access
pywrap_tensorflow.TFE_StartProfilerServer(profiler_context, port)
pywrap_tensorflow.TFE_DeleteProfilerContext(profiler_context)
class Profiler(object):
"""Context-manager eager profiler api.
Example usage:
```python
with Profiler("/path/to/logdir"):
# do some work
```
"""
def __init__(self, logdir):
self._logdir = logdir
def __enter__(self):
start()
def __exit__(self, typ, value, tb):
result = stop()
save(self._logdir, result)
|
tensorflow-master
|
tensorflow/python/eager/profiler.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer as graph_def_importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class WrapFunctionTest(test.TestCase):
def testDocString(self):
def f(x, do_add):
v = variables.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with ops.control_dependencies([op]):
return v.read_value()
f_add = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32), True])
self.assertAllEqual(f_add(1.0), 6.0)
self.assertAllEqual(f_add(1.0), 7.0)
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32), False])
self.assertAllEqual(f_sub(1.0), 4.0)
self.assertAllEqual(f_sub(1.0), 3.0)
def testPrune(self):
x_in = []
x_out = []
def f(x, y):
x_in.append(x)
xx = x * x
x_out.append(xx)
return xx, 2 * y*y
f_wrapped = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32)] * 2)
f_pruned = f_wrapped.prune(x_in[0], [x_out[0]])
self.assertAllEqual(f_pruned(ops.convert_to_tensor(2.0)), [4.0])
def _assert_single_captured_variable_argument(self, graph_def):
# The single FunctionDef should have one argument, a captured variable
function_def, = graph_def.library.function
self.assertLen(function_def.signature.input_arg, 1)
function_arg, = function_def.signature.input_arg
self.assertEqual(dtypes.resource, dtypes.as_dtype(function_arg.type))
def testVariableLifting(self):
save_prefix = os.path.join(self.get_temp_dir(), 'meta_graph_test')
export_graph = ops.Graph()
with export_graph.as_default():
v = variables.Variable(1.)
array_ops.identity(v + 1., name='output')
saver = saver_lib.Saver([v])
with self.test_session() as session:
session.run(v.initializer)
saver.save(session, save_prefix)
def importer():
saver_lib.import_meta_graph(save_prefix + '.meta')
return ops.get_default_graph().as_graph_element('output:0')
wrapped = wrap_function.wrap_function(importer, [])
lifted_variables = list(wrapped.graph.variables)
self.assertLen(lifted_variables, 1)
initializer = wrapped.prune(
[], wrapped.graph.as_graph_element(v.initializer.name))
self.assertEqual(lifted_variables, list(initializer.graph.variables))
self.assertEqual(initializer.graph.external_captures,
wrapped.graph.external_captures)
@def_function.function
def wraps_initializer():
initializer()
wraps_initializer()
self.assertEqual(1., lifted_variables[0].numpy())
wrapped_initializer_graphdef = (
wraps_initializer.get_concrete_function().graph.as_graph_def())
self._assert_single_captured_variable_argument(wrapped_initializer_graphdef)
@def_function.function
def wraps_wrapped():
return wrapped()
# Verify that the original graph also has the correct signature.
wrapped_wrapped_graphdef = (
wraps_wrapped.get_concrete_function().graph.as_graph_def())
self._assert_single_captured_variable_argument(wrapped_wrapped_graphdef)
# Now check that the graph runs wrapped, from eager, and when pruned.
self.assertAllEqual(wraps_wrapped().numpy(),
lifted_variables[0].numpy() + 1.)
self.assertAllEqual(wrapped().numpy(), lifted_variables[0].numpy() + 1.)
pruned = wrapped.prune([], wrapped.graph.as_graph_element('output:0'))
self.assertAllEqual(wrapped().numpy(), pruned().numpy())
def testNoArguments(self):
def f():
return constant_op.constant(1.)
f_wrapped = wrap_function.wrap_function(f, [])
self.assertAllEqual(1.0, f_wrapped())
def testPruneCaptures(self):
v1 = variables.Variable(2.)
def f():
v2 = variables.Variable(3.)
return array_ops.identity(v1 * v2 * constant_op.constant(1.), 'fetch')
f_wrapped = wrap_function.wrap_function(f, [])
self.assertAllEqual(6.0, f_wrapped())
# Test pruning directly on the inputs
pruned = f_wrapped.prune(
feeds=f_wrapped.inputs,
fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))
self.assertAllEqual(6.0, pruned())
# Test pruning with no inputs
pruned = f_wrapped.prune(
feeds=(),
fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))
self.assertAllEqual(6.0, pruned())
def testCollectionsIsolation(self):
v1 = variables.Variable(2.)
v2_holder = []
def f():
v2 = variables.Variable(3.)
v2_holder.append(v2)
ops.add_to_collection(ops.GraphKeys.LOSSES, v2 * constant_op.constant(3.))
return array_ops.identity(v1 * v2 * constant_op.constant(1.), 'fetch')
f_wrapped = wrap_function.wrap_function(f, [])
self.assertAllEqual(6.0, f_wrapped())
self.assertEqual(
len(f_wrapped.graph.get_collection(ops.GraphKeys.LOSSES)), 1)
f_var_collection = f_wrapped.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(f_var_collection), 1)
self.assertIs(f_var_collection[0], v2_holder[0])
v3_holder = []
def g():
v3 = variables.Variable(4.)
v3_holder.append(v3)
ops.add_to_collection(ops.GraphKeys.LOSSES, v3 * constant_op.constant(3.))
return array_ops.identity(v1 * v3 * constant_op.constant(1.), 'fetch')
g_wrapped = wrap_function.wrap_function(g, [])
self.assertAllEqual(8.0, g_wrapped())
self.assertEqual(
len(g_wrapped.graph.get_collection(ops.GraphKeys.LOSSES)), 1)
g_var_collection = g_wrapped.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(g_var_collection), 1)
self.assertIs(g_var_collection[0], v3_holder[0])
# Both have only one value, and their values aren't equal. So no sharing.
self.assertNotEqual(g_wrapped.graph.get_collection(ops.GraphKeys.LOSSES),
f_wrapped.graph.get_collection(ops.GraphKeys.LOSSES))
def testGradientsOfPrune(self):
v1 = variables.Variable(2.)
v2_holder = []
def f(z):
v2 = variables.Variable(3.)
v2_holder.append(v2)
return array_ops.identity(v1 * v2 * z, 'fetch')
f_wrapped = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtype=dtypes.float32)])
x = constant_op.constant(1.)
with backprop.GradientTape() as tape:
tape.watch(x)
out = f_wrapped(x)
grads = tape.gradient(out, [x, v1, v2_holder[0]])
self.assertAllEqual(6.0, out)
self.assertAllEqual([6.0, 3.0, 2.0], grads)
pruned = f_wrapped.prune(
feeds=f_wrapped.inputs,
fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))
x = constant_op.constant(1.)
with backprop.GradientTape() as tape:
tape.watch(x)
out = pruned(x)
grads = tape.gradient(out, [x, v1, v2_holder[0]])
self.assertAllEqual(6.0, out)
self.assertAllEqual([6.0, 3.0, 2.0], grads)
def testPruneOperations(self):
v = variables.Variable(0)
def f():
v.assign_add(1, name='increment', read_value=False)
f_wrapped = wrap_function.wrap_function(f, [])
pruned = f_wrapped.prune(
feeds=(),
fetches=(f_wrapped.graph.get_operation_by_name('increment'),))
self.assertEqual((None,), pruned())
self.assertEqual(1, self.evaluate(v))
del f, f_wrapped
def f1():
v.assign_add(
array_ops.placeholder(shape=[], dtype=dtypes.int32, name='step'),
name='increment', read_value=False)
return constant_op.constant(1, name='other')
f_wrapped = wrap_function.wrap_function(f1, [])
increments = f_wrapped.prune(
feeds=(f_wrapped.graph.get_tensor_by_name('step:0')),
fetches=(f_wrapped.graph.get_operation_by_name('increment'),
f_wrapped.graph.get_tensor_by_name('other:0')))
first_output, second_output = increments(constant_op.constant(2))
self.assertEqual(['step:0', 'increment/resource:0'],
[t.name for t in increments.inputs])
self.assertIs(None, first_output)
self.assertEqual(1, second_output.numpy())
self.assertEqual(3, v.numpy())
does_not_increment = f_wrapped.prune(
feeds=(f_wrapped.graph.get_tensor_by_name('step:0')),
fetches=f_wrapped.graph.get_tensor_by_name('other:0'))
self.assertEqual(1, does_not_increment(constant_op.constant(3)).numpy())
self.assertEqual(3, v.numpy())
def testPruneStatefulOpsFromWrappedFunc(self):
v0 = variables.Variable(0)
v1 = variables.Variable(0)
# When we wrap a function, we expect it to be executed with `tf.Graph`
# rules: it's allowed to prune all ops that are not in the transitive fanin of
# the fetches.
def f(x):
v0.assign_add(1, name='increment_v0')
v1.assign_add(1, name='increment_v1')
return x
f_wrapped = wrap_function.wrap_function(f, [1])
self.assertEqual(1, f_wrapped().numpy())
self.assertEqual(0, v0.numpy())
self.assertEqual(0, v1.numpy())
f_wrapped_with_name = wrap_function.wrap_function(f, [2], name='func')
self.assertEqual(2, f_wrapped_with_name().numpy())
self.assertEqual(0, v0.numpy())
self.assertEqual(0, v1.numpy())
def test_operation_returned(self):
v = variables.Variable(0)
def f():
v.assign(1, read_value=False, name='assign_to_v')
f_wrapped = wrap_function.wrap_function(f, [])
operation_to_fetch = f_wrapped.graph.get_operation_by_name('assign_to_v')
f_pruned = f_wrapped.prune(
[], operation_to_fetch)
self.assertEqual(
['assign_to_v'],
[operation.name for operation in f_pruned.graph.control_outputs])
self.assertEqual(0, v.numpy())
f_pruned()
self.assertEqual(1, v.numpy())
f_wrapped.prune([], 'assign_to_v')()
f_wrapped.prune([], meta_graph_pb2.TensorInfo(name='assign_to_v'))()
def test_function_from_graph_def(self):
@def_function.function
def make_graph_def(x):
return x + 1.
original_func_graph = make_graph_def.get_concrete_function(
tensor_spec.TensorSpec([None, 2], dtypes.float32)).graph
graph_def = original_func_graph.as_graph_def()
revived_function = wrap_function.function_from_graph_def(
graph_def, inputs=original_func_graph.inputs[0].name,
outputs=original_func_graph.outputs[0].name)
self.assertEqual(2., revived_function(constant_op.constant(1.)).numpy())
def test_create_variables_with_same_name(self):
def f():
v1 = variables.Variable(0, name='v')
v2 = variables.Variable(1, name='v')
return v1, v2
f_wrapped = wrap_function.wrap_function(f, [])
self.assertDictEqual(
{'v:0': 0, 'v_1:0': 1}, # assert that variable names are uniquified
{v.name: v.numpy()
for v in f_wrapped._variable_holder.variables.values()})
# Uniquification should reset in separate calls to wrap_function.
def f2():
v1 = variables.Variable(3, name='v')
v2 = variables.Variable(4, name='v')
return v1, v2
f_wrapped_2 = wrap_function.wrap_function(f2, [])
self.assertDictEqual(
{'v:0': 3, 'v_1:0': 4},
{v.name: v.numpy()
for v in f_wrapped_2._variable_holder.variables.values()})
class WrappedGraphTest(test.TestCase):
def testAddFunction(self):
def fn(x):
v = variables.Variable(3, name='v')
v2 = variable_scope.get_variable(
'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32)
return v + v2 + x
with self.cached_session() as sess:
result = fn(constant_op.constant(5))
sess.run(variables.global_variables_initializer())
expected = sess.run(result)
g = wrap_function.WrappedGraph()
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
wrapped_fn = g.wrap_function(fn, signature)
self.assertEqual(expected, wrapped_fn(constant_op.constant(5)).numpy())
def testCollections(self):
def fn(x):
v = variables.VariableV1(3, name='v', trainable=False, collections=['a'])
v2 = variable_scope.get_variable(
'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32,
collections=['a', 'b'])
return v + v2 + x
def assert_collections(graph):
self.assertLen(graph.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES), 1)
self.assertLen(graph.get_collection('a'), 2)
self.assertLen(graph.get_collection('b'), 1)
g = wrap_function.WrappedGraph()
g.wrap_function(fn, [tensor_spec.TensorSpec([], dtypes.int32)])
assert_collections(g.graph)
def assert_fn():
assert_collections(ops.get_default_graph())
return 1 # Return is required
# Assert that collections are accessible within a wrapped function.
g.wrap_function(assert_fn, [])
def testShareVariablesSameGraph(self):
def add_v1(x):
with variable_scope.variable_scope(
'reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(3), shape=[], dtype=dtypes.int32)
return v + x
def subtract_v1(x):
with variable_scope.variable_scope(
'reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32)
return v - x
def different_variable_fn_v1(x):
with variable_scope.variable_scope(
'no_reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(5), shape=[], dtype=dtypes.int32)
return v * x
def increment_variable_v1(x):
with variable_scope.variable_scope(
'reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(6), shape=[], dtype=dtypes.int32)
return v.assign_add(x)
g = wrap_function.WrappedGraph()
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
add = g.wrap_function(add_v1, signature)
subtract = g.wrap_function(subtract_v1, signature)
different_variable_fn = g.wrap_function(different_variable_fn_v1, signature)
increment_variable = g.wrap_function(increment_variable_v1, signature)
self.assertEqual(10, add(constant_op.constant(7)).numpy())
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
# The shared variable has a starting value of 3 because add_v1 was wrapped
# first.
self.assertEqual(-4, subtract(constant_op.constant(7)).numpy())
self.assertEqual(10, increment_variable(constant_op.constant(7)).numpy())
# Check that variable updates
self.assertEqual(17, add(constant_op.constant(7)).numpy())
self.assertEqual(3, subtract(constant_op.constant(7)).numpy())
# Sanity check - result from this function shouldn't change.
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
self.assertAllEqual({'reuse/v', 'no_reuse/v'}, set(g.variables.keys()))
def testShareVariablesDifferentGraphs(self):
def add_v1(x):
v = variables.Variable(3, name='v')
return v + x
def subtract_v1(x):
v = variables.Variable(4, name='v')
return v - x
def different_variable_fn_v1(x):
with ops.name_scope('different_scope'):
v = variables.Variable(5, name='v')
return v * x
def increment_variable_v1(x):
v = variables.Variable(6, name='v')
return v.assign_add(x)
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
vh = wrap_function.VariableHolder(share_variables=True)
new_graph = lambda: wrap_function.WrappedGraph(variable_holder=vh)
add = new_graph().wrap_function(add_v1, signature)
subtract = new_graph().wrap_function(subtract_v1, signature)
different_variable_fn = new_graph().wrap_function(
different_variable_fn_v1, signature)
increment_variable = new_graph().wrap_function(
increment_variable_v1, signature)
self.assertEqual(10, add(constant_op.constant(7)).numpy())
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
# Because the variable in add_v1 was created first, its starting value is 3
# instead of the values defined in subtract_v1 or increment_variable_v1.
self.assertEqual(-4, subtract(constant_op.constant(7)).numpy())
self.assertEqual(10, increment_variable(constant_op.constant(7)).numpy())
# Check that variable updates
self.assertEqual(17, add(constant_op.constant(7)).numpy())
self.assertEqual(3, subtract(constant_op.constant(7)).numpy())
# Sanity check - result from this function shouldn't change.
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
self.assertAllEqual({'v', 'different_scope/v'}, set(vh.variables.keys()))
@test_util.run_in_graph_and_eager_modes
def testImportedFunctionsRegistered(self):
with ops.Graph().as_default() as graph:
x = array_ops.placeholder(dtypes.variant, shape=[], name='foo')
ds = dataset_ops.from_variant(x, structure=(
structure.TensorStructure(dtypes.int32, [])))
y = ds.reduce(array_ops.zeros([], dtype=dtypes.int32), lambda p, q: p + q)
graph_def = graph.as_graph_def()
def fn_to_wrap(a):
returned_elements = graph_def_importer.import_graph_def(
graph_def, input_map={x.name: a}, return_elements=[y.name])
return returned_elements[0]
wrapped_fn = wrap_function.wrap_function(
fn_to_wrap, [tensor_spec.TensorSpec((), dtypes.variant)])
ds = dataset_ops.Dataset.from_tensor_slices([10, 20])
v = dataset_ops.to_variant(ds)
self.evaluate(wrapped_fn(v))
def testReturnOp(self):
def update_var_v1(x):
v = variables.Variable(3, name='v')
update_op = state_ops.assign(v, x).op
return update_op
g = wrap_function.WrappedGraph()
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
update_var = g.wrap_function(update_var_v1, signature)
self.assertEqual(g.variables['v'].numpy(), 3)
update_var(constant_op.constant(12))
self.assertEqual(g.variables['v'].numpy(), 12)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/eager/wrap_function_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for profiler_client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import profiler_client
from tensorflow.python.eager import test
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
class ProfilerClientTest(test_util.TensorFlowTestCase):
def testStartTracing_ProcessInvalidAddress(self):
with self.assertRaises(errors.UnavailableError):
profiler_client.start_tracing('localhost:6006', '/tmp/', 2000)
def testMonitor_ProcessInvalidAddress(self):
with self.assertRaises(errors.UnavailableError):
profiler_client.monitor('localhost:6006', 2000)
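# A minimal sketch (not part of the original test file) of what these calls
# look like against a live profiler server; the address, logdir, and duration
# below are hypothetical:
#
#   profiler_client.start_tracing('localhost:6009', '/tmp/logdir', 2000)
#   profiler_client.monitor('localhost:6009', 2000)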
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/eager/profiler_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Execution Callbacks for Eager Mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import enum # pylint: disable=g-bad-import-order
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import execute
from tensorflow.python.platform import tf_logging as logging
class ExecutionCallback(enum.Enum):
"""Valid callback actions.
These can be passed to `seterr` or `errstate` to create callbacks when
specific events occur (e.g. an operation produces `NaN`s).
IGNORE: take no action.
PRINT: print a warning to `stdout`.
RAISE: raise an error (e.g. `InfOrNanError`).
WARN: print a warning using `tf.compat.v1.logging.warn`.
"""
IGNORE = "ignore"
PRINT = "print"
RAISE = "raise"
WARN = "warn"
_DEFAULT_CALLBACK_ACTION = ExecutionCallback.RAISE
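# Sketch added for illustration (not in the original module): ExecutionCallback
# is a value-backed enum, so the string spelling round-trips to the member.
# This is what lets callers pass either `ExecutionCallback.RAISE` or the plain
# string "raise" to `seterr`/`errstate` below.
assert ExecutionCallback("raise") is ExecutionCallback.RAISE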
# TODO(cais): Consider moving this exception class to errors_impl.py.
class InfOrNanError(Exception):
"""Exception for inf and/or nan being present in tensor."""
def __init__(self,
op_type,
op_name,
output_index,
num_outputs,
value):
"""Constructor of InfOrNanError.
Args:
op_type: Type name of the op that generated the tensor with
`inf`(s) or `nan`(s) (e.g., `Div`).
op_name: Name of the op that generated the tensor with `inf`(s) or
`nan`(s). This name is set by client and can be `None` if it is unset.
output_index: The 0-based output index of the tensor that contains
`inf`(s) or `nan`(s).
num_outputs: Total number of outputs of the operation.
value: The tensor value that contains `inf`(s) or `nan`(s).
"""
self._op_type = op_type
self._op_name = op_name
self._output_index = output_index
self._num_outputs = num_outputs
self._value = value
self._total_count = np.size(value)
self._inf_count = np.count_nonzero(np.isinf(value))
self._nan_count = np.count_nonzero(np.isnan(value))
super(InfOrNanError, self).__init__(self._get_error_message())
def _get_error_message(self):
"""Get the error message describing this InfOrNanError object."""
name_str = (("'%s'" % self._op_name) if self._op_name is not None
else str(self._op_name))
msg = "Output %d of %d of TFE operation %s (name: %s) contains " % (
self._output_index + 1, self._num_outputs, self._op_type, name_str)
if self._inf_count and self._nan_count:
msg += "%d inf(s) and %d nan(s) " % (self._inf_count, self._nan_count)
elif self._inf_count:
msg += "%d inf(s) " % self._inf_count
else:
msg += "%d nan(s) " % self._nan_count
msg += "out of a total of %d element(s). Tensor value: %s" % (
self._total_count, self._value)
return msg
@property
def op_type(self):
return self._op_type
@property
def op_name(self):
return self._op_name
@property
def output_index(self):
return self._output_index
@property
def num_outputs(self):
return self._num_outputs
@property
def value(self):
return self._value
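# Illustrative sketch (not part of the original module): constructing an
# InfOrNanError directly to show the message it formats. The op type, op name,
# and helper name here are hypothetical.
def _inf_or_nan_error_example():
  bad = np.array([1.0, np.inf, np.nan])
  err = InfOrNanError("Div", "example_div", 0, 1, bad)
  # The message reports "1 inf(s) and 1 nan(s) out of a total of 3 element(s)".
  return str(err)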
def inf_nan_callback(op_type,
inputs,
attrs,
outputs,
op_name,
check_inf=True,
check_nan=True,
action=_DEFAULT_CALLBACK_ACTION):
"""An execution callback that checks for `inf`s and `nan`s in output tensors.
  This callback can be used with `tfe.add_execution_callback` to check for
  invalid numeric values. E.g.,
  ```python
  tfe.add_execution_callback(tfe.inf_nan_callback)
```
Args:
op_type: Name of the TFE operation type (e.g., `MatMul`).
inputs: The `list` of input tensors to the operation, currently unused by
this callback.
attrs: Attributes of the TFE operation, as a tuple of alternating attribute
names and attribute values.
outputs: The `list` of output tensors from the operation, checked by this
callback for `inf` and `nan` values.
    op_name: Name of the TFE operation. This name is set by the client and
      can be `None` if it is unset.
check_inf: (`bool`) Whether this callback should check for `inf` values in
the output tensor values.
check_nan: (`bool`) Whether this callback should check for `nan` values in
the output tensor values.
action: (`ExecutionCallback`) Action to be taken by the callback when
`inf` or `nan` values are detected.
Raises:
InfOrNanError: iff `inf` or `nan` values are seen in any of `outputs` and
`action` is `"raise"`.
ValueError: iff the value of `action` is invalid.
"""
del attrs, inputs # Not used.
action = ExecutionCallback(action)
ctx = context.context()
for index, output in enumerate(outputs):
if not output.dtype.is_numpy_compatible:
continue
numpy_dtype = output.dtype.as_numpy_dtype
    if (np.issubdtype(numpy_dtype, np.floating) or
        np.issubdtype(numpy_dtype, np.complexfloating) or
        np.issubdtype(numpy_dtype, np.integer)):
try:
check_numerics_op_attrs = (
"message", "Eager-mode inf/nan check",
"T", outputs[0].dtype.as_datatype_enum)
# TODO(cais): Consider moving this into execute.py.
# pylint: disable=protected-access
ctx.ensure_initialized()
pywrap_tensorflow.TFE_Py_Execute(
ctx._handle, output.device, "CheckNumerics", [output],
check_numerics_op_attrs, 1)
# pylint: enable=protected-access
except core._NotOkStatusException: # pylint: disable=protected-access
value = output.numpy()
inf_detected = np.any(np.isinf(value)) and check_inf
nan_detected = np.any(np.isnan(value)) and check_nan
if not inf_detected and not nan_detected:
continue
error = InfOrNanError(op_type, op_name, index, len(outputs), value)
if action == ExecutionCallback.PRINT:
print("Warning: %s" % str(error))
elif action == ExecutionCallback.WARN:
logging.warn(str(error))
elif action == ExecutionCallback.RAISE:
raise error
else:
raise ValueError(
"Invalid action for inf_nan_callback: %s. Valid actions are: "
"{PRINT | WARN | RAISE}" % action)
def inf_callback(op_type,
inputs,
attrs,
outputs,
op_name,
action=_DEFAULT_CALLBACK_ACTION):
"""A specialization of `inf_nan_callback` that checks for `inf`s only."""
inf_nan_callback(
op_type,
inputs,
attrs,
outputs,
op_name,
check_inf=True,
check_nan=False,
action=action)
def nan_callback(op_type,
inputs,
attrs,
outputs,
op_name,
action=_DEFAULT_CALLBACK_ACTION):
"""A specialization of `inf_nan_callback` that checks for `nan`s only."""
inf_nan_callback(
op_type,
inputs,
attrs,
outputs,
op_name,
check_inf=False,
check_nan=True,
action=action)
def add_execution_callback(callback):
"""Add an execution callback to the default eager context.
An execution callback is invoked immediately after an eager operation or
  function has finished execution, providing access to the op's type, name,
input and output tensors. Multiple execution callbacks can be added, in
which case the callbacks will be invoked in the order in which they are
added. To clear all execution callbacks that have been added, use
`clear_execution_callbacks()`.
Example:
```python
def print_even_callback(op_type, inputs, attrs, outputs, op_name):
# A callback that prints only the even output values.
if outputs[0].numpy() % 2 == 0:
print("Even output from %s: %s" % (op_name or op_type, outputs))
tfe.add_execution_callback(print_even_callback)
x = tf.pow(2.0, 3.0) - 3.0
y = tf.multiply(x, tf.add(1.0, 5.0))
# When the line above is run, you will see all intermediate outputs that are
# even numbers printed to the console.
tfe.clear_execution_callbacks()
```
Args:
callback: a callable of the signature
`f(op_type, inputs, attrs, outputs, op_name)`.
`op_type` is the type of the operation that was just executed (e.g.,
`MatMul`).
`inputs` is the `list` of input `Tensor`(s) to the op.
`attrs` contains the attributes of the operation as a `tuple` of
alternating attribute name and attribute value.
`outputs` is the `list` of output `Tensor`(s) from the op.
`op_name` is the name of the operation that was just executed. This
name is set by the client who created the operation and can be `None`
if it is unset.
Return value(s) from the callback are ignored.
"""
execute.execute = execute.execute_with_callbacks
context.context().add_post_execution_callback(callback)
def clear_execution_callbacks():
"""Clear all execution callbacks from the default eager context."""
context.context().clear_post_execution_callbacks()
def seterr(inf_or_nan=None):
"""Set how abnormal conditions are handled by the default eager context.
Example:
```python
tfe.seterr(inf_or_nan=ExecutionCallback.RAISE)
a = tf.constant(10.0)
b = tf.constant(0.0)
try:
c = a / b # <-- Raises InfOrNanError.
except Exception as e:
print("Caught Exception: %s" % e)
tfe.seterr(inf_or_nan=ExecutionCallback.IGNORE)
c = a / b # <-- Does NOT raise exception anymore.
```
Args:
    inf_or_nan: An `ExecutionCallback` determining the action for infinity
      (`inf`) and NaN (`nan`) values. A value of `None` leaves the current
      action unchanged.
Returns:
A dictionary of old actions.
Raises:
ValueError: If the value of any keyword arguments is invalid.
"""
inf_or_nan = ExecutionCallback(inf_or_nan) if inf_or_nan is not None else None
old_settings = {"inf_or_nan": ExecutionCallback.IGNORE}
default_context = context.context()
carryover_callbacks = []
for callback in default_context.post_execution_callbacks:
# Check whether the callback is inf_nan_callback or a partial object of
# inf_nan_callback.
    if (callback == inf_nan_callback or
        (isinstance(callback, functools.partial) and
         callback.func == inf_nan_callback)):
if callback == inf_nan_callback:
old_settings["inf_or_nan"] = _DEFAULT_CALLBACK_ACTION
else:
old_settings["inf_or_nan"] = callback.keywords.get(
"action", _DEFAULT_CALLBACK_ACTION)
elif inf_or_nan is not None:
carryover_callbacks.append(callback)
if inf_or_nan is not None:
default_context.clear_post_execution_callbacks()
for callback in carryover_callbacks:
default_context.add_post_execution_callback(callback)
if inf_or_nan != ExecutionCallback.IGNORE:
default_context.add_post_execution_callback(
functools.partial(inf_nan_callback, action=inf_or_nan))
return old_settings
@contextlib.contextmanager
def errstate(inf_or_nan=None):
"""Context manager setting error state.
Example:
```
c = tf.math.log(0.) # -inf
with errstate(inf_or_nan=ExecutionCallback.RAISE):
tf.math.log(0.) # <-- Raises InfOrNanError.
```
Args:
    inf_or_nan: An `ExecutionCallback` determining the action for infinity
      (`inf`) and NaN (`nan`) values. A value of `None` leaves the current
      action unchanged.
Yields:
None.
Raises:
ValueError: If the value of any keyword arguments is invalid.
"""
if not context.executing_eagerly():
yield
else:
old_settings = seterr(inf_or_nan=inf_or_nan)
yield
seterr(**old_settings)
|
tensorflow-master
|
tensorflow/python/eager/execution_callbacks.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class FunctionGradientsTest(test.TestCase, parameterized.TestCase):
def testGraphModeWithGradients(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
@def_function.function
def step():
def inner():
return v * v
return backprop.implicit_grad(inner)()[0][0]
self.assertAllEqual(step(), 2.0)
def testGraphGradientVariable(self):
with ops.Graph().as_default(), self.cached_session():
v = variables.Variable(1.0)
@def_function.function
def f():
return 2.0 * v
node = f()
grads, = gradients_impl.gradients(node, v)
v.initializer.run()
self.assertAllEqual(grads.eval(), 2.0)
self.assertEqual(grads.shape, v.shape)
def testSymGradGatherNd(self):
with ops.Graph().as_default(), self.cached_session() as sess:
@def_function.function
def f(x):
return array_ops.gather_nd(x, [[0]])
c = constant_op.constant([[2.]])
f_c = f(c)
g, = gradients_impl.gradients(f_c, c)
self.assertAllEqual(self.evaluate(g).values, [[1.0]])
def testNoSymGradNestedDefun(self):
@def_function.function
def outer():
@def_function.function
def f(x):
return array_ops.gather_nd(x, [[0]])
c = constant_op.constant([[2.]])
f_c = f(c)
g, = gradients_impl.gradients(f_c, c)
self.assertIsInstance(g, ops.IndexedSlices)
outer()
def testGraphFunctionWithGradients(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
@def_function.function
def step():
def inner():
return v * v
return backprop.implicit_grad(inner)()[0][0]
step_op = step.get_concrete_function()
self.assertEqual(step_op.output_dtypes, dtypes.float32)
self.assertEqual(step_op.output_shapes, tensor_shape.TensorShape([]))
self.assertAllEqual(step_op(), 2.0)
@test_util.run_in_graph_and_eager_modes()
def testDefunCondGradient(self):
@def_function.function
def f(x):
return control_flow_ops.cond(x > 0.5, lambda: 2 * x, lambda: 3 * x)
with backprop.GradientTape() as t:
x = constant_op.constant(1.0)
t.watch(x)
y = f(x)
self.assertAllEqual(self.evaluate(t.gradient(y, x)), 2.0)
@test_util.run_in_graph_and_eager_modes()
def testGraphLoopGradient(self):
@def_function.function
def f(x):
return control_flow_ops.while_loop(lambda _, i: i < 2,
lambda x, i: (2*x, i + 1),
[x, 0])[0]
with backprop.GradientTape() as t:
x = constant_op.constant(1.0)
t.watch(x)
y = f(x)
self.assertAllEqual(self.evaluate(t.gradient(y, x)), 4.0)
def testDefunDifferentiable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def f():
return v * v
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
def testDefunCanBeDifferentiatedTwice(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def f():
return v * v
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
# Ensure that v is watched again.
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
def testSymbolicGradientVariableNoneNotZerosLike(self):
with ops.Graph().as_default():
v = variables.Variable(1.0)
@def_function.function
def f(x, v):
v.read_value()
return x * x
x = constant_op.constant(1.0)
l = f(x, v)
_, dv = gradients_impl.gradients(l, [x, v])
with self.cached_session():
v.initializer.run()
self.assertEqual(dv, None)
def testDefunCallBackprop(self):
@def_function.function
def f(x):
return math_ops.add(x, x)
@def_function.function
def g(x):
return backprop.gradients_function(f, [0])(x)[0]
self.assertAllEqual(2, g(constant_op.constant(2.)))
@test_util.run_v1_only('b/120545219')
def testGraphModeEagerGradError(self):
with context.graph_mode():
def f():
x = variable_scope.get_variable(
'v', initializer=constant_op.constant(1.0))
return x * constant_op.constant(2.0)
with self.assertRaisesRegexp(ValueError,
'No trainable variables were accessed'):
backprop.implicit_val_and_grad(f)()
def testDefunCallBackpropUsingSameObjectForMultipleArguments(self):
@def_function.function
def g(x):
return backprop.gradients_function(math_ops.multiply, [0, 1])(x, x)
def np_g(x):
return [d.numpy() for d in g(x)]
x = constant_op.constant(1.)
self.assertAllEqual([1., 1.], np_g(x))
self.assertAllEqual([1., 1.], np_g(1.))
def testGradientTensorConversionWithDefun(self):
three = resource_variable_ops.ResourceVariable(3.0, name='v')
@def_function.function
def f(x):
return math_ops.add(x, three)
def g(x):
return f(x)
g = backprop.implicit_grad(g)(constant_op.constant(1.0))[0][0]
self.assertAllEqual(g, 1.0)
def testGradient(self):
matmul = def_function.function(math_ops.matmul)
def sq(x):
return matmul(x, x, transpose_a=True)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
grad_t, = backprop.gradients_function(sq, [0])(t)
self.assertAllEqual(grad_t, [[6, 6], [14, 14]])
def testGradientInFunction(self):
@def_function.function
def f(x):
return backprop.gradients_function(lambda y: y * y, [0])(x)[0]
self.assertAllEqual(f(constant_op.constant(1.0)), 2.0)
def testGradientOfGatherWithDefun(self):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
grad_fn = backprop.implicit_grad(sum_gather)
gradient = grad_fn()
defun_grad_fn = backprop.implicit_grad(def_function.function(sum_gather))
defun_gradient = defun_grad_fn()
self.assertEqual(len(gradient), len(defun_gradient))
gradient = gradient[0][0]
defun_gradient = defun_gradient[0][0]
self.assertAllEqual(gradient.values, defun_gradient.values)
self.assertAllEqual(gradient.indices, defun_gradient.indices)
self.assertAllEqual(gradient.dense_shape, defun_gradient.dense_shape)
def testDifferentiableFunctionNoneOutputs(self):
@def_function.function
def my_function(x):
return x, None
def wrapper(x):
return my_function(x)[0]
g = backprop.gradients_function(wrapper, [0])(constant_op.constant(0.0))
self.assertAllEqual(g[0], 1.)
@def_function.function
def foo(a):
return None, a * a
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
none, r = foo(x)
g = tp.gradient(r, x)
self.assertIs(none, None)
self.assertAllEqual(r, 25.0)
self.assertAllEqual(g, 2 * 5.0)
@test_util.run_in_graph_and_eager_modes
def testNestedDifferentiableFunction(self):
@def_function.function
def inner_fn(a, b):
return a * math_ops.add(a, b)
@def_function.function
def outer_fn(x):
return inner_fn(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunction(self):
@def_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@def_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@def_function.function
def outer_fn(x):
return middle_fn(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionWithMultipleGradCalls(self):
@def_function.function
def inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def middle_fn(a, b):
      return math_ops.multiply(a, inner_fn(a, b))
@def_function.function
def outer_fn(x):
return middle_fn(x, 3.0)
x = constant_op.constant(5.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
self.assertAllEqual(middle_fn(3.0, x), 3.0 * (3.0 + 5.0))
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
y = constant_op.constant(4.0)
with backprop.GradientTape() as tp:
tp.watch(y)
result = outer_fn(y)
grad = tp.gradient(result, y)
self.assertAllEqual(grad, 2 * 4.0 + 3.0)
with backprop.GradientTape() as tp:
tp.watch(y)
result = inner_fn(y, y)
grad = tp.gradient(result, y)
self.assertAllEqual(grad, 2.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionGradientTapeInDefun(self):
@def_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@def_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@def_function.function
def outer_fn(x):
with backprop.GradientTape() as tp:
tp.watch(x)
result = middle_fn(x, 1.0)
grad = tp.gradient(result, x)
return grad
x = constant_op.constant(5.0)
grad = outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionGradientTapeInNestedDefun(self):
@def_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@def_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@def_function.function
def almost_outer_fn(x):
with backprop.GradientTape() as tp:
tp.watch(x)
result = middle_fn(x, 1.0)
grad = tp.gradient(result, x)
return grad
@def_function.function
def outer_fn(x):
return almost_outer_fn(x)
x = constant_op.constant(5.0)
grad = outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionGradientTapeInMultNestedDefun(self):
@def_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@def_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@def_function.function
def almost_outer_fn(x):
with backprop.GradientTape() as tp:
tp.watch(x)
result = middle_fn(x, 1.0)
grad = tp.gradient(result, x)
return grad
@def_function.function
def outer_fn(x):
return almost_outer_fn(x)
@def_function.function
def outer_outer_fn(x):
return outer_fn(x)
x = constant_op.constant(5.0)
grad = outer_outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionTFGradientInDefun(self):
@def_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@def_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@def_function.function
def outer_fn(x):
result = middle_fn(x, 1.0)
return gradients_impl.gradients(result, [x])[0]
x = constant_op.constant(5.0)
grad = outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionTFGradientInNestedDefun(self):
@def_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@def_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@def_function.function
def almost_outer_fn(x):
result = middle_fn(x, 1.0)
return gradients_impl.gradients(result, [x])[0]
@def_function.function
def outer_fn(x):
return almost_outer_fn(x)
x = constant_op.constant(5.0)
grad = outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionTFGradientInMultNestedDefun(self):
@def_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@def_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@def_function.function
def almost_outer_fn(x):
result = middle_fn(x, 1.0)
return gradients_impl.gradients(result, [x])[0]
@def_function.function
def outer_fn(x):
return almost_outer_fn(x)
@def_function.function
def outer_outer_fn(x):
return outer_fn(x)
x = constant_op.constant(5.0)
grad = outer_outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
def testDeeplyNestedDifferentiableFunctionWithVariable(self):
var = variables.Variable(constant_op.constant(1.0))
@def_function.function
def inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@def_function.function
def outer_fn(x):
return middle_fn(x, var)
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
def testDeeplyNestedDifferentiableFunctionWithVariableMultipleGradCalls(self):
v = variables.Variable(constant_op.constant(3.0))
@def_function.function
def inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def middle_fn(a, b):
      return math_ops.multiply(a, inner_fn(a, b))
@def_function.function
def outer_fn(x):
return middle_fn(x, v)
x = constant_op.constant(5.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
self.assertAllEqual(middle_fn(v, x), 3.0 * (3.0 + 5.0))
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
y = constant_op.constant(4.0)
with backprop.GradientTape() as tp:
tp.watch(y)
result = outer_fn(y)
grad = tp.gradient(result, y)
self.assertAllEqual(grad, 2 * 4.0 + 3.0)
v.assign(constant_op.constant(1.5))
with backprop.GradientTape() as tp:
tp.watch(y)
result = outer_fn(y)
grad = tp.gradient(result, y)
self.assertAllEqual(grad, 2 * 4.0 + 1.5)
with backprop.GradientTape() as tp:
tp.watch(y)
result = inner_fn(y, v)
grad = tp.gradient(result, y)
self.assertAllEqual(grad, 1.0)
def testDeeplyNestedDifferentiableFunctionWithVariableMultipleTFGrads(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(3.0)
v.initializer.run()
@def_function.function
def inner_fn(a, b):
return math_ops.add(a, b)
@def_function.function
def middle_fn(a, b):
return math_ops.mul(a, inner_fn(a, b))
@def_function.function
def outer_fn(x):
return middle_fn(x, v)
x = constant_op.constant(5.0)
self.assertAllEqual(outer_fn(x).eval(), 5.0 * (5.0 + 3.0))
grad, = gradients_impl.gradients(outer_fn(x), x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
self.assertAllEqual(middle_fn(v, x), 3.0 * (3.0 + 5.0))
grad, = gradients_impl.gradients(outer_fn(x), x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
y = constant_op.constant(4.0)
grad, = gradients_impl.gradients(outer_fn(y), y)
self.assertAllEqual(grad, 2 * 4.0 + 3.0)
self.evaluate(v.assign(constant_op.constant(1.5)))
grad, = gradients_impl.gradients(outer_fn(y), y)
self.assertAllEqual(grad, 2 * 4.0 + 1.5)
grad, = gradients_impl.gradients(inner_fn(y, v), y)
self.assertAllEqual(grad, 1.0)
def testNestedDifferentiableFunctionNoneOutputs(self):
@def_function.function
def foo(a, b):
return None, a * math_ops.add(a, b), None, 2*a
@def_function.function
def bar(x):
return foo(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape(persistent=True) as tp:
tp.watch(x)
none1, r1, none2, r2 = bar(x)
g1 = tp.gradient(r1, x)
g2 = tp.gradient(r2, x)
self.assertAllEqual(r1, 30.0)
self.assertAllEqual(r2, 10.0)
self.assertIs(none1, None)
self.assertIs(none2, None)
self.assertAllEqual(g1, 2 * 5.0 + 1.0)
self.assertAllEqual(g2, 2.0)
def testGradientWithKeywordArguments(self):
matmul = def_function.function(math_ops.matmul)
def sq(x):
return matmul(a=x, b=x, transpose_a=True)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
grad_t, = backprop.gradients_function(sq, [0])(t)
self.assertAllEqual(grad_t, [[6, 6], [14, 14]])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(t)
one = matmul(t, b=t, transpose_a=True)
two = matmul(b=t, a=t, transpose_a=True)
three = matmul(a=t, b=t, transpose_a=True)
for output in [one, two, three]:
self.assertAllEqual(tape.gradient(output, t), [[6, 6], [14, 14]])
def testGradientInFunctionWithKeywordArguments(self):
@def_function.function
def f(x):
return backprop.gradients_function(lambda y: y * y, [0])(x)[0]
self.assertAllEqual(f(x=constant_op.constant(1.0)), 2.0)
@test_util.run_in_graph_and_eager_modes
def testBackwardNone(self):
model = variables.Variable(1.0, name='model')
count = variables.Variable(0)
@function.defun
def forward_pass(value):
count.assign_add(1)
residuals = value - model
loss = 0.5 * math_ops.reduce_mean(math_ops.pow(residuals, 2))
      # Note: count is an integer, so its output gradient will be None.
return loss, count
def reduce_fn(x):
if context.executing_eagerly():
with backprop.GradientTape() as t:
loss, count = forward_pass(x)
return t.gradient(loss, model), count
loss, count = forward_pass(x)
grad_only = gradients_impl.gradients(loss, model)
return grad_only, count
g, _ = reduce_fn(constant_op.constant([7.0]))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(nest.flatten(self.evaluate(g)), [-6.0])
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
test.main()
|
tensorflow-master
|
tensorflow/python/eager/function_gradients_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for backpropagation using the tape utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import sys
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import imperative_grad
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Note that we need to lazy load the following two modules to avoid creating
# circular dependencies.
# TODO(b/119775953): fix the circular dependencies.
pfor_ops = LazyLoader(
"pfor_ops", globals(),
"tensorflow.python.ops.parallel_for.control_flow_ops")
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
_op_attr_type_cache = {}
def op_attr_type(op_type, attr_name):
try:
return _op_attr_type_cache[(op_type, attr_name)]
except KeyError:
context.ensure_initialized()
h = context.context()._handle # pylint: disable=protected-access
attr_type = pywrap_tensorflow.TFE_OpNameGetAttrType(h, op_type, attr_name)
_op_attr_type_cache[(op_type, attr_name)] = attr_type
return attr_type
def make_attr(attr_type, value):
if attr_type == pywrap_tensorflow.TF_ATTR_TYPE:
return dtypes.as_dtype(value)
elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]:
return [dtypes.as_dtype(v) for v in value]
elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE:
return tensor_shape.as_shape(value).as_proto()
elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]:
return [tensor_shape.as_shape(v).as_proto() for v in value]
elif isinstance(value, str):
return value.encode()
return value
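# Sketch (illustration only): `make_attr` normalizes raw attribute values into
# the objects gradient functions expect from `op.get_attr`. For a TF_ATTR_TYPE
# attribute the wire enum becomes a DType (DT_FLOAT is 1, DT_DOUBLE is 2):
#
#   make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1)           # -> tf.float32
#   make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1, 2])    # -> [float32, float64]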
class _MockOp(object):
"""Pretends to be a tf.Operation for the gradient functions."""
def __init__(self, attrs, inputs, outputs, typ, skip_input_indices):
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.type = typ
self.skip_input_indices = skip_input_indices
def get_attr(self, attr):
typ = op_attr_type(self.type, attr)
for i in range(0, len(self.attrs), 2):
if self.attrs[i] == attr:
return make_attr(typ, self.attrs[i + 1])
raise KeyError(attr)
def _get_control_flow_context(self):
raise NotImplementedError(
"tf.GradientTape.gradients() does not support graph control flow "
"operations like tf.cond or tf.while at this time. Use tf.gradients() "
"instead. If you need this feature, please file a feature request at "
"https://github.com/tensorflow/tensorflow/issues/new"
)
def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,
out_grads, skip_input_indices):
"""Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
    outputs: outputs of the original operation.
    out_grads: gradients of the operation with respect to its outputs.
    skip_input_indices: a tuple passed through to the gradient function,
      indicating which inputs' gradients need not be computed.
Returns:
The gradients with respect to the inputs of the function, as a list.
"""
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)
grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access
if grad_fn is None:
return [None] * num_inputs
return grad_fn(mock_op, *out_grads)
pywrap_tensorflow.TFE_Py_RegisterGradientFunction(_gradient_function)
def _record_gradient(op_name, inputs, attrs, results, name):
return pywrap_tensorflow.TFE_Py_RecordGradient(op_name, inputs, attrs,
results, name)
execute.record_gradient = _record_gradient
def implicit_val_and_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the value and the gradient of f when called with
the same arguments. The gradient is with respect to all trainable TFE
variables accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.compat.v1.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
val_grad_fn = tfe.implicit_value_and_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
value, grads_and_vars = val_grad_fn(x, y)
print('Value of loss: %s' % value)
# Apply the gradients to Variables.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
    A function which, when called, returns a pair. Its first element is the
    value to which the function evaluates. Its second element is a list of
    (gradient, variable) pairs.
Raises:
ValueError: if `f` returns None.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
this_tape = tape.push_new_tape()
try:
end_node = f(*args, **kwds)
if end_node is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
finally:
tape.pop_tape(this_tape)
    # Note: variables are returned in construction order. This ensures a
    # deterministic order across executions.
variables = this_tape.watched_variables()
if not variables:
raise ValueError("No trainable variables were accessed while the "
"function was being computed.")
sources = [v.handle for v in variables]
grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),
sources)
return end_node, list(zip(grad, variables))
return grad_fn
def implicit_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the gradient of f when called with the same
arguments. The gradient is with respect to all trainable TFE variables
accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.compat.v1.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
grad_fn = tfe.implicit_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
grads_and_vars = grad_fn(x, y)
# Apply the gradients to Variables.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a list of (gradient, variable) pairs.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
return implicit_val_and_grad(f)(*args, **kwds)[1]
return grad_fn
def _get_arg_spec(f, params, param_args):
"""The positions of the parameters of f to be differentiated in param_args."""
try:
args = tf_inspect.getfullargspec(f).args
except TypeError as e:
# TypeError can happen when f is a callable object.
if params is None:
return range(len(param_args))
elif all(isinstance(x, int) for x in params):
return params
raise ValueError("Either callable provided is not a function or could not "
"inspect its arguments by name: %s. Original error: %s"
% (f, e))
if params is None:
if not args:
return range(len(param_args))
return range(len(args))
elif all(isinstance(x, six.string_types) for x in params):
return [args.index(n) for n in params]
elif all(isinstance(x, int) for x in params):
return params
else:
raise ValueError(
"params must be all strings or all integers; got %s." % params)
def gradients_function(f, params=None):
"""Returns a function which differentiates f with respect to params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
# The 2nd order derivatives with respect to x is:
# d^2 f / (dx)^2 = 6 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns 1st order gradients.
grad_fn = tfe.gradients_function(f)
x = 2.0
y = 3.0
# Invoke the 1st order gradient function.
x_grad, y_grad = grad_fn(x, y)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# Obtain a function that returns the 2nd order gradient with respect to x.
gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])
# Invoke the 2nd order gradient function.
x_gradgrad = gradgrad_fn(x, y)[0]
assert x_gradgrad.numpy() == 6 * 2 * 3
# To obtain a callable that returns the gradient(s) of `f` with respect to a
# subset of its inputs, use the `params` keyword argument with
# `gradients_function()`.
ygrad_fn = tfe.gradients_function(f, params=[1])
(y_grad,) = ygrad_fn(x, y)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Note that only tensors with real or complex dtypes are differentiable.
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing None
differentiates with respect to all parameters.
Returns:
    function which, when called, returns the gradient of `f` with respect to
    all of `params`. The function takes an extra optional keyword argument
    `dy`. Setting it allows computation of vector-Jacobian products for
    vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the gradient of the decorated function."""
_, grad = val_and_grad_function(f, params=params)(*args, **kwds)
return grad
return decorated
def _ensure_unique_tensor_objects(parameter_positions, args):
"""Make each of the parameter_positions in args a unique ops.Tensor object.
Ensure that each parameter is treated independently.
For example:
def f(x, y): return x * y
g = gradients_function(f)
one = tf.constant(1.)
g(one, one) should return [1., 1.]
(even though the two arguments are the same Tensor object).
Args:
parameter_positions: List of indices into args defining the arguments to
differentiate against.
args: A list of arguments to the function to be differentiated.
Returns:
args, possibly edited in-place.
"""
s = set()
for (i, t) in enumerate(args):
if i in parameter_positions:
tid = ops.tensor_id(t)
if tid in s:
args[i] = gen_array_ops.identity(args[i])
else:
s.add(tid)
return args
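# Sketch (illustration only): passing the same Tensor object for two watched
# positions. The second occurrence is replaced with an identity so the tape
# sees two distinct sources:
#
#   one = constant_op.constant(1.)
#   args = _ensure_unique_tensor_objects([0, 1], [one, one])
#   # args[0] is `one`; args[1] is a fresh identity of `one`.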
def val_and_grad_function(f, params=None):
"""Returns a function that computes f and its derivative w.r.t. params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns the function value and the 1st order
# gradients.
val_grads_fn = tfe.value_and_gradients_function(f)
x = 2.0
y = 3.0
# Invoke the value-and-gradients function.
f_val, (x_grad, y_grad) = val_grads_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# To obtain a callable that returns the value of `f` and the gradient(s) of
# `f` with respect to a subset of its inputs, use the `params` keyword
# argument with `value_and_gradients_function()`.
val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])
f_val, (y_grad,) = val_ygrad_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing `None`
differentiates with respect to all parameters.
Returns:
function which, when called, returns the value of f and the gradient
of f with respect to all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
dy = kwds.pop("dy", None)
if kwds:
raise ValueError("Functions to be differentiated cannot "
"receive keyword arguments.")
val, vjp = make_vjp(f, params)(*args, **kwds)
return val, vjp(dy=dy)
return decorated
def make_vjp(f, params=None, persistent=True):
"""Returns a function that computes f and its vjp w.r.t.
params.
The term "vjp" here is an abbreviation for vector-jacobian product.
Args:
f: the function to be differentiated.
params: the parameters (numbers or names) to differentiate with respect to.
A value of None will differentiate with respect to all parameters.
persistent: Boolean controlling whether the VJP function can be re-used.
Must be True or False.
Returns:
A function, which when called, returns a tuple (value, vjp), where:
- value is the result of calling f.
- vjp is a function, which takes a vector as an argument and
returns the product of that vector with the Jacobian of f.
Providing no argument to vjp is equivalent to providing a
vector of ones.
For example,
```python
def f(x):
return x * x
wrapped_fn = tfe.make_vjp(f)
result, vjp = wrapped_fn(tf.constant(3.0))
# result is 9.0
  vjp()  # the vjp function returns 6.0
  ```
  Raises:
ValueError: if `f` returns None.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
parameter_positions = _get_arg_spec(f, params, args)
assert not kwds, "The gradient function can't take keyword arguments."
this_tape = tape.push_new_tape(persistent=persistent)
try:
sources = []
args = [
ops.convert_to_tensor(args[i])
if i in parameter_positions else args[i]
for i in range(len(args))
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
sources.append(args[i])
tape.watch(this_tape, args[i])
result = f(*args)
if result is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
flat_result = nest.flatten(result)
flat_result = [gen_array_ops.identity(x) for x in flat_result]
result = nest.pack_sequence_as(result, flat_result)
finally:
tape.pop_tape(this_tape)
def vjp(dy=None):
if dy is not None:
dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
return imperative_grad.imperative_grad(
this_tape, nest.flatten(result), sources, output_gradients=dy)
return result, vjp
return decorated
def flatten_nested_indexed_slices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = flatten_nested_indexed_slices(grad.values)
return ops.IndexedSlices(g.values, array_ops.gather(grad.indices,
g.indices),
g.dense_shape)
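# Sketch (illustration only): flattening one level of nesting. `v` and `ds`
# are hypothetical values and dense-shape tensors; indices compose via a
# gather:
#
#   inner = ops.IndexedSlices(values=v, indices=[1], dense_shape=ds)
#   outer = ops.IndexedSlices(values=inner, indices=[3, 4], dense_shape=ds)
#   flat = flatten_nested_indexed_slices(outer)
#   # flat.values is v; flat.indices == gather([3, 4], [1]) == [4]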
def aggregate_indexed_slices_gradients(grads):
"""Aggregates gradients containing `IndexedSlices`s."""
if len(grads) < 1:
return None
elif len(grads) == 1:
return grads[0]
else:
grads = [g for g in grads if g is not None]
# If any gradient is a `Tensor`, sum them up and return a dense tensor
# object.
if any(isinstance(g, ops.Tensor) for g in grads):
return math_ops.add_n(grads)
    # The following `_as_indexed_slices_list` casts ids of IndexedSlices into
    # int64. This ensures the inputs of `concat` all have the same data type.
grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access
grads = [flatten_nested_indexed_slices(x) for x in grads]
# Form IndexedSlices out of the concatenated values and indices.
concat_grad = ops.IndexedSlices(
array_ops.concat([x.values for x in grads], axis=0),
array_ops.concat([x.indices for x in grads], axis=0),
grads[0].dense_shape)
return concat_grad
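# Sketch (illustration only): two sparse gradients for the same variable
# aggregate by concatenation instead of densifying. The values tensors `va`
# and `vb` are hypothetical:
#
#   a = ops.IndexedSlices(values=va, indices=[0], dense_shape=[3, 2])
#   b = ops.IndexedSlices(values=vb, indices=[2], dense_shape=[3, 2])
#   agg = aggregate_indexed_slices_gradients([a, b])
#   # agg.indices == [0, 2]; agg.values == concat([va, vb], axis=0)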
def _aggregate_grads(gradients):
"""Aggregate gradients from multiple sources.
Args:
gradients: A list of 'Tensor' or 'IndexedSlices' gradients.
Returns:
If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.
Otherwise returns an aggregated 'IndexedSlices'.
"""
assert gradients, "No gradients to aggregate"
if len(gradients) == 1:
return gradients[0]
if all(isinstance(g, ops.Tensor) for g in gradients):
return gen_math_ops.add_n(gradients)
else:
assert all(isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in gradients)
return aggregate_indexed_slices_gradients(gradients)
def _num_elements(grad):
"""The number of elements in the `grad` tensor."""
if isinstance(grad, ops.Tensor):
shape_tuple = grad._shape_tuple() # pylint: disable=protected-access
if shape_tuple is None or None in shape_tuple:
return 0
return functools.reduce(operator.mul, shape_tuple, 1)
if isinstance(grad, ops.IndexedSlices):
return functools.reduce(operator.mul, grad.values._shape_tuple(), 1) # pylint: disable=protected-access
raise ValueError("`grad` not a Tensor or IndexedSlices.")
def _fast_fill(value, shape, dtype):
return array_ops.fill(
constant_op.constant(shape, dtype=dtypes.int32),
constant_op.constant(value, dtype=dtype))
def _zeros(shape, dtype):
"""Helper to return (possibly cached) zero tensors in eager mode."""
if (dtype == dtypes.variant
or dtype == dtypes.string
or dtype == dtypes.resource):
    # TODO(apassos): need to save enough information about variant tensors to
    # construct zeros for them.
return None
ctx = context.context()
if not ctx.executing_eagerly():
return array_ops.zeros(shape, dtype)
device = ctx.device_name
cache_key = shape, dtype, device
cached = ctx.zeros_cache().get(cache_key)
if cached is None:
if dtypes.as_dtype(dtype).is_bool:
value = False
else:
value = 0
cached = _fast_fill(value, shape, dtype)
ctx.zeros_cache().put(cache_key, cached)
return cached
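# Sketch (illustration only): in eager mode, zero tensors of the same shape,
# dtype, and device are served from the context's cache, so backprop does not
# materialize a new buffer per unconnected gradient:
#
#   z1 = _zeros((2, 2), dtypes.float32)
#   z2 = _zeros((2, 2), dtypes.float32)
#   # When executing eagerly, z1 is z2 (the same cached EagerTensor).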
def _ones(shape, dtype):
as_dtype = dtypes.as_dtype(dtype)
if as_dtype == dtypes.string:
return None
if not context.context().executing_eagerly():
return array_ops.ones(shape, dtype)
if as_dtype.is_bool:
value = True
else:
value = 1
if shape == (): # pylint: disable=g-explicit-bool-comparison
return constant_op.constant(value, dtype=dtype)
return _fast_fill(value, shape, dtype)
_default_vspace = imperative_grad.VSpace(
num_elements_fn=_num_elements,
aggregate_fn=_aggregate_grads,
zeros_fn=_zeros,
ones_fn=_ones,
graph_shape_fn=gen_array_ops.shape)
pywrap_tensorflow.TFE_Py_RegisterVSpace(_default_vspace)
def _handle_or_self(x):
"""If x is ResourceVariable, return its handle, else x."""
if resource_variable_ops.is_resource_variable(x):
x = x.handle
return x
@tf_export("GradientTape")
class GradientTape(object):
"""Record operations for automatic differentiation.
Operations are recorded if they are executed within this context manager and
at least one of their inputs is being "watched".
Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`,
where `trainable=True` is default in both cases) are automatically watched.
Tensors can be manually watched by invoking the `watch` method on this context
manager.
For example, consider the function `y = x * x`. The gradient at `x = 3.0` can
be computed as:
```python
x = tf.constant(3.0)
with tf.GradientTape() as g:
g.watch(x)
y = x * x
dy_dx = g.gradient(y, x) # Will compute to 6.0
```
GradientTapes can be nested to compute higher-order derivatives. For example,
```python
x = tf.constant(3.0)
with tf.GradientTape() as g:
g.watch(x)
with tf.GradientTape() as gg:
gg.watch(x)
y = x * x
dy_dx = gg.gradient(y, x) # Will compute to 6.0
d2y_dx2 = g.gradient(dy_dx, x) # Will compute to 2.0
```
By default, the resources held by a GradientTape are released as soon as
GradientTape.gradient() method is called. To compute multiple gradients over
the same computation, create a persistent gradient tape. This allows multiple
calls to the gradient() method as resources are released when the tape object
is garbage collected. For example:
```python
x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as g:
g.watch(x)
y = x * x
z = y * y
dz_dx = g.gradient(z, x) # 108.0 (4*x^3 at x = 3)
dy_dx = g.gradient(y, x) # 6.0
del g # Drop the reference to the tape
```
By default GradientTape will automatically watch any trainable variables that
are accessed inside the context. If you want fine grained control over which
variables are watched you can disable automatic tracking by passing
`watch_accessed_variables=False` to the tape constructor:
```python
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(variable_a)
y = variable_a ** 2 # Gradients will be available for `variable_a`.
z = variable_b ** 3 # No gradients will be available since `variable_b` is
# not being watched.
```
Note that when using models you should ensure that your variables exist when
using `watch_accessed_variables=False`. Otherwise it's quite easy to make your
first iteration not have any gradients:
```python
a = tf.keras.layers.Dense(32)
b = tf.keras.layers.Dense(32)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(a.variables) # Since `a.build` has not been called at this point
# `a.variables` will return an empty list and the
# tape will not be watching anything.
result = b(a(inputs))
tape.gradient(result, a.variables) # The result of this computation will be
# a list of `None`s since a's variables
# are not being watched.
```
Note that only tensors with real or complex dtypes are differentiable.
"""
def __init__(self, persistent=False, watch_accessed_variables=True):
"""Creates a new GradientTape.
Args:
persistent: Boolean controlling whether a persistent gradient tape
is created. False by default, which means at most one call can
be made to the gradient() method on this object.
watch_accessed_variables: Boolean controlling whether the tape will
automatically `watch` any (trainable) variables accessed while the tape
is active. Defaults to True meaning gradients can be requested from any
result computed in the tape derived from reading a trainable `Variable`.
If False users must explicitly `watch` any `Variable`s they want to
request gradients from.
"""
self._tape = None
self._persistent = persistent
self._watch_accessed_variables = watch_accessed_variables
self._recording = False
self._created_eagerly = context.executing_eagerly()
if self._created_eagerly:
context.ensure_initialized()
context.context().start_step()
def __enter__(self):
"""Enters a context inside which operations are recorded on this tape."""
self._push_tape()
return self
def __exit__(self, typ, value, traceback):
"""Exits the recording context, no further operations are traced."""
if self._recording:
self._pop_tape()
def _push_tape(self):
if self._recording:
raise ValueError("Tape is already recording.")
if self._tape is None:
self._tape = tape.push_new_tape(
persistent=self._persistent,
watch_accessed_variables=self._watch_accessed_variables)
else:
tape.push_tape(self._tape)
self._recording = True
def _pop_tape(self):
if not self._recording:
raise ValueError("Tape is not recording.")
tape.pop_tape(self._tape)
self._recording = False
def __del__(self):
if self._created_eagerly:
try:
context.context().end_step()
except AttributeError:
pass
except TypeError:
pass
def watch(self, tensor):
"""Ensures that `tensor` is being traced by this tape.
Args:
tensor: a Tensor or list of Tensors.
"""
for t in nest.flatten(tensor):
if not t.dtype.is_floating:
logging.log_first_n(
logging.WARN, "The dtype of the watched tensor must be "
"floating (e.g. tf.float32), got %r", 5, t.dtype)
if hasattr(t, "handle"):
# There are many variable-like objects, all of them currently have
# `handle` attribute that points to a tensor. If this changes, internals
# of watch_variable need to change as well.
tape.watch_variable(self._tape, t)
else:
tape.watch(self._tape, t)
@tf_contextlib.contextmanager
def stop_recording(self):
"""Temporarily stops recording operations on this tape.
Operations executed while this context manager is active will not be
recorded on the tape. This is useful for reducing the memory used by tracing
all computations.
For example:
```
with tf.GradientTape(persistent=True) as t:
loss = compute_loss(model)
with t.stop_recording():
# The gradient computation below is not traced, saving memory.
grads = t.gradient(loss, model.variables)
```
Yields:
None
Raises:
RuntimeError: if the tape is not currently recording.
"""
if self._tape is None:
raise RuntimeError(
"Trying to stop recording a tape which is not recording.")
self._pop_tape()
try:
yield
finally:
self._push_tape()
def reset(self):
"""Clears all information stored in this tape.
Equivalent to exiting and reentering the tape context manager with a new
tape. For example, the two following code blocks are equivalent:
```
with tf.GradientTape() as t:
loss = loss_fn()
with tf.GradientTape() as t:
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
# The following is equivalent to the above
with tf.GradientTape() as t:
loss = loss_fn()
t.reset()
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
```
This is useful if you don't want to exit the context manager for the tape,
or can't because the desired reset point is inside a control flow construct:
```
with tf.GradientTape() as t:
loss = ...
if loss > k:
t.reset()
```
"""
self._pop_tape()
self._tape = None
self._push_tape()
def watched_variables(self):
"""Returns variables watched by this tape in order of construction."""
return self._tape.watched_variables()
def gradient(self,
target,
sources,
output_gradients=None,
unconnected_gradients=UnconnectedGradients.NONE):
"""Computes the gradient using operations recorded in context of this tape.
Args:
target: Tensor (or list of tensors) to be differentiated.
sources: a list or nested structure of Tensors or Variables. `target`
will be differentiated against elements in `sources`.
output_gradients: a list of gradients, one for each element of
target. Defaults to None.
unconnected_gradients: determines the value returned if the target and
sources are unconnected. Can hold 'none' (the default) or 'zero'. The
possible values and effects are detailed in 'UnconnectedGradients'.
Returns:
a list or nested structure of Tensors (or IndexedSlices, or None),
one for each element in `sources`. Returned structure is the same as
the structure of `sources`.
Raises:
RuntimeError: if called inside the context of the tape, or if called more
than once on a non-persistent tape.
ValueError: if the target is a variable or if `unconnected_gradients` is
set to an unknown value.
"""
if self._tape is None:
raise RuntimeError("GradientTape.gradient can only be called once on "
"non-persistent tapes.")
if self._recording:
if not self._persistent:
self._pop_tape()
else:
logging.log_first_n(
logging.WARN, "Calling GradientTape.gradient on a persistent "
"tape inside its context is significantly less "
"efficient than calling it outside the context (it "
"causes the gradient ops to be recorded on the "
"tape, leading to increased CPU and memory usage). "
"Only call GradientTape.gradient inside the "
"context if you actually want to trace the "
"gradient in order to compute higher order "
"derivatives.", 1)
flat_targets = []
for t in nest.flatten(target):
if not t.dtype.is_floating:
logging.vlog(
logging.WARN, "The dtype of the target tensor must be "
"floating (e.g. tf.float32) when calling GradientTape.gradient, "
"got %r", t.dtype)
if resource_variable_ops.is_resource_variable(t):
with self:
t = ops.convert_to_tensor(t)
flat_targets.append(t)
flat_sources = nest.flatten(sources)
flat_sources_raw = flat_sources
flat_sources = [_handle_or_self(x) for x in flat_sources]
for t in flat_sources_raw:
if not t.dtype.is_floating:
logging.vlog(
logging.WARN, "The dtype of the source tensor must be "
"floating (e.g. tf.float32) when calling GradientTape.gradient, "
"got %r", t.dtype)
if output_gradients is not None:
output_gradients = [None if x is None else ops.convert_to_tensor(x)
for x in nest.flatten(output_gradients)]
flat_grad = imperative_grad.imperative_grad(
self._tape,
flat_targets,
flat_sources,
output_gradients=output_gradients,
sources_raw=flat_sources_raw,
unconnected_gradients=unconnected_gradients)
if not self._persistent:
self._tape = None
grad = nest.pack_sequence_as(sources, flat_grad)
return grad
def jacobian(self,
target,
sources,
unconnected_gradients=UnconnectedGradients.NONE,
parallel_iterations=None,
experimental_use_pfor=True):
"""Computes the jacobian using operations recorded in context of this tape.
See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant) for the
definition of a Jacobian.
Example usage:
```python
with tf.GradientTape() as g:
x = tf.constant([1.0, 2.0])
g.watch(x)
y = x * x
jacobian = g.jacobian(y, x)
# jacobian value is [[2., 0.], [0., 4.]]
```
Args:
target: Tensor to be differentiated.
sources: a list or nested structure of Tensors or Variables. `target`
will be differentiated against elements in `sources`.
unconnected_gradients: determines the value returned if the target and
sources are unconnected. Can hold 'none' (the default) or 'zero'. The
possible values and effects are detailed in 'UnconnectedGradients'.
parallel_iterations: A knob to control how many iterations are dispatched
in parallel. This knob can be used to control the total memory usage.
experimental_use_pfor: If true, vectorizes the jacobian computation. Else
falls back to a sequential while_loop. Vectorization can sometimes fail
or lead to excessive memory usage. This option can be used to disable
vectorization in such cases.
Returns:
a list or nested structure of Tensors (or IndexedSlices, or None),
one for each element in `sources`. Returned structure is the same as
the structure of `sources`.
Raises:
RuntimeError: If called on a non-persistent tape with eager execution
enabled and without enabling experimental_use_pfor.
ValueError: If vectorization of jacobian computation fails.
"""
flat_sources = nest.flatten(sources)
target_static_shape = target.shape
target_shape = array_ops.shape(target)
# Note that we push and pop the tape here and below. This is needed since we
# need gradients through the enclosed operations.
self._push_tape()
target = array_ops.reshape(target, [-1])
self._pop_tape()
def loop_fn(i):
self._push_tape()
y = array_ops.gather(target, i)
self._pop_tape()
return self.gradient(y, flat_sources,
unconnected_gradients=unconnected_gradients)
try:
target_size = int(target.shape[0])
except TypeError:
target_size = array_ops.shape(target)[0]
if experimental_use_pfor:
try:
output = pfor_ops.pfor(loop_fn, target_size,
parallel_iterations=parallel_iterations)
except ValueError as err:
six.reraise(
ValueError,
ValueError(
str(err) + "\nEncountered an exception while vectorizing the "
"jacobian computation. Vectorization can be disabled by setting"
" experimental_use_pfor to False."),
sys.exc_info()[2])
else:
if context.executing_eagerly() and not self._persistent:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the jacobian with eager execution enabled and with "
" experimental_use_pfor set to False.")
output = pfor_ops.for_loop(
loop_fn, [target.dtype] * len(flat_sources), target_size,
parallel_iterations=parallel_iterations)
for i, out in enumerate(output):
if out is not None:
new_shape = array_ops.concat(
[target_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
if context.executing_eagerly():
out.set_shape(target_static_shape.concatenate(flat_sources[i].shape))
output[i] = out
return nest.pack_sequence_as(sources, output)
def batch_jacobian(self,
target,
source,
unconnected_gradients=UnconnectedGradients.NONE,
parallel_iterations=None,
experimental_use_pfor=True):
"""Computes and stacks per-example jacobians.
See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant) for the
definition of a Jacobian. This function is essentially an efficient
implementation of the following:
`tf.stack([self.jacobian(y[i], x[i]) for i in range(x.shape[0])])`.
Note that compared to `GradientTape.jacobian`, which computes the gradient of
each output value w.r.t. each input value, this function is useful when
`target[i,...]` is independent of `source[j,...]` for `j != i`. This
assumption allows more efficient computation as compared to
`GradientTape.jacobian`. The output, as well as intermediate activations,
are lower dimensional and avoid the redundant zeros which would otherwise
appear in the jacobian computation under the independence assumption.
Example usage:
```python
with tf.GradientTape() as g:
x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)
g.watch(x)
y = x * x
batch_jacobian = g.batch_jacobian(y, x)
# batch_jacobian is [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
```
Args:
target: A tensor with rank 2 or higher and with shape [b, y1, ..., y_n].
`target[i,...]` should only depend on `source[i,...]`.
source: A tensor with rank 2 or higher and with shape [b, x1, ..., x_m].
unconnected_gradients: determines the value returned if the target and
sources are unconnected. Can hold 'none' (the default) or 'zero'. The
possible values and effects are detailed in 'UnconnectedGradients'.
parallel_iterations: A knob to control how many iterations are dispatched
in parallel. This knob can be used to control the total memory usage.
experimental_use_pfor: If true, uses pfor for computing the Jacobian. Else
uses a tf.while_loop.
Returns:
A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
is the jacobian of `target[i, ...]` w.r.t. `source[i, ...]`, i.e. stacked
per-example jacobians.
Raises:
RuntimeError: If called on a non-persistent tape with eager execution
enabled and without enabling experimental_use_pfor.
ValueError: If vectorization of jacobian computation fails or if first
dimension of `target` and `source` do not match.
"""
target_shape = target.shape
if target_shape.rank is None:
dim = tensor_shape.Dimension(None)
else:
dim = target_shape.dims[0]
if not (target_shape.with_rank_at_least(2) and
source.shape.with_rank_at_least(2) and
dim.is_compatible_with(source.shape[0])):
raise ValueError(
"Need first dimension of target shape (%s) and "
"source shape (%s) to match." % (target.shape, source.shape))
if target_shape.is_fully_defined():
batch_size = int(target_shape[0])
target_row_size = target_shape.num_elements() // batch_size
else:
target_shape = array_ops.shape(target)
batch_size = target_shape[0]
target_row_size = array_ops.size(target) // batch_size
source_shape = array_ops.shape(source)
# Flatten target to 2-D.
# Note that we push and pop the tape here and below. This is needed since we
# need gradients through the enclosed operations.
self._push_tape()
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, source_shape[0])]):
target = array_ops.reshape(target, [batch_size, target_row_size])
self._pop_tape()
def loop_fn(i):
self._push_tape()
y = array_ops.gather(target, i, axis=1)
self._pop_tape()
return self.gradient(y, source,
unconnected_gradients=unconnected_gradients)
if experimental_use_pfor:
try:
output = pfor_ops.pfor(loop_fn, target_row_size,
parallel_iterations=parallel_iterations)
except ValueError as err:
six.reraise(
ValueError,
ValueError(
str(err) + "\nEncountered an exception while vectorizing the "
"batch_jacobian computation. Vectorization can be disabled by "
"setting experimental_use_pfor to False."),
sys.exc_info()[2])
else:
if context.executing_eagerly() and not self._persistent:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the batch_jacobian with eager execution enabled and "
" with experimental_use_pfor set to False.")
output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size,
parallel_iterations=parallel_iterations)
if output is None:
return None
output = array_ops.reshape(output,
[target_row_size, batch_size, -1])
output = array_ops.transpose(output, [1, 0, 2])
new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0)
return array_ops.reshape(output, new_shape)
|
tensorflow-master
|
tensorflow/python/eager/backprop.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Defun decorator for defining graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
import threading
import types as types_lib
import weakref
import numpy as np
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# Loaded lazily due to a circular dependency (roughly
# tf.function->autograph->dataset->tf.function).
# TODO(b/133251390): Use a regular import.
ag_ctx = lazy_loader.LazyLoader(
"ag_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
FORWARD_FUNCTION_ATTRIBUTE_NAME = "forward_function_name"
BACKWARD_FUNCTION_ATTRIBUTE_NAME = "backward_function_name"
CacheKey = collections.namedtuple("CacheKey", [
"input_signature", "parent_graph", "device_functions", "colocation_stack",
"in_cross_replica_context"
])
CacheKey.replace = CacheKey._replace # pylint: disable=protected-access
def _flat_shape_list(*params):
"""Return a flat list of TensorShapes, one for each tensor[spec] in `*params`.
If `params` contains `CompositeTensors`, then they are expanded to their
component `Tensors`.
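For example (illustrative arguments):
  _flat_shape_list(tf.TensorSpec([2, 3], tf.float32), "not-a-tensor")
  # => [TensorShape([2, 3]), None]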
Args:
*params: Set of nested entries containing Tensors, TensorSpec, and
non-tensors.
Returns:
A list of entries containing either `None` or `TensorShape`.
"""
return [tensor_shape.TensorShape(x.shape)
if isinstance(x, (ops.Tensor, tensor_spec.TensorSpec)) else None
for x in nest.flatten(params, expand_composites=True)]
def _shape_less_specific_than(relaxed, to_check):
"""Checks if `relaxed` is less specific than `to_check`.
This is an asymmetric check, unlike `TensorShape.is_compatible_with`. If
`to_check` has a dimension with an undefined shape, `relaxed` must also have
an undefined shape for that dimension.
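For example (illustrative shapes):
  _shape_less_specific_than(TensorShape([None, 3]), TensorShape([2, 3]))  # True
  _shape_less_specific_than(TensorShape([2, 3]), TensorShape([None, 3]))  # False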
Args:
relaxed: A `TensorShape` to check against.
to_check: A second `TensorShape`.
Returns:
True if `to_check` represents a set of shapes which is a subset of
`relaxed`'s shapes and False otherwise.
"""
if to_check.dims is not None and relaxed.dims is not None:
if to_check.rank != relaxed.rank:
return False
for check_dim, relaxed_dim in zip(to_check.dims, relaxed.dims):
if check_dim.value is None and relaxed_dim.value is not None:
return False
if not relaxed_dim.is_compatible_with(check_dim):
return False
return True
def _compatible_shapes(flat_relaxed, flat_to_check):
"""Check if lists of TensorShapes contain compatible shapes.
Checks that each `flat_relaxed` shape covers a superset of the shapes of the
corresponding `flat_to_check` shape.
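For example (illustrative shape lists):
  _compatible_shapes([TensorShape([None]), None], [TensorShape([2]), None])  # True
  _compatible_shapes([TensorShape([2])], [TensorShape([None])])  # False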
Args:
flat_relaxed: List of TensorShape or None.
flat_to_check: List of TensorShape or None.
Returns:
A python bool.
Raises:
RuntimeError:
if `len(flat_relaxed) != len(flat_to_check)`.
RuntimeError:
if `(flat_relaxed[i] is None) != (flat_to_check[i] is None)` for any `i`.
"""
if len(flat_relaxed) != len(flat_to_check):
raise RuntimeError("Expected shape lists of identical lengths, but saw: "
"%s and %s" % (flat_relaxed, flat_to_check))
def is_compatible(relaxed, to_check):
"""Internal help function.
Args:
relaxed: TensorShape or None.
to_check: TensorShape or None.
Returns:
Python bool.
Raises:
RuntimeError: If `relaxed is None != to_check is None`.
"""
# If both x and y are None, there is no shape to compare. Otherwise check
# if they are compatible with each other. Either way, both input signatures
# must have Tensors in the same entries. If not, raise an assertion
# error.
# Parenthesized on purpose: without parentheses Python chains the
# comparison (`a is None != b is None`) into an always-false expression.
if (relaxed is None) != (to_check is None):
raise RuntimeError(
"Expected signature type matches between flattened input shapes "
"%s and %s; but saw that (%s is None) != (%s is None)"
% (flat_relaxed, flat_to_check, relaxed, to_check))
return relaxed is None or _shape_less_specific_than(relaxed, to_check)
return all(is_compatible(relaxed, to_check)
for relaxed, to_check in zip(flat_relaxed, flat_to_check))
def common_shape(x, y):
"""Find a `TensorShape` that is compatible with both `x` and `y`."""
if (x is None) != (y is None):
raise RuntimeError(
"Cannot find a common shape when LHS shape is None but RHS shape "
"is not (or vice versa): %s vs. %s" % (x, y))
if x is None:
return None # The associated input was not a Tensor, no shape generated.
if not isinstance(x, tensor_shape.TensorShape):
raise TypeError("Expected x to be a TensorShape but saw %s" % (x,))
if not isinstance(y, tensor_shape.TensorShape):
raise TypeError("Expected y to be a TensorShape but saw %s" % (y,))
if x.rank != y.rank or x.rank is None:
return tensor_shape.TensorShape(None)
dims = []
for dim_x, dim_y in zip(x.dims, y.dims):
if (dim_x != dim_y
or tensor_shape.dimension_value(dim_x) is None
or tensor_shape.dimension_value(dim_y) is None):
dims.append(None)
else:
dims.append(tensor_shape.dimension_value(dim_x))
return tensor_shape.TensorShape(dims)
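# Illustrative behavior of `common_shape` (hypothetical shapes, not part of
# the original module):
#   common_shape(tensor_shape.TensorShape([2, None]),
#                tensor_shape.TensorShape([2, 3]))  # => TensorShape([2, None])
#   common_shape(tensor_shape.TensorShape([2, 3]),
#                tensor_shape.TensorShape([4, 3]))  # => TensorShape([None, 3])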
def is_same_structure(structure1,
structure2,
check_values=False):
"""Check two structures for equality, optionally of types and of values."""
try:
nest.assert_same_structure(structure1, structure2, expand_composites=True)
except (ValueError, TypeError):
return False
if check_values:
flattened1 = nest.flatten(structure1, expand_composites=True)
flattened2 = nest.flatten(structure2, expand_composites=True)
# First check the types to avoid AttributeErrors.
if any(type(f1) != type(f2) for f1, f2 in zip(flattened1, flattened2)):
return False
return flattened1 == flattened2
return True
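# Illustrative behavior of `is_same_structure` (hypothetical inputs):
#   is_same_structure({"a": 1, "b": 2}, {"a": 3, "b": 4})     # True
#   is_same_structure({"a": 1}, [1])                          # False
#   is_same_structure({"a": 1}, {"a": 2}, check_values=True)  # False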
def _parse_func_attrs(attributes):
"""Convert the keyword arguments into function_def attributes.
Currently only supports primitive types: bool, int, float and string.
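For example (the attribute names here are illustrative):
  _parse_func_attrs({"api_implements": "matmul", "_noinline": True})
  # => {"api_implements": AttrValue(s=b"matmul"),
  #     "_noinline": AttrValue(b=True)}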
Args:
attributes: the dictionary of attributes.
Returns:
A dict of attributes where the key is the name of attribute and the value
is the AttrValue proto.
Raises:
ValueError: If the kwargs contain an unwhitelisted name or an unsupported
value type.
"""
attrs = {}
for key, value in attributes.items():
if isinstance(value, attr_value_pb2.AttrValue):
attrs[key] = value
# bool type check has to happen before int since bool is a subclass of int.
elif isinstance(value, bool):
attrs[key] = attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
attrs[key] = attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
attrs[key] = attr_value_pb2.AttrValue(f=value)
elif isinstance(value, (str, bytes, six.text_type)):
attrs[key] = attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(key, type(value)))
return attrs
class _InterpolateFunctionError(object):
"""Context Manager that interpolates the exception from 'top_level_func'."""
def __init__(self, top_level_func):
self._func = top_level_func
def __enter__(self):
pass
def __exit__(self, typ, exc, tb):
if not exc or not isinstance(exc, errors.OpError):
return False
message = compat.as_text(exc.message)
_, tags = error_interpolation.parse_message(message)
g = None
func_stack = []
for t in tags:
if t.type == "function_node":
# TODO(mdan): Tests should cover this.
if t.name == compat.as_str(self._func.name):
g = self._func.graph
elif g:
next_func = g._get_function(t.name)
if next_func is not None and isinstance(next_func,
_EagerDefinedFunction):
g = next_func.graph
if g:
func_stack.append(g.name)
else:
func_stack.append("<unknown>")
if g:
message = error_interpolation.interpolate(message, g)
message += "\n\nFunction call stack:\n"
message += " -> ".join(func_stack)
message += "\n"
exc._message = message # pylint: disable=protected-access
return False
def _forward_name(n):
"""The name of a generated forward defun named n."""
return "__forward_%s_%s" % (n, ops.uid())
def _backward_name(n):
"""The name of a generated backward defun named n."""
return "__backward_%s_%s" % (n, ops.uid())
def _inference_name(n):
"""The name of a forward-but-no-gradient defun named n."""
return "__inference_%s_%s" % (n, ops.uid())
class _EagerDefinedFunctionDeleter(object):
"""Unregister function from eager context."""
def __init__(self, name):
self.name = name
def __del__(self):
try:
context.remove_function(self.name)
except TypeError:
# Suppress some exceptions, mainly for the case when we're running on
# module deletion. Things that can go wrong include the context module
# already being unloaded, self._handle._handle_data no longer being
# valid, and so on. Printing warnings in these cases is silly
# (exceptions raised from __del__ are printed as warnings to stderr).
pass # 'NoneType' object is not callable when the handle has been
# partially unloaded.
except AttributeError:
pass # 'NoneType' object has no attribute 'eager_mode' when context has
# been unloaded. Will catch other module unloads as well.
# TODO(apassos) get rid of this by splitting framework.function._DefinedFunction
# so it doesn't have the definition-generating logic and is just a container for
# an already-defined function.
class _EagerDefinedFunction(object):
"""Callable with the interface of `framework.function._DefinedFunction`.
`_EagerDefinedFunction` encapsulates a function definition and its properties,
and it provides a method for calling the encapsulated function. Some Ops
take functions as attributes, which have type `func`; an instance of this
class may be provided as the value of these `func` attributes.
"""
def __init__(self, name, graph, inputs, outputs, attrs):
"""Initializes an eager defined function.
Args:
name: str, the name for the created function.
graph: Graph, the graph containing the operations in the function
inputs: the tensors in the graph to be used as inputs to the function
outputs: the tensors in the graph which will be outputs to the function
attrs: dict mapping names of attributes to their AttrValue values
"""
input_ops = set(arg.op for arg in inputs)
operations = [op for op in graph.get_operations() if op not in input_ops]
fn = pywrap_tensorflow.TF_GraphToFunction_wrapper(
graph._c_graph, # pylint: disable=protected-access
compat.as_str(name),
False,
[o._c_op for o in operations], # pylint: disable=protected-access
[t._as_tf_output() for t in inputs], # pylint: disable=protected-access
[t._as_tf_output() for t in outputs], # pylint: disable=protected-access
[],
[o._c_op for o in graph.control_outputs], # pylint: disable=protected-access
[], # control_output_names
None,
compat.as_str(""))
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(iga): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use status.
pywrap_tensorflow.TF_FunctionSetAttrValueProto(
fn, compat.as_str(name), serialized)
# TODO(apassos) avoid creating a FunctionDef (especially to grab the
# signature, but also in general it's nice not to depend on it).
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TF_FunctionToFunctionDef(fn, buffer_)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
function_def = function_pb2.FunctionDef()
function_def.ParseFromString(compat.as_bytes(proto_data))
self.name = compat.as_bytes(function_def.signature.name)
with ops.init_scope():
if context.executing_eagerly():
context.ensure_initialized()
context.add_function(fn)
self._function_deleter = _EagerDefinedFunctionDeleter(self.name)
self._registered_on_context = True
self.definition = function_def
self.signature = function_def.signature
self._num_outputs = len(self.signature.output_arg)
self._output_types = [o.type for o in self.signature.output_arg]
self._output_shapes = [o.shape for o in outputs]
self._control_captures = graph.control_captures
self._func_graph_outputs = outputs
self.grad_func_name = None
self.python_grad_func = None
self._c_func = c_api_util.ScopedTFFunction(fn)
self._grad_func = None
self.graph = graph
self._stateful_ops = tuple(op for op in operations if op.op_def.is_stateful)
def add_to_graph(self, g=None):
# pylint: disable=protected-access
if not g and context.executing_eagerly():
context.context().add_function_def(self.definition)
else:
if self.name not in g._functions:
g._add_function(self)
for f in self.graph._functions.values():
if f.name not in g._functions:
g._add_function(f)
# pylint: enable=protected-access
@property
def stateful_ops(self):
return self._stateful_ops
def call(self, ctx, args):
"""Calls this function with `args` as inputs.
`ConcreteFunction` execution respects device annotations only if the
function won't be compiled with xla.
Args:
ctx: a Context object
args: a list of arguments to supply this function with.
Returns:
The outputs of the function call.
Raises:
ValueError: if the number of arguments is incorrect.
"""
if len(args) != len(self.signature.input_arg):
raise ValueError(
"Arguments and signature arguments do not match: %s %s " %
(len(args), len(list(self.signature.input_arg))))
function_call_options = ctx.function_call_options
if function_call_options.config_proto_serialized is None:
config = function_utils.get_disabled_rewriter_config()
else:
config = function_call_options.config_proto_serialized
executor_type = function_call_options.executor_type or ""
executing_eagerly = ctx.executing_eagerly()
if executing_eagerly:
with _InterpolateFunctionError(self):
outputs = execute.execute(
str(self.signature.name),
num_outputs=self._num_outputs,
inputs=args,
attrs=("executor_type", executor_type,
"config_proto", config),
ctx=ctx)
# Replace empty list with None
outputs = outputs or None
else:
# TODO(akshayka): Either remove this if the FunctionLibraryRuntime
# creates `PartitionedCallOp` kernels by default, or remove the previous
# branch if a TPU kernel is registered for `PartitionedCall`.
with _InterpolateFunctionError(self):
with ops.control_dependencies(self._control_captures):
outputs = functional_ops.partitioned_call(
args=args,
f=self,
tout=self._output_types,
executing_eagerly=executing_eagerly,
config=config,
executor_type=executor_type)
if executing_eagerly:
return outputs
else:
# TODO(b/128924522): This additional set_shape should not be
# necessary. ShapeRefiner likely needs to inspect handle_data. Remove this
# once that's done.
for i, shape in enumerate(self._output_shapes):
outputs[i].set_shape(shape)
for i, func_graph_output in enumerate(self._func_graph_outputs):
custom_gradient.copy_handle_data(func_graph_output, outputs[i])
return outputs
class ConcreteFunction(object):
"""Callable object encapsulating a function definition and its gradient.
`ConcreteFunction` is a callable that encapsulates a function definition and
is differentiable under `tf.GradientTape` objects.
"""
def __init__(self, func_graph, attrs=None, signature=None):
"""Initialize a `ConcreteFunction`.
Args:
func_graph: An instance of FuncGraph: the function body to wrap.
attrs: (optional) dict mapping names of attributes to their AttrValue
values. Attributes in `attrs` will be included in this function's
definition.
signature: a nested sequence of `TensorSpec` objects specifying the input
signature of this function.
Raises:
ValueError: If number of input_placeholders is not equal to the number
of function inputs.
"""
self._arg_keywords = None
self._num_positional_args = None
self._func_graph = func_graph
self._captured_inputs = list(self._func_graph.captures.keys())
self._captured_closures = [
x[0] for x in self._func_graph.deferred_captures.values()]
self._num_outputs = len(self._func_graph.outputs)
self._output_shapes = tuple(
output.shape for output in self._func_graph.outputs)
self._attrs = _parse_func_attrs(attrs or {})
self._inference_function = _EagerDefinedFunction(
_inference_name(self._func_graph.name), self._func_graph,
self._func_graph.inputs, self._func_graph.outputs, self._attrs)
self._backward_graph_function = None
self._signature = signature
self._gradient_name = None
def __call__(self, *args, **kwargs):
"""Executes the wrapped function.
Args:
*args: Tensors or Variables. Positional arguments are only accepted when
they correspond one-to-one with arguments of the traced Python function.
**kwargs: Tensors or Variables specified by name. When
`get_concrete_function` was called to create this `ConcreteFunction`,
each Tensor input was given a name, defaulting to the name of the Python
function's argument but possibly overridden by the `name=` argument to
`tf.TensorSpec`. These names become the argument names for the concrete
function.
Returns:
The result of applying the TF function on the given Tensors.
Raises:
AssertionError: If this `ConcreteFunction` was not created through
`get_concrete_function`.
ValueError: If arguments contains anything other than Tensors or
Variables.
TypeError: For invalid positional/keyword argument combinations.
"""
if self._arg_keywords is None or self._num_positional_args is None:
if self._signature is not None:
if kwargs:
raise NotImplementedError(
"Keyword arguments not supported when calling a "
"wrap_function-decorated function.")
return self._call_flat(args, self.captured_inputs)
raise AssertionError(
"Tried to call a concrete function obtained from an internal API "
"through the public interface. Use get_concrete_function instead.")
if len(args) > self._num_positional_args:
raise TypeError(
("Expected at most {} positional arguments (and the rest keywords, "
"of {}), got {}. When calling a concrete function, positional "
"arguments may not be bound to Tensors within nested structures."
).format(self._num_positional_args, self._arg_keywords, args))
args = list(args)
for keyword in self._arg_keywords[len(args):]:
try:
args.append(kwargs.pop(compat.as_str(keyword)))
except KeyError:
specified_keywords = (list(self._arg_keywords[:len(args)])
+ list(kwargs.keys()))
raise TypeError(
"Expected argument names {} but got values for {}. Missing: {}."
.format(
list(self._arg_keywords),
specified_keywords,
list(set(self._arg_keywords) - set(specified_keywords))))
if kwargs:
positional_arg_keywords = set(self._arg_keywords[:len(args)])
for unused_key in kwargs:
if unused_key in positional_arg_keywords:
raise TypeError("Got two values for keyword '{}'.".format(unused_key))
raise TypeError("Keyword arguments {} unknown. Expected {}.".format(
list(kwargs.keys()), list(self._arg_keywords)))
return self._call_flat(args, self.captured_inputs)
def _filtered_call(self, args, kwargs):
"""Executes the function, filtering arguments from the Python function.
Objects aside from Tensors, CompositeTensors, and Variables are ignored.
CompositeTensors are expanded into their components.
Args:
args: Canonicalized positional arguments of the Python function.
kwargs: Canonicalized keyword arguments of the Python function.
Returns:
The result of applying the function on the Tensors/Variables contained in
`args` and `kwargs`.
"""
return self._call_flat(
(t for t in nest.flatten((args, kwargs), expand_composites=True)
if isinstance(t, (ops.Tensor,
resource_variable_ops.BaseResourceVariable))),
self.captured_inputs)
def _call_flat(self, args, captured_inputs):
"""Executes the wrapped function.
Args:
args: a list of Tensors or Variables. Any CompositeTensors should be
expanded before calling this method.
captured_inputs: the captured inputs that are also part of the input args
to the actual execution. By default, it should be self._captured_inputs.
Returns:
The result of applying the TF function to `args`.
Raises:
ValueError: If `args` contains anything other than Tensors or Variables.
"""
args = list(args)
ctx = context.context()
executing_eagerly = ctx.executing_eagerly()
if any(isinstance(a, composite_tensor.CompositeTensor) for a in args):
raise AssertionError("Expected all args to be Tensors or Variables; "
"but got CompositeTensor: %r" % args)
if (tape.could_possibly_record() or
hasattr(ops.get_default_graph(), "watch_variable")):
for v in self._func_graph.variables:
resource_variable_ops.variable_accessed(v)
tensor_inputs = []
variables_used = set([])
for i, arg in enumerate(args):
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
# We can pass a variable more than once, and in this case we need to
# pass its handle only once.
if arg.handle in variables_used:
continue
resource_variable_ops.variable_accessed(arg)
tensor_inputs.append(arg.handle)
variables_used.add(arg.handle)
elif isinstance(arg, ops.Tensor):
tensor_inputs.append(arg)
if not executing_eagerly:
# If we're graph building, shape inference is on. We check for input
# compatibility up front to avoid hard to debug incompatibilities
# later.
graph_input_shape = tensor_shape.TensorShape(
self._func_graph.inputs[i].shape)
if not graph_input_shape.is_compatible_with(arg.shape):
if self._arg_keywords:
arg_name = "'{}'".format(self._arg_keywords[i])
else:
arg_name = "with index {}".format(i)
raise ValueError(
("The argument {} (value {}) is not compatible with the shape "
"this function was traced with. Expected shape {}, but got "
"shape {}.\n\nIf you called get_concrete_function, you may "
"need to pass a tf.TensorSpec(..., shape=...) with a less "
"specific shape, having None on axes which can vary.").format(
arg_name, arg,
self._func_graph.inputs[i].shape,
arg.shape))
elif (self._signature is not None and
isinstance(self._signature[i], tensor_spec.TensorSpec)):
tensor_inputs.append(
ops.convert_to_tensor(arg, self._signature[i].dtype))
else:
raise ValueError("All inputs to `ConcreteFunction`s must be Tensors; "
"on invocation of %s, the %d-th input (%s) was not a "
"Tensor." % (self._func_graph.name, i, str(arg)))
args = tensor_inputs + captured_inputs
if (tape.should_record(tensor_inputs) or
tape.should_record(captured_inputs)):
if context.executing_eagerly():
return self._eager_backprop_call(args)
else:
return self._backprop_call_with_delayed_rewrite(args)
# Only need to override the gradient in graph mode and when we have outputs.
if context.executing_eagerly() or not self.outputs:
outputs = self._inference_function.call(ctx, args)
else:
self._register_gradient()
with ops.get_default_graph().gradient_override_map(
{"PartitionedCall": self._gradient_name,
"StatefulPartitionedCall": self._gradient_name}):
outputs = self._inference_function.call(ctx, args)
return self._build_call_outputs(outputs)
def _register_gradient(self):
"""Registers the gradient for this `ConcreteFunction`.
The gradient rewrites an inference call op to a forward call op, but does
not modify a pre-existing forward call op. It then computes the gradient
from the output's gradients and the side outputs of the forward op.
"""
if self._gradient_name:
return
self._gradient_name = "PartitionedCall-%s" % ops.uid()
@ops.RegisterGradient(self._gradient_name)
def _registered_grad_fn(op, *doutputs): # pylint: disable=unused-variable
return self._grad_fn(op, *doutputs)
def _grad_fn(self, op, *doutputs):
"""Gradients of this function."""
if self._backward_graph_function is None:
self._construct_backprop_function()
# pylint: disable=protected-access
self._forward_function.add_to_graph(op.graph)
num_inference_outputs = self._inference_function._num_outputs
# Rewrite an inference call op to be a forward call op
if op.get_attr("f").name.encode() == self._inference_function.name:
op._set_func_attr("f", self._forward_function.name)
op._set_type_list_attr("Tout", self._forward_function._output_types)
op._add_outputs(
self._forward_function._output_types[num_inference_outputs:],
self._forward_function._output_shapes[num_inference_outputs:])
for i in range(num_inference_outputs, len(op.outputs)):
func_graph_output = self._forward_function._func_graph_outputs[i]
custom_gradient.copy_handle_data(func_graph_output, op.outputs[i])
# pylint: enable=protected-access
# Compute the gradients using the side outputs
side_outputs = op.outputs[num_inference_outputs:]
args = list(doutputs[:num_inference_outputs]) + list(side_outputs)
return self._backward_graph_function._call_flat( # pylint: disable=protected-access
(a for a in args if a is not None),
self._backward_graph_function.captured_inputs)
@property
def name(self):
"""`ConcreteFunction` name."""
return self._inference_function.name
@property
def graph(self):
"""Returns the graph from which this function was constructed."""
return self._func_graph
@property
def inputs(self):
"""Returns tensors in `self.graph` corresponding to arguments."""
return self._func_graph.inputs
@property
def structured_input_signature(self):
"""Returns structured signature of the original function."""
return self._func_graph.structured_input_signature
@property
def outputs(self):
"""Returns tensors in `self.graph` corresponding to returned tensors."""
return self._func_graph.outputs
@property
def structured_outputs(self):
"""Returns outputs in `self.graph` as returned by the original function."""
return self._func_graph.structured_outputs
@property
def captured_inputs(self):
"""Returns external Tensors captured by this function.
self.__call__(*args) passes `args + self.captured_inputs` to the function.
"""
return self._captured_inputs + [x() for x in self._captured_closures]
@property
def function_def(self):
"""Returns a `FunctionDef` object representing this function."""
return self._inference_function.definition
@property
def output_shapes(self):
"""The function's output shapes."""
return nest.map_structure(
lambda x: getattr(x, 'shape', tensor_shape.TensorShape(None)),
composite_tensor.replace_composites_with_components(
self._func_graph.structured_outputs),
expand_composites=False)
@property
def output_dtypes(self):
# TODO(akshayka): Consider removing this.
return nest.map_structure(
lambda x: x.dtype if x is not None else None,
composite_tensor.replace_composites_with_components(
self._func_graph.structured_outputs),
expand_composites=False)
def add_to_graph(self, g=None, register_gradient_functions=False):
"""Registers the function, adds it to the graph g or default graph."""
# If we are not executing eagerly, add the function to the default graph if
# no graph is specified.
# In case of eager execution, the function definition gets added to the
# context during construction itself.
# TODO(allenl/shivaniagrawal): rename this to register to reflect the
# method's functionality better. Remove register_gradient_functions argument
# and figure out if these need to be registered.
if not context.executing_eagerly() and not g:
g = ops.get_default_graph()
self._inference_function.add_to_graph(g) # pylint: disable=protected-access
# pylint: disable=protected-access
if register_gradient_functions:
# There are two situations for the actual call of a defun:
# 1. If none of the input args are resource variables or watched by any
#    tape, the call runs the _inference_function of concrete_func for the
#    forward pass, and the gradient is generated by the standard mechanism.
# 2. Otherwise, defun will create two functions, one for forward pass,
# and the backward pass will be created via tape.
# When registering the function, we register both cases.
if self._backward_graph_function is None:
self._construct_backprop_function()
forward_function = self._forward_function
backward_function = self._backward_graph_function._inference_function
# pylint: enable=protected-access
forward_function.add_to_graph(g)
backward_function.add_to_graph(g)
def _construct_backprop_function(self):
"""Constructs the backprop function object for this function."""
backwards_graph = func_graph_module.FuncGraph(
_backward_name(self._func_graph.name))
# Keep track of the forward graph so that if the backwards graph
# tries to capture tensors those will be correctly captured first in
# the forward graph. This is an edge case that can only happen with
# tf.custom_gradient.
backwards_graph._forward_func_graph = self._func_graph # pylint: disable=protected-access
forward_function_name = _forward_name(self._func_graph.name)
outputs = [x for x in self._func_graph.outputs
if gradients_util.IsTrainable(x)]
with backwards_graph.as_default():
gradients_wrt_outputs = [
graph_placeholder(x.dtype, x.shape) for x in outputs
]
gradients_wrt_inputs = gradients_util._GradientsHelper( # pylint: disable=protected-access
outputs,
self._func_graph.inputs,
grad_ys=gradients_wrt_outputs,
src_graph=self._func_graph)
backwards_graph_captures = list(backwards_graph.captures.keys())
backward_function_attr = _parse_func_attrs(
{FORWARD_FUNCTION_ATTRIBUTE_NAME: forward_function_name})
backward_function_attr.update(self._attrs)
# The ordering of `backwards_graph.inputs` is important: inputs of
# `self._backward_graph_function` correspond to outputs of
# `self._forward_function`.
backwards_graph.inputs = gradients_wrt_outputs + list(
backwards_graph.captures.values())
# Clear captures, since we pass them in as inputs.
backwards_graph.captures = {}
backwards_graph.outputs.extend(
grad
for grad in nest.flatten(gradients_wrt_inputs, expand_composites=True)
if grad is not None)
backwards_graph.structured_outputs = gradients_wrt_inputs
self._backward_graph_function = ConcreteFunction(
backwards_graph, attrs=backward_function_attr)
forward_function_attr = _parse_func_attrs({
BACKWARD_FUNCTION_ATTRIBUTE_NAME:
self._backward_graph_function._inference_function.name}) # pylint: disable=protected-access
forward_function_attr.update(self._attrs)
self._forward_function = _EagerDefinedFunction(
forward_function_name, self._func_graph, self._func_graph.inputs,
self._func_graph.outputs + backwards_graph_captures,
forward_function_attr)
def _eager_backprop_call(self, args):
"""Calls the forward function and records the result on a tape.
This method fully constructs the forward and backward functions before
calling the function and recording them on the tape.
(Only records results on a tape if the function has outputs).
Args:
args: All inputs to the function, including resolved captured inputs
Returns:
The call output.
"""
if self._backward_graph_function is None:
self._construct_backprop_function()
ctx = context.context()
self._register_gradient()
with ops.get_default_graph().gradient_override_map(
{"PartitionedCall": self._gradient_name,
"StatefulPartitionedCall": self._gradient_name}):
outputs = self._forward_function.call(ctx, args)
if isinstance(outputs, ops.Operation) or outputs is None:
return outputs
# `real_outputs` are the actual outputs of the inference graph function;
# `side_outputs` are the intermediate Tensors that were added as outputs to
# the forward graph function so that we can compute its gradient.
real_outputs = outputs[:self._num_outputs]
skip_positions = [i for i, t in enumerate(real_outputs)
if not gradients_util.IsTrainable(t)]
side_outputs = outputs[self._num_outputs:]
def backward_function(*args):
args = [a for i, a in enumerate(args)
if a is not None and i not in skip_positions]
return self._backward_graph_function._call_flat( # pylint: disable=protected-access
list(args) + side_outputs,
self._backward_graph_function.captured_inputs)
tape.record_operation(self._forward_function.signature.name, real_outputs,
args, backward_function)
return self._build_call_outputs(real_outputs)
def _backprop_call_with_delayed_rewrite(self, args):
"""Calls the inference function and records the result on a tape.
The recorded backwards function will construct the backwards graph and
rewrite the inference function to the forward function. This only happens
if the recorded backwards function ends up being used to compute gradients.
This approach avoids constructing unnecessary graphs, but it only works if
we are calling this function when not executing eagerly.
(Only records results on a tape if the function has outputs)
Args:
args: All inputs to the function, including resolved captured inputs
Returns:
The call output.
"""
ctx = context.context()
self._register_gradient()
with ops.get_default_graph().gradient_override_map(
{"PartitionedCall": self._gradient_name,
"StatefulPartitionedCall": self._gradient_name}):
outputs = self._inference_function.call(ctx, args)
if isinstance(outputs, ops.Operation) or outputs is None:
return outputs
call_op = outputs[0].op
def backward_function(*args):
return self._grad_fn(call_op, *args)
tape.record_operation(self._inference_function.signature.name, outputs,
args, backward_function)
return self._build_call_outputs(outputs)
def _build_call_outputs(self, result):
"""Maps the fdef output list to actual output structure.
Args:
result: Output lists defined by FunctionDef.
Returns:
The actual call output.
"""
if self._func_graph.structured_outputs is None:
return result
# Replace outputs with results, skipping over any 'None' values.
outputs_list = nest.flatten(self._func_graph.structured_outputs,
expand_composites=True)
j = 0
for i, o in enumerate(outputs_list):
if o is not None:
outputs_list[i] = result[j]
j += 1
ret = nest.pack_sequence_as(self._func_graph.structured_outputs,
outputs_list, expand_composites=True)
return ret
pywrap_tensorflow.RegisterType("Tensor", ops.Tensor)
pywrap_tensorflow.RegisterType("IndexedSlices", ops.IndexedSlices)
def _deterministic_dict_values(dictionary):
return tuple(dictionary[key] for key in sorted(dictionary))
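# For example, _deterministic_dict_values({"b": 2, "a": 1}) returns (1, 2);
# values are ordered by sorted key, so the result is deterministic.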
class FunctionSpec(object):
"""Specification of how to bind arguments to a function."""
@staticmethod
def from_function_and_signature(python_function, input_signature):
"""Create a FunctionSpec instance given a python function and signature."""
fullargspec = tf_inspect.getfullargspec(python_function)
# Treat a wrapped partial function as a special case. For all arguments that
# were overridden with keywords in the partial:
# - remove the corresponding arguments,
# - remove the corresponding keywords.
_, unwrapped = tf_decorator.unwrap(python_function)
# TODO(b/131153379): Consider Python3's fullargspec.kwonlyargs and
# fullargspec.kwonlydefaults.
if isinstance(unwrapped, functools.partial):
# Also consider the Python3 case with kwonlydefaults.
if fullargspec.defaults or fullargspec.kwonlydefaults:
new_defaults = fullargspec.defaults
new_args = fullargspec.args
if fullargspec.defaults:
# To be able to canonicalize the function properly, we want to ignore
# default values that are overridden via a partial kwarg. For example:
#
# def func(a, b, c, d=5, e=7):
# return a, b, c, d, e
#   p_func = tf.function(functools.partial(func, 10, e=9))
#
# Here we want to drop from the defaults the parameter `e`. If we
# forwarded the call to the partial function with a default for `e`
# we would get an error for passing two values for one parameter.
#
# Note that this has a limitation: we can only override parameters at
# the end of the parameter list.
#
# In this case we want to end up with 3 arguments (b, c, d) and 1
# default value (5). We do this by constructing a mask where 0 stands
# for a value that was overridden by a partial kwarg. The seemingly
# complicated logic below does just that - for arguments (b, c, d, e)
# we would get a mask (1, 1, 1, 0).
old_args = fullargspec.args
old_defaults = fullargspec.defaults
no_default = object()
num_args_without_defaults = len(old_args) - len(old_defaults)
left_padding = tuple([no_default] * num_args_without_defaults)
args_with_defaults = zip(old_args, left_padding + old_defaults)
# Create a mask where 0 stands for args that had a partial kwarg
# defined.
non_keyword_defaults_mask = [
0 if key in unwrapped.keywords else 1 for key in old_args
]
# Keep only arguments and defaults that were not kwargs of partial.
new_args_with_defaults = list(
itertools.compress(args_with_defaults, non_keyword_defaults_mask))
# Keep all args.
new_args = [arg for arg, _ in new_args_with_defaults]
# Keep only real default values.
new_defaults = [
default for _, default in new_args_with_defaults
if default is not no_default
]
fullargspec = tf_inspect.FullArgSpec(
args=new_args,
varargs=fullargspec.varargs,
varkw=fullargspec.varkw,
defaults=new_defaults,
kwonlyargs=[],
kwonlydefaults={},
annotations=fullargspec.annotations)
is_method = tf_inspect.ismethod(python_function)
return FunctionSpec(fullargspec, is_method, [], {}, input_signature)
def __init__(self, fullargspec, is_method, args_to_prepend, kwargs_to_include,
input_signature):
self._fullargspec = fullargspec
self._is_method = is_method
del args_to_prepend
del kwargs_to_include
self._default_values = fullargspec.defaults
if self._is_method:
# Remove `self`: default arguments shouldn't be matched to it.
# TODO(b/127938157): Should this error out if there is no arg to
# be removed?
args = fullargspec.args[1:]
else:
args = fullargspec.args
# A cache mapping from argument name to index, for canonicalizing
# arguments that are called in a keyword-like fashion.
self._args_to_indices = {arg: i for i, arg in enumerate(args)}
self.arg_names = args
self.vararg_name = fullargspec.varargs
# A cache mapping from arg index to default value, for canonicalization.
offset = len(args) - len(self._default_values or [])
self._arg_indices_to_default_values = {
offset + index: default
for index, default in enumerate(self._default_values or [])
}
if input_signature is None:
self._input_signature = None
else:
if fullargspec.kwonlyargs:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
if not isinstance(input_signature, (tuple, list)):
raise TypeError("input_signature must be either a tuple or a "
"list, received " + str(type(input_signature)))
self._input_signature = tuple(input_signature)
self._flat_input_signature = tuple(nest.flatten(input_signature,
expand_composites=True))
@property
def fullargspec(self):
return self._fullargspec
@property
def is_method(self):
return self._is_method
@property
def args_to_prepend(self):
return self._args_to_prepend
@property
def kwargs_to_include(self):
return self._kwargs_to_include
@property
def input_signature(self):
return self._input_signature
@property
def flat_input_signature(self):
return self._flat_input_signature
def canonicalize_function_inputs(self, *args, **kwargs):
"""Canonicalizes `args` and `kwargs`.
Canonicalize the inputs to the Python function using a `FunctionSpec`
instance. In particular, we parse the varargs and kwargs that the
original function was called with into a tuple corresponding to the
Python function's positional (named) arguments and a dictionary
corresponding to its kwargs.
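For example (an illustrative spec built for `def f(a, b, c=3)` with no
input_signature):
  spec.canonicalize_function_inputs(1, c=30, b=20)
  # => ((1, 20, 30), {})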
Args:
*args: The varargs this object was called with.
**kwargs: The keyword args this function was called with.
Returns:
A canonicalized ordering of the inputs represented by a tuple in the
form (args, kwargs). Here: `args` is a full list of bound arguments, and
`kwargs` contains only true keyword arguments, as opposed to named
arguments called in a keyword-like fashion.
Raises:
ValueError: If a keyword in `kwargs` cannot be matched with a positional
argument when an input signature is specified, or when the inputs
do not conform to the input signature.
"""
if self._input_signature is not None:
if len(args) > len(self._input_signature):
raise TypeError(
"When input_signature is provided, only pass arguments "
"covered by it. Received %d argument(s)." % len(args))
for arg in six.iterkeys(kwargs):
index = self._args_to_indices.get(arg, None)
if index is None:
raise TypeError(
"Function got an unexpected keyword argument %s" % arg)
if index >= len(self._input_signature):
raise TypeError(
"When input_signature is provided, only pass arguments "
"covered by it. Received argument %s." % arg)
if not kwargs:
inputs = args
default_keys = sorted(self._arg_indices_to_default_values.keys())
if default_keys:
assert min(default_keys) <= len(
args), "Not enough arguments (%s, %s, %s)" % (args, default_keys,
self.arg_names)
for index in default_keys:
if index >= len(args):
inputs += (self._arg_indices_to_default_values[index],)
else:
# Maps from index of arg to its corresponding value, according to `args`
# and `kwargs`; seeded with the default values for the named args that
# aren't in `args`.
arg_indices_to_values = {
index: default for index, default in six.iteritems(
self._arg_indices_to_default_values) if index >= len(args)
}
consumed_args = []
for arg, value in six.iteritems(kwargs):
index = self._args_to_indices.get(arg, None)
if index is not None:
arg_indices_to_values[index] = value
consumed_args.append(arg)
elif self._input_signature is not None:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
for arg in consumed_args:
# After this loop, `kwargs` will only contain true keyword arguments, as
# opposed to named arguments called in a keyword-like fashion.
kwargs.pop(arg)
inputs = args + _deterministic_dict_values(arg_indices_to_values)
if self._input_signature is None:
inputs = _convert_numpy_inputs(inputs)
return inputs, kwargs
else:
assert not kwargs
inputs = _convert_inputs_to_signature(
inputs,
self._input_signature,
self._flat_input_signature)
return inputs, {}
def _convert_numpy_inputs(inputs):
"""Convert numpy array inputs to tensors."""
# We assume that any CompositeTensors have already converted their components
# from numpy arrays to Tensors, so we don't need to expand composites here.
flat_inputs = nest.flatten(inputs, expand_composites=False)
# Check for NumPy arrays in arguments and convert them to Tensors.
# TODO(nareshmodi): Skip ndarray conversion to tensor altogether, perhaps
# finding a way to store them directly in the cache key (currently not
# possible since ndarrays are not hashable).
need_packing = False
for index, value in enumerate(flat_inputs):
if type(value) == np.ndarray:
flat_inputs[index] = constant_op.constant(value)
need_packing = True
if need_packing:
return nest.pack_sequence_as(
structure=inputs, flat_sequence=flat_inputs, expand_composites=False)
else:
return inputs
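# Illustrative behavior of `_convert_numpy_inputs` (hypothetical inputs): a
# bare ndarray becomes a constant Tensor, anything else passes through:
#   _convert_numpy_inputs((np.array([1, 2]), "label"))
#   # => (<tf.Tensor: ... numpy=array([1, 2])>, "label")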
def _convert_inputs_to_signature(inputs, input_signature, flat_input_signature):
"""Convert inputs to pass into a function with an explicit signature."""
try:
# TODO(b/124370185): Use all elements as inputs to throw an error if there
# are ignored arguments. Calling with arguments that are not part of the
# signature should throw an error.
flatten_inputs = nest.flatten_up_to(
input_signature,
inputs[:len(input_signature)],
expand_composites=True)
except ValueError:
raise ValueError("Structure of Python function inputs does not match "
"input_signature. Inputs (%s), input_signature(%s)." %
(str(inputs), str(input_signature)))
need_packing = False
for index, (value, spec) in enumerate(zip(flatten_inputs,
flat_input_signature)):
if not pywrap_tensorflow.IsTensor(value):
try:
flatten_inputs[index] = ops.convert_to_tensor(
value, dtype_hint=spec.dtype)
need_packing = True
except ValueError:
raise ValueError("When input_signature is provided, all inputs to "
"the Python function must be convertible to tensors."
"Inputs (%s), input_signature(%s)." %
(str(inputs), str(input_signature)))
if any(not spec.is_compatible_with(other) for spec, other in zip(
flat_input_signature,
flatten_inputs)):
raise ValueError("Python inputs incompatible with input_signature: "
"inputs (%s), input_signature (%s)" %
(str(inputs), str(input_signature)))
if need_packing:
inputs = nest.pack_sequence_as(
structure=input_signature,
flat_sequence=flatten_inputs,
expand_composites=True)
return inputs
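# A minimal sketch of `_convert_inputs_to_signature` (hypothetical spec):
#
#   spec = tensor_spec.TensorSpec(shape=[None], dtype=dtypes.float32)
#   converted = _convert_inputs_to_signature(([1.0, 2.0],), (spec,), (spec,))
#
# The Python list is converted to a float32 Tensor via `convert_to_tensor`,
# checked for compatibility against the flat signature, and repacked to match
# the signature's structure.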
class FunctionCache(object):
"""A lightweight container for cached functions.
"""
def __init__(self):
# The set of functions that have been missed; entries are CacheKeys with
# input_signature `None` (i.e., a "call context key")
self.missed = set()
# The primary cache, mapping a fully shaped CacheKey to a function.
self.primary = collections.OrderedDict()
# A cache key lookup, mapping a CacheKey generated without shape info to a
# flat list of relaxed shapes (one for each argument). Arguments that are
# not Tensors contain a `None` for the corresponding relaxed shape.
self.arg_relaxed_shapes = collections.OrderedDict()
# The secondary cache, mapping a CacheKey generated without shape info to a
# function.
self.arg_relaxed = collections.OrderedDict()
# All OrderedDicts require manual garbage collection.
self._garbage_collectors = [
_FunctionGarbageCollector(self.primary),
_FunctionGarbageCollector(self.arg_relaxed),
_FunctionGarbageCollector(self.arg_relaxed_shapes)]
def all_values(self):
"""A set of all `ConcreteFunction` instances held by this cache."""
return set(self.primary.values()) | set(self.arg_relaxed.values())
class Function(object):
"""Wrapper class for the graph functions defined for a Python function.
See the documentation for `defun` for more information on the semantics of
defined functions.
The `Function` class is thread-compatible, meaning that minimal usage of
defuns (defining and calling) is thread-safe, but if users call other methods
or invoke the base `python_function` themselves, external synchronization is
necessary.
"""
def __init__(self,
python_function,
name,
input_signature=None,
attributes=None,
autograph=True,
autograph_options=None,
experimental_relax_shapes=False,
capture_by_value=None):
"""Initializes a `Function`.
Args:
python_function: the function to be wrapped.
name: the name given to it.
input_signature: a possibly nested sequence of `TensorSpec` objects
specifying the input signature of this function. If `None`, a separate
function is instantiated for each inferred input signature.
attributes: dict, extra keyword arguments that will be added as attribute
of the function.
autograph: whether to use autograph to compile
`python_function`. See https://www.tensorflow.org/guide/autograph for
more information.
autograph_options: Experimental knobs to control behavior
`when autograph=True`. See https://www.tensorflow.org/guide/autograph
for more information.
experimental_relax_shapes: When true, argument shapes may be relaxed to
avoid unnecessary retracing.
capture_by_value: Experimental. Whether to capture resource variables by
value or reference. If None, will inherit from a parent context or
default to False.
Raises:
ValueError: if `input_signature` is not None and the `python_function`'s
argspec has keyword arguments.
"""
self._python_function = python_function
self._function_spec = FunctionSpec.from_function_and_signature(
python_function, input_signature)
self._name = name
self._autograph = autograph
self._autograph_options = autograph_options
self._experimental_relax_shapes = experimental_relax_shapes
self._function_cache = FunctionCache()
self._function_attributes = attributes or {}
self._capture_by_value = capture_by_value
self._lock = threading.Lock()
# _descriptor_cache maps an instance of a class to an instance-specific
# `Function`, used to make sure defun-decorated methods create different
# functions for each instance.
self._descriptor_cache = weakref.WeakKeyDictionary()
def __call__(self, *args, **kwargs):
"""Calls a graph function specialized to the inputs."""
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
@property
def python_function(self):
"""Returns the wrapped Python function."""
return self._python_function # pylint: disable=protected-access
@property
def function_spec(self):
return self._function_spec
@property
def input_signature(self):
"""Returns the input signature."""
return self._function_spec.input_signature
@property
def flat_input_signature(self):
"""Returns the flattened input signature."""
return self._function_spec.flat_input_signature
def _get_concrete_function_internal_garbage_collected(self, *args, **kwargs):
"""Returns a concrete function which cleans up its graph function."""
if self.input_signature:
args, kwargs = None, None
graph_function, _, _ = self._maybe_define_function(args, kwargs)
return graph_function
def _get_concrete_function_internal(self, *args, **kwargs):
"""Bypasses error checking when getting a graph function."""
graph_function = self._get_concrete_function_internal_garbage_collected(
*args, **kwargs)
# We're returning this concrete function to someone, and they may keep a
# reference to the FuncGraph without keeping a reference to the
# ConcreteFunction object. So we won't clean up the reference cycles
# manually and instead will leave them to Python's garbage collector.
graph_function._garbage_collector.release() # pylint: disable=protected-access
return graph_function
def get_concrete_function(self, *args, **kwargs):
"""Returns a `ConcreteFunction` specialized to inputs and execution context.
Args:
*args: inputs to specialize on.
**kwargs: inputs to specialize on.
"""
if self.input_signature:
if kwargs:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
if args:
# If args are provided, they must match the input signature.
if not is_same_structure(self.input_signature, args):
raise ValueError("Structure of Python function inputs does not match "
"input_signature.")
flat_inputs = nest.flatten(args, expand_composites=True)
if any(not isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec))
for arg in flat_inputs):
raise ValueError("When input_signature is provided, all inputs to "
"the Python function must be Tensors or "
"tf.TensorSpec objects.")
if any(not spec.is_compatible_with(other)
for spec, other in zip(self.flat_input_signature, flat_inputs)):
raise ValueError("Python inputs incompatible with input_signature: "
"inputs (%s), input_signature (%s)" %
(str(args), str(self.input_signature)))
args, kwargs = None, None
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
if self.input_signature:
args = self.input_signature
kwargs = {}
seen_names = set()
captured = frozenset(graph_function.graph.internal_captures)
# pylint: disable=protected-access
graph_function._arg_keywords = []
prefix_counts = {}
# pylint: enable=protected-access
num_positional = 0
for arg in graph_function.graph.inputs:
if arg in captured:
break
num_positional += 1
user_arg_name = compat.as_str(arg.op.get_attr("_user_specified_name"))
proposal = user_arg_name
while proposal in seen_names:
index = prefix_counts.get(user_arg_name, 1)
proposal = "{}_{}".format(user_arg_name, index)
prefix_counts[user_arg_name] = index + 1
seen_names.add(proposal)
graph_function._arg_keywords.append(proposal) # pylint: disable=protected-access
# Anything can be a positional argument, in the same order as .inputs
graph_function._num_positional_args = num_positional # pylint: disable=protected-access
return graph_function
def __get__(self, instance, owner):
"""Makes it possible to defun instance methods."""
del owner
# `instance` here is the instance that this `Function` was accessed through
# e.g., for
#
# class Foo(object):
#
# @function.defun
# def bar(self):
# ...
#
# foo = Foo()
# foo.bar() # `foo.bar` is a `Function` instance
#
# then `instance` will be `foo` (and `owner` will be `Foo`). We create a
# new instance of `Function` here to allow different instances each
# to create variables once, thereby allowing methods to be decorated with
# defun. Keeps a cache to avoid retracing the function every time the
# descriptor is accessed.
if instance not in self._descriptor_cache:
if instance is None:
return self
# If there is no instance-specific `Function` in the cache, we construct
# an instance-specific `Function` that uses a weak reference to the
# instance (so that the instance will be correctly gc'd).
# And finally add the wrapped function to the descriptor cache.
self._descriptor_cache[instance] = class_method_to_instance_method(
self, instance)
# Return the cached `Function` for the instance
return self._descriptor_cache[instance]
def _cache_key(self, args, kwargs, include_tensor_ranks_only=False):
"""Computes the cache key given inputs and execution context."""
if self.input_signature is None:
inputs = (args, kwargs) if kwargs else args
input_signature = pywrap_tensorflow.TFE_Py_EncodeArg(
inputs, include_tensor_ranks_only)
else:
del args, kwargs
assert not include_tensor_ranks_only
input_signature = self.flat_input_signature
ctx = context.context()
# Don't need to open an init_scope if the _cache_key call is in eager mode
# already.
executing_eagerly = ctx.executing_eagerly()
parent_graph = None
if not executing_eagerly:
with ops.init_scope():
# The graph, or whether we're executing eagerly, should be a part of the
# cache key so we don't improperly capture tensors such as variables.
executing_eagerly = ctx.executing_eagerly()
parent_graph = None if executing_eagerly else ops.get_default_graph()
# pylint: disable=protected-access
default_graph = ops.get_default_graph()
# TODO(b/117617952): The current distribution strategy will affect graph
# building (e.g. accessing different variables from different devices) and
# so requires retracing for each device.
strategy_stack = default_graph._distribution_strategy_stack
uses_distribution_strategy = (
strategy_stack and
strategy_stack[-1].strategy.extended._retrace_functions_for_each_device
)
if executing_eagerly:
colocation_stack = ()
if uses_distribution_strategy:
device_functions = (pydev.merge_device(ctx.device_name),)
else:
device_functions = ()
else:
colocation_stack = tuple(default_graph._colocation_stack.peek_objs())
if (uses_distribution_strategy
or func_graph_module.device_stack_has_callable(
default_graph._device_function_stack)):
# Putting the device in the cache key ensures that call-site device
# annotations are respected.
device_functions = tuple(default_graph._device_functions_outer_to_inner)
else:
device_functions = ()
in_cross_replica_context = False
try:
in_cross_replica_context = (strategy_stack[-1].replica_context is None) # pylint: disable=protected-access
except (AttributeError, IndexError):
pass
return CacheKey(input_signature, parent_graph, device_functions,
colocation_stack, in_cross_replica_context)
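# A rough illustration of the cache key (hypothetical calls): without an
# input_signature, `f(tf.ones([2]))` and `f(tf.ones([3]))` encode different
# shapes into the key, so each call traces a separate graph function. With
# include_tensor_ranks_only=True, both calls encode only the rank (1) and
# share a key, which is what shape-relaxation retracing relies on.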
def _create_graph_function(self, args, kwargs, override_flat_arg_shapes=None):
"""Create a `ConcreteFunction` from `args` and `kwargs`."""
if self.input_signature is None:
arglen = len(args)
else:
arglen = len(self.input_signature)
base_arg_names = self._function_spec.arg_names[:arglen]
num_missing_args = arglen - len(self._function_spec.arg_names)
missing_arg_names = [self._function_spec.vararg_name] * num_missing_args
# Produce a list of missing args of the form ["arg_0", "arg_1", ...],
# where arg is based on the self._function_spec.vararg_name.
missing_arg_names = [
"%s_%d" % (arg, i) for i, arg in enumerate(missing_arg_names)
]
arg_names = base_arg_names + missing_arg_names
graph_function = ConcreteFunction(
func_graph_module.func_graph_from_py_func(
self._name,
self._python_function,
args,
kwargs,
self.input_signature,
autograph=self._autograph,
autograph_options=self._autograph_options,
arg_names=arg_names,
override_flat_arg_shapes=override_flat_arg_shapes,
capture_by_value=self._capture_by_value),
self._function_attributes)
# pylint: disable=protected-access
# Tell the ConcreteFunction to clean up its graph once it goes out of
# scope. ConcreteFunction does not do this in its constructor since it
# gets used in some places (like Keras) where the FuncGraph lives
# longer than the ConcreteFunction.
graph_function._garbage_collector = ConcreteFunctionGarbageCollector(
graph_function.graph)
# pylint: enable=protected-access
return graph_function
def _define_function_with_shape_relaxation(self, args, kwargs):
"""Define a function, relaxing arg shapes to avoid unecessary retracing."""
rank_only_cache_key = self._cache_key(
args, kwargs, include_tensor_ranks_only=True)
arg_shapes = _flat_shape_list(args, kwargs)
relaxed_arg_shapes = self._function_cache.arg_relaxed_shapes.get(
rank_only_cache_key, None)
relaxed_arg_function = self._function_cache.arg_relaxed.get(
rank_only_cache_key, None)
if (relaxed_arg_function is not None
and _compatible_shapes(flat_relaxed=relaxed_arg_shapes,
flat_to_check=arg_shapes)):
return relaxed_arg_function, args, kwargs
if relaxed_arg_shapes is None:
relaxed_arg_shapes = arg_shapes
else:
if len(arg_shapes) != len(relaxed_arg_shapes):
raise RuntimeError("Expected arg_shapes len to match "
"relaxed_arg_shapes len: %d vs. %d"
% (len(arg_shapes), len(relaxed_arg_shapes)))
relaxed_arg_shapes = [
common_shape(x, y) for (x, y) in zip(
arg_shapes, relaxed_arg_shapes)]
self._function_cache.arg_relaxed_shapes[rank_only_cache_key] = (
relaxed_arg_shapes)
graph_function = self._create_graph_function(
args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes)
self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function
return graph_function, args, kwargs
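# Rough illustration (hypothetical shapes): if the relaxed cache holds shape
# [2] for an argument and the next call supplies shape [3], `common_shape`
# widens the stored entry to [None] and the function is retraced once with
# the relaxed shape; subsequent 1-D calls then hit the relaxed cache
# directly.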
def _maybe_define_function(self, args, kwargs):
"""Gets a function for these inputs, defining it if necessary.
`args` and `kwargs` can be None if this `Function` was created with an
`input_signature`.
Args:
args: The varargs for the Python function.
kwargs: The keyword args for the Python function.
Returns:
A graph function corresponding to the input signature implied by args and
kwargs, as well as the inputs that the object should be called with.
Raises:
ValueError: If inputs are incompatible with the input signature.
TypeError: If the function inputs include non-hashable objects
RuntimeError: If there's an internal bug (inconsistency) in handling
shape relaxation retracing.
"""
if self.input_signature is None or args is not None or kwargs is not None:
args, kwargs = self._function_spec.canonicalize_function_inputs(
*args, **kwargs)
cache_key = self._cache_key(args, kwargs)
try:
hash(cache_key)
except TypeError as e:
raise TypeError(
"Arguments supplied to `defun`-generated functions must be"
" hashable. Original error: %s" % e)
with self._lock:
graph_function = self._function_cache.primary.get(cache_key, None)
if graph_function is not None:
return graph_function, args, kwargs
logging.vlog(1,
"Creating new FuncGraph for Python function %r (key: %r)",
self._python_function, cache_key)
logging.vlog(2,
"Python function signature [args: %s] [kwargs: %s]",
args,
kwargs)
call_context_key = cache_key.replace(input_signature=None)
ag_status = (
ag_ctx.Status.ENABLED if self._autograph else ag_ctx.Status.DISABLED)
with ag_ctx.ControlStatusCtx(
status=ag_status, options=self._autograph_options):
# Build a function with shape relaxation retracing if:
# 1. shape relaxation is explicitly enabled
# and 2. there's no provided input signature
# and 3. there's been a cache miss for this calling context
if (self._experimental_relax_shapes
and self.input_signature is None
and call_context_key in self._function_cache.missed):
return self._define_function_with_shape_relaxation(args, kwargs)
self._function_cache.missed.add(call_context_key)
graph_function = self._function_cache.primary.get(cache_key, None)
if graph_function is None:
graph_function = self._create_graph_function(args, kwargs)
self._function_cache.primary[cache_key] = graph_function
return graph_function, args, kwargs
def register(func, *args, **kwargs):
"""Register a specialization of a `Function` into the graph.
This won't actually call the function with the inputs; it only puts the
function definition into the graph. Registering the function with different
input parameters will result in multiple versions of the function being
registered in the graph.
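Example (a minimal sketch, assuming graph mode and a defun-created function):

```python
import tensorflow as tf

@tf.contrib.eager.defun
def add(x, y):
  return x + y

with tf.Graph().as_default():
  # Puts the function definition (and its gradient functions) into the
  # graph's function library without executing anything.
  concrete = register(add, tf.constant(1.0), tf.constant(2.0))
```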
Args:
func: the `Function` instance generated by a @defun decorator.
*args: input arguments for the Python function.
**kwargs: input keyword arguments for the Python function.
Returns:
a `ConcreteFunction` object specialized to inputs and execution context.
Raises:
ValueError: When the input function is not a defun wrapped python function.
"""
if not isinstance(func, Function):
raise ValueError("Only defun function is allowed to be registered. "
"Got type: %s" % type(func))
concrete_func = func.get_concrete_function(*args, **kwargs)
concrete_func.add_to_graph(register_gradient_functions=True)
return concrete_func
def validate_signature(signature):
if any(not isinstance(arg, tensor_spec.TensorSpec)
for arg in nest.flatten(signature, expand_composites=True)):
raise TypeError("Invalid input_signature {}; input_signature must be "
"a possibly nested sequence of TensorSpec objects."
.format(signature))
def defun(func=None,
input_signature=None,
autograph=True,
experimental_autograph_options=None,
experimental_relax_shapes=False):
"""Compiles a Python function into a callable TensorFlow graph.
`defun` (short for "define function") compiles a Python function
composed of TensorFlow operations into a callable that executes a `tf.Graph`
containing those operations. The callable produced by `defun` contains only
the subgraph of TensorFlow operations that were executed when the Python
function was called with a particular input signature, defined as a list
of the shapes and dtypes of the Python function's Tensor-valued arguments and
the values of its non-Tensor Python objects.
When eager execution is enabled, the ability to create graphs from Python
functions makes it possible to incrementally trade off debuggability and
interactivity for performance. Functions compiled with `defun` cannot be
inspected with `pdb`; however, executing a graph
generated by `defun` sometimes takes less time and memory than eagerly
executing the corresponding Python function, since specifying computations as
graphs allows for optimizations like automatic buffer reuse and
parallelization among ops. Note that executing a `defun`-compiled function
incurs a small constant overhead, so eagerly executing sufficiently small
Python functions might take less time than executing their corresponding
`defun`-generated graphs.
For a Python function to be compatible with `defun`, all of its arguments must
be hashable Python objects or lists thereof. The function itself may not
modify the list/map structure of its arguments. Additionally, it must return
zero or more `tf.Tensor` objects. If the Python function returns
a `tf.Variable`, its compiled version will return the value of that variable
as a `tf.Tensor`.
Executing a graph generated by `defun` respects device annotations (i.e.,
all `with tf.device` directives present in a Python function will also be
present in its corresponding graph), but it is not yet possible to execute the
generated graphs across multiple machines.
_Example Usage_
```python
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
# A simple example.
def f(x, y):
return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)
g = tf.contrib.eager.defun(f)
x = tf.constant([[2.0, 3.0]])
y = tf.constant([[3.0, -2.0]])
# `f` and `g` will return the same value, but `g` will be executed as a
# TensorFlow graph.
assert f(x, y).numpy() == g(x, y).numpy()
# `defun` is capable of compiling Python functions that close over Python
# objects, including Tensors and Variables.
@tf.contrib.eager.defun
def h():
return f(x, y)
assert (h().numpy() == f(x, y).numpy()).all()
# `defun` automatically lifts variables out of the graphs it creates,
# allowing you to compile the `call` methods of `tf.keras.layers.Layer` and
# `tf.keras.Model` objects.
class MyModel(tf.keras.Model):
def __init__(self, keep_probability=0.2):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.keep_probability = keep_probability
@tf.contrib.eager.defun
def call(self, inputs, training=True):
x = self.dense2(self.dense1(inputs))
if training:
return tf.nn.dropout(x, self.keep_probability)
else:
return x
model = MyModel()
model(x, training=True) # executes a graph, with dropout
model(x, training=False) # executes a graph, without dropout
# `defun`-compiled functions are differentiable.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01)
with tf.GradientTape() as tape:
outputs = model(x)
gradient = tape.gradient(outputs, model.trainable_variables)
optimizer.apply_gradients((grad, var) for grad, var in zip(gradient,
model.trainable_variables))
```
When using `defun`, there are subtleties regarding inputs, Python control
flow, and variable creation that one should be aware of. For concreteness, let
`f` be a Python function that returns zero or more `tf.Tensor` objects and
let `F = defun(f)`. `F` builds a graph for each unique input signature it
sees, Python control flow is baked into graphs, and operations related to
variable initialization are automatically lifted out of the graphs that `F`
generates and placed in the eager context if executing eagerly or into an
outer graph otherwise.
_Input Signatures_
By default, `F = tf.contrib.eager.defun(f)` instantiates a separate graph
for every unique sequence of the shapes and dtypes of Tensor arguments and
the values of Python objects it is invoked with. For example, calling
`F(tf.random.uniform([2]))` will execute a different graph than
`F(tf.random.uniform([3]))` because the two inputs have different shapes.
The first time that `F(*args, **kwargs)` is called with a particular sequence
of Tensor shapes and dtypes and Python values, it constructs a graph by
tracing the execution of `f(*args, **kwargs)`; this graph is bound to an
input signature inferred from `(*args, **kwargs)` and cached for future reuse.
NumPy arrays passed as inputs to `F` are converted to `tf.Tensor` objects
before being passed to `f`, and are treated as Tensors for caching. This
allows a function to be called multiple times with NumPy arrays having
different values but the same shape and dtype without re-tracing each time.
`tf.contrib.eager.defun` caches graphs for your convenience, letting you
define TensorFlow functions without explicitly specifying their signatures.
However, this policy is conservative and potentially expensive; for example,
when different invocations of your function have differently-shaped Tensor
inputs, this policy might generate more graph functions than necessary. To
eliminate such costs, `tf.contrib.eager.defun` allows you to supply an
optional `input_signature` argument specifying the shapes and dtypes of the
inputs. In particular, the shapes may be partially unspecified, with `None`s
in the unknown dimensions. When an input signature is provided,
`tf.contrib.eager.defun` will only instantiate a single graph for the
decorated Python function. The following is an example:
```python
import tensorflow as tf
# The first `TensorSpec` below describes the shape and dtype of `words`,
# and the second describes the shape and dtype of `another_tensor`. Note that
# the last dimension of the `words` `TensorSpec` is left unspecified.
@tf.contrib.eager.defun(input_signature=[
tf.contrib.eager.TensorSpec(shape=[50, 300, None], dtype=tf.float32),
tf.contrib.eager.TensorSpec(shape=[300, 100], dtype=tf.float32)
])
def my_sequence_model(words, another_tensor):
...
# Note how the third dimension of the first input can vary freely.
words = tf.random.uniform([50, 300, 10])
second_input = tf.random.uniform([300, 100])
my_sequence_model(words, second_input)
words = tf.random.uniform([50, 300, 20])
my_sequence_model(words, second_input)
# Passing an input with an incompatible shape will raise an error.
words = tf.random.uniform([50, 100, 20])
my_sequence_model(words, second_input) # <---- This will raise an error.
```
Python functions that are compiled with an `input_signature` must only accept
Tensors as arguments and must not take unnamed keyword arguments (**kwargs).
_Tracing_
Be aware that because `F` only logs TensorFlow operations, all the other
Python code that `f` executes will only shape the _construction_ of the graphs
that `F` executes: the Python code won't be executed when the graphs
themselves are executed, though it will be executed every time the Python
function is traced (and a given Python function might be traced multiple
times, once for each input signature it is invoked with). For example, whereas
the Python function
```python
import tensorflow as tf
import numpy as np
tf.compat.v1.enable_eager_execution()
def add_noise():
return tf.eye(5) + np.random.randn(5, 5)
```
will return a different output every time it is invoked, the compiled function
`compiled = tf.contrib.eager.defun(add_noise)` will return the same value
every time it is called, since a particular random offset generated by NumPy
will be inserted into the graph as a TensorFlow constant. The solution is to
replace the call to `np.random.randn` with `tf.random.normal((5, 5))`.
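As a minimal sketch of that fix:

```python
import tensorflow as tf
tf.compat.v1.enable_eager_execution()

def add_noise():
  # The noise is now a TensorFlow op, so it is traced into the graph as an
  # operation rather than baked in as a constant, and is re-sampled on every
  # call to the compiled function.
  return tf.eye(5) + tf.random.normal((5, 5))

compiled = tf.contrib.eager.defun(add_noise)
```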
_Python Side-Effects_
A corollary of the previous discussion on tracing is the following: If a
Python function `f` has Python side-effects, then executing `f` multiple times
will not necessarily be semantically equivalent to executing `F =
tf.contrib.eager.defun(f)` multiple times; this difference is due to the fact
that `defun` only captures the subgraph of TensorFlow operations that is
constructed when `f` is called in a graph-building context.
_Python Control Flow_
The structure of many machine learning computations depends upon whether one is
training or validating, and it is common to nest specialized logic under `if
training:` blocks. By mapping each input signature to a unique graph, `defun`
lets users transparently compile such code, as the following code snippet
demonstrates:
```python
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
@tf.contrib.eager.defun
def lossy_matmul(W, x, training=True):
outputs = tf.matmul(W, x)
if training:
outputs = tf.nn.dropout(outputs, keep_prob=0.2)
return outputs
W = tf.random.normal((3, 5))
x = tf.random.normal((5, 1))
# Executes a graph that applies dropout.
lossy_outputs = lossy_matmul(W, x, training=True)
# Executes a graph that does not apply dropout.
exact_outputs = lossy_matmul(W, x, training=False)
```
_TensorFlow Control Flow_
When `autograph` is `True`, data-dependent control flow is allowed as well.
Control flow statements that depend on `Tensor` values are staged into
corresponding TensorFlow ops. For example, the following code will work as
expected:
```python
@tf.contrib.eager.defun
def dynamic_rnn_loop(cell, seq):
state, output = cell.zero_state()
for input in seq:
state, output = cell(input, state)
return output
```
For more information see `tf.autograph`.
_Variables_
TensorFlow operations related to variable creation and initialization are
automatically lifted out of the graphs generated by `defun`. In practice, this
implies that variable creation and initialization only happen the first time
`F` is called, and that variables are reused every time thereafter. Many
TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the
first time they are called and reuse them thereafter. Automatic variable
lifting makes it possible to compile these APIs without extra effort, at the
cost of introducing a discrepancy between the semantics of executing Python
functions and their corresponding compiled functions. For example:
```python
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
def fn():
x = tf.Variable(0.0)
x.assign_add(1.0)
return x.read_value()
# `fn` is a Python function, so x is created, initialized, and destroyed upon
# every invocation
assert fn().numpy() == fn().numpy() == 1.0
compiled = tf.contrib.eager.defun(fn)
# Compiling `fn` with `defun` hoists all variables outside of the generated
# graph, so initialization happens exactly once.
assert compiled().numpy() == 1.0
assert compiled().numpy() == 2.0
```
Finally, because each input signature is bound to a unique graph, if your
Python function constructs `tf.Variable` objects, then each graph constructed
for that Python function will reference a unique set of variables. To
circumvent this problem, we recommend against compiling Python functions that
create `tf.Variable` objects. Instead, Python functions should either
lexically close over `tf.Variable` objects or accept them as arguments,
preferably encapsulated in an object-oriented container. If you must create
variables inside your Python function and you want each graph generated for it
to reference the same set of variables, add logic to your Python function that
ensures that variables are only created the first time it is called and are
reused for every subsequent invocation; note that this is precisely what
`tf.keras.layers.Layer` objects do, so we recommend using them to represent
variable-bearing computations whenever possible.
Args:
func: function to be compiled. If `func` is None, returns a
decorator that can be invoked with a single argument - `func`. The
end result is equivalent to providing all the arguments up front.
In other words, defun(input_signature=...)(func) is equivalent to
defun(func, input_signature=...). The former allows
the following use case:
@tf.contrib.eager.defun(input_signature=...)
def foo(...):
...
input_signature: A possibly nested sequence of
`tf.contrib.eager.TensorSpec` objects specifying the shapes and dtypes of
the Tensors that will be supplied to this function. If `None`, a separate
function is instantiated for each inferred input signature. If a
signature is specified, every input to `func` must be a `Tensor`, and
`func` cannot accept `**kwargs`.
autograph: Whether `func` should be compiled before
constructing the graph. See https://www.tensorflow.org/guide/autograph
for more information.
experimental_autograph_options: Experimental knobs (in the form of a tuple
of tensorflow.autograph.Feature values) to control behavior when
autograph=True.
experimental_relax_shapes: When true, argument shapes may be relaxed to
avoid unnecessary retracing.
Returns:
If `func` is not None, returns a callable that will execute the compiled
function (and return zero or more `tf.Tensor` objects).
If `func` is None, returns a decorator that, when invoked with a single
`func` argument, returns a callable equivalent to the case above.
Raises:
TypeError: If `input_signature` is neither `None` nor a sequence of
`tf.contrib.eager.TensorSpec` objects.
"""
return defun_with_attributes(
func=func,
input_signature=input_signature,
autograph=autograph,
experimental_autograph_options=experimental_autograph_options,
experimental_relax_shapes=experimental_relax_shapes)
def defun_with_attributes(func=None,
input_signature=None,
attributes=None,
autograph=True,
experimental_autograph_options=None,
experimental_relax_shapes=False):
"""Compiles a Python function into a callable TensorFlow graph.
This function supports adding extra function attributes. See detailed
documentation in defun(). Currently this is not exposed in the public API
since we don't expect users to use attributes directly, and attributes won't
work by themselves. This assumption might change in the future.
Args:
func: function to be compiled.
input_signature: same as defun()'s input_signature.
attributes: A dictionary of arguments which will be added to the function def
as attributes. Currently only primitive types are supported as values, and
only whitelisted attribute names are allowed. An unwhitelisted attribute name
or an unsupported value will result in a ValueError. `func_name` is one of
the whitelisted arguments; it is a Python string that sets the name for this
`ConcreteFunction` in the graph.
autograph: same as defun()'s autograph.
experimental_autograph_options: same as defun()'s
experimental_autograph_options.
experimental_relax_shapes: same as defun()'s experimental_relax_shapes
Returns:
Same as the return value of defun, with attributes added to the function in
graph.
"""
if input_signature is not None:
validate_signature(input_signature)
# TODO(apassos): deal with captured global state. Deal with control flow.
def decorated(function):
try:
if attributes:
name = attributes.pop("func_name", function.__name__)
else:
name = function.__name__
except AttributeError:
name = "function"
return tf_decorator.make_decorator(
function,
Function(
function,
name,
input_signature=input_signature,
attributes=attributes,
autograph=autograph,
autograph_options=experimental_autograph_options,
experimental_relax_shapes=experimental_relax_shapes))
# This code path is for the `foo = tfe.defun(foo, ...)` use case
if func is not None:
return decorated(func)
# This code path is for the
#
# @tfe.defun(...)
# def foo(...):
# ...
#
# use case, which is equivalent to `foo = tfe.defun(...)(foo)`
return decorated
# When a method is bound to objects of this type, it allows AutoGraph to
# recover a weak reference to the original method's self pointer, so that it
# can execute it consistently with class_method_to_instance_method's
# bound_method_wrapper.
# TODO(b/119246461): This is not pretty. Use a descriptor instead?
class TfMethodTarget(object):
"""Binding target for methods replaced by function and defun."""
def __init__(self, target, original_python_function):
self.weakrefself_target__ = target
self.weakrefself_func__ = weakref.ref(original_python_function)
@property
def target(self):
return self.weakrefself_target__()
def call(self, args, kwargs):
wrapped_fn = self.weakrefself_func__()
if tf_inspect.ismethod(wrapped_fn):
wrapped_fn = six.get_unbound_function(wrapped_fn)
return wrapped_fn(self.weakrefself_target__(), *args, **kwargs)
def class_method_to_instance_method(original_function, instance):
"""Constructs a new `Function` with `self` bound."""
weak_instance = weakref.ref(instance)
# Note: while we could bind to a weakref proxy instead, that causes the
# bound method to be unhashable.
bound_method = types_lib.MethodType(
original_function.python_function,
TfMethodTarget(weak_instance, original_function.python_function))
# original_function is expected to be of one of the two `Function` types
# (defined either in function.py or def_function.py).
assert hasattr(original_function, "_name")
assert hasattr(original_function, "_autograph")
assert hasattr(original_function, "_function_spec")
assert hasattr(original_function, "python_function")
weak_bound_method_wrapper = None
def bound_method_wrapper(*args, **kwargs):
"""Wraps either a dummy MethodType or a converted AutoGraph function."""
# __wrapped__ allows AutoGraph to swap in a converted function.
strong_bound_method_wrapper = weak_bound_method_wrapper()
wrapped_fn = strong_bound_method_wrapper.__wrapped__
if wrapped_fn is strong_bound_method_wrapper.__original_wrapped__:
# If __wrapped__ was not replaced, then call original_function.
# TODO(mdan): For better consistency, use the wrapper's call().
wrapped_fn = original_function.python_function
if tf_inspect.ismethod(wrapped_fn):
wrapped_fn = six.get_unbound_function(wrapped_fn)
return wrapped_fn(weak_instance(), *args, **kwargs)
# If __wrapped__ was replaced, then it is always an unbound function.
# However, the replacer is still responsible for attaching self properly.
# TODO(mdan): Is it possible to do it here instead?
return wrapped_fn(*args, **kwargs)
weak_bound_method_wrapper = weakref.ref(bound_method_wrapper)
# pylint: disable=protected-access
# We make a dummy MethodType object to generate the correct bound method
# signature. The actual call is to a function with a weak reference to
# `instance`.
instance_func = type(original_function)(
tf_decorator.make_decorator(bound_method, bound_method_wrapper),
name=original_function._name,
autograph=original_function._autograph,
input_signature=original_function.input_signature)
# pylint: enable=protected-access
# And we wrap the function with tf_decorator so inspection works correctly
wrapped_instance_func = tf_decorator.make_decorator(
original_function.python_function, instance_func)
return wrapped_instance_func
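# Rough sketch of the resulting binding (hypothetical class):
#
#   class Foo(object):
#
#     @defun
#     def bar(self):
#       ...
#
#   foo = Foo()
#   foo.bar()  # `Function.__get__` calls class_method_to_instance_method,
#              # which binds `self` through a weakref via TfMethodTarget, so
#              # `foo` can still be garbage collected.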
class _FunctionGarbageCollector(object):
"""Cleans up cycles when a defun goes out of scope."""
def __init__(self, cache):
self._cache = cache
def __del__(self):
if func_graph_module is None or memory is None:
return
try:
while self._cache:
self._cache.popitem()
memory.dismantle_ordered_dict(self._cache)
except: # pylint: disable=bare-except
pass
class ConcreteFunctionGarbageCollector(object):
"""Cleans up reference cycles when a `ConcreteFunction` goes out of scope."""
def __init__(self, func_graph):
self._func_graph = func_graph
def release(self):
"""Call off the FuncGraph deletion."""
self._func_graph = None
def __del__(self):
if func_graph_module is None or memory is None or self._func_graph is None:
return
try:
func_graph_module.dismantle_func_graph(self._func_graph)
except: # pylint: disable=bare-except
pass
|
tensorflow-master
|
tensorflow/python/eager/function.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory leaks in eager execution.
It is possible that this test suite will eventually become flaky due to taking
too long to run (since the tests iterate many times), but for now they are
helpful for finding memory leaks since not all PyObject leaks are found by
introspection (test_util decorators). Please be careful adding new tests here.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import six
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.variables import Variable
from tensorflow.python.training import server_lib
# memory_profiler might not be available in the OSS version of TensorFlow.
try:
import memory_profiler # pylint:disable=g-import-not-at-top
except ImportError:
memory_profiler = None
class SingleLayerNet(keras.Model):
"""Simple keras model used to ensure that there are no leaks."""
def __init__(self):
super(SingleLayerNet, self).__init__()
self.fc1 = keras.layers.Dense(5)
def call(self, x):
return self.fc1(x)
def assert_no_leak(f, num_iters=100000, increase_threshold_absolute_mb=10):
"""Assert memory usage doesn't increase beyond given threshold for f."""
with context.eager_mode():
# Warm up.
f()
# Wait for background threads to start up and take over memory.
# FIXME: The nature of this test leaves few other options. Maybe there
# is a better way to do this.
time.sleep(4)
initial = memory_profiler.memory_usage(-1)[0]
for _ in six.moves.range(num_iters):
f()
increase = memory_profiler.memory_usage(-1)[0] - initial
assert increase < increase_threshold_absolute_mb, (
"Increase is too high. Initial memory usage: %f MB. Increase: %f MB. "
"Maximum allowed increase: %f") % (initial, increase,
increase_threshold_absolute_mb)
class MemoryTest(test.TestCase):
def testMemoryLeakAnonymousVariable(self):
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
def f():
inputs = Variable(array_ops.zeros([32, 100], dtypes.float32))
del inputs
assert_no_leak(f, num_iters=10000)
def testMemoryLeakInSimpleModelForwardOnly(self):
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
inputs = array_ops.zeros([32, 100], dtypes.float32)
net = SingleLayerNet()
def f():
with backprop.GradientTape():
net(inputs)
assert_no_leak(f)
def testMemoryLeakInSimpleModelForwardAndBackward(self):
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
inputs = array_ops.zeros([32, 100], dtypes.float32)
net = SingleLayerNet()
def f():
with backprop.GradientTape() as tape:
result = net(inputs)
tape.gradient(result, net.variables)
del tape
assert_no_leak(f)
def testMemoryLeakInFunction(self):
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
def f():
@def_function.function
def graph(x):
return x * x + x
graph(constant_op.constant(42))
assert_no_leak(f, num_iters=1000, increase_threshold_absolute_mb=30)
class RemoteWorkerMemoryTest(test.TestCase):
def __init__(self, method):
super(RemoteWorkerMemoryTest, self).__init__(method)
# used for remote worker tests
os.environ["TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC"] = "1"
self._cached_server = server_lib.Server.create_local_server()
self._cached_server_target = self._cached_server.target[len("grpc://"):]
def testMemoryLeakInLocalCopy(self):
if memory_profiler is None:
self.skipTest("memory_profiler required to run this test")
remote.connect_to_remote_host(self._cached_server_target)
# Run a function locally with the input on a remote worker and ensure we
# do not leak a reference to the remote tensor.
@def_function.function
def local_func(i):
return i
def func():
with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
x = array_ops.zeros([1000, 1000], dtypes.int32)
local_func(x)
assert_no_leak(func, num_iters=100, increase_threshold_absolute_mb=50)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/eager/memory_test.py
|
tensorflow-master
|
tensorflow/python/compiler/__init__.py
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for python.compiler.xla.xla."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python import summary
from tensorflow.python.compiler.xla import xla
from tensorflow.python.eager import def_function
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu_feed
_TRAIN = model_fn_lib.ModeKeys.TRAIN
_EVAL = model_fn_lib.ModeKeys.EVAL
_EXPECTED_LOSS = 1
_EXPECTED_FEATURE = 2
_EXPECTED_LABEL = 3
class XLACompileContextTest(test.TestCase, parameterized.TestCase):
def create_test_xla_compile_context(self):
computation_name = ops.get_default_graph().unique_name('computation')
pivot = control_flow_ops.no_op(name=computation_name + '/pivot')
return xla.XLACompileContext(name=computation_name, pivot=pivot)
@test_util.run_v1_only('Testing graph mode behavior only')
def test_report_unsupported_operations_graph_mode(self):
"""Tests that unsupported operations are detected."""
context = self.create_test_xla_compile_context()
context.Enter()
dummy_tensor = constant_op.constant(1.1)
audio_summary = summary.audio('audio_summary', dummy_tensor, 0.5)
histogram_summary = summary.histogram('histogram_summary', dummy_tensor)
image_summary = summary.image('image_summary', dummy_tensor)
scalar_summary = summary.scalar('scalar_summary', dummy_tensor)
tensor_summary = summary.tensor_summary('tensor_summary', dummy_tensor)
summary.merge(
[
audio_summary, histogram_summary, image_summary, scalar_summary,
tensor_summary
],
name='merge_summary')
logging_ops.Print(dummy_tensor, [dummy_tensor], name='print_op')
context.Exit()
unsupported_ops_names = [op.name for op in context._unsupported_ops]
self.assertEqual(unsupported_ops_names, [
u'audio_summary', u'histogram_summary', u'image_summary',
u'scalar_summary', u'tensor_summary', u'merge_summary/merge_summary',
u'print_op'
])
@test_util.run_v1_only('Testing graph mode behavior only')
def test_resource_variable_graph_mode(self):
"""Tests that resource variable usage is allowed."""
a = variable_scope.get_variable(
name='variable_a', use_resource=True, initializer=1)
context = self.create_test_xla_compile_context()
context.Enter()
a.assign(2)
context.Exit()
def test_resource_variable_in_function(self):
"""Tests that resource variable usage is allowed."""
a = variable_scope.get_variable(
name='variable_a', use_resource=True, initializer=1)
@def_function.function
def func():
context = self.create_test_xla_compile_context()
context.Enter()
o = a.assign(2)
context.Exit()
return o
self.assertEqual(self.evaluate(func()), 2)
@test_util.run_v1_only('Testing v1-only ref variable handling.')
def test_non_resource_variable_error(self):
"""Tests that non-resource variable usage is disallowed."""
a = variable_scope.get_variable(
name='variable_a', shape=(1), use_resource=False)
context = self.create_test_xla_compile_context()
context.Enter()
with self.assertRaisesRegexp(
NotImplementedError, 'Non-resource Variables are not supported inside '
r'XLA computations \(operator name: Assign\)'):
state_ops.assign(a, a + 1)
context.Exit()
@test_util.build_as_function_and_v1_graph
def test_nested_xla_compile_error(self):
"""Tests that nested XLA computation leads to fatal error."""
context1 = self.create_test_xla_compile_context()
context1.Enter()
context2 = self.create_test_xla_compile_context()
context2.Enter()
with self.assertRaisesRegexp(ValueError,
'XLA compiled computations cannot be nested'):
constant_op.constant(1)
context2.Exit()
context1.Exit()
@test_util.build_as_function_and_v1_graph
def test_xla_compile_attr(self):
"""Tests that ops are tagged with XLA compile ID attribute."""
context = self.create_test_xla_compile_context()
context.Enter()
op = constant_op.constant(1)
context.Exit()
self.assertIn('_xla_compile_id', op.op.node_def.attr)
@test_util.build_as_function_and_v1_graph
def test_op_without_input(self):
"""Tests that ops without inputs depend on pivot correctly."""
context = self.create_test_xla_compile_context()
context.Enter()
op = constant_op.constant(1)
context.Exit()
self.assertIn(context._pivot, op.op.control_inputs)
@test_util.run_v1_only('Testing graph mode behavior only')
def test_external_control_edges_graph_mode(self):
"""Tests that external control edges are handled correctly."""
i = constant_op.constant(1)
op1 = constant_op.constant(1)
with ops.control_dependencies([op1]):
op2 = constant_op.constant(1)
self.assertIn(op1.op, op2.op.control_inputs)
def while_body(i):
del i # unused
context = self.create_test_xla_compile_context()
context.Enter()
with ops.control_dependencies([op1]):
op3 = constant_op.constant(1)
context.Exit()
self.assertNotIn(op1.op, op3.op.control_inputs)
return op3
control_flow_ops.while_loop(
cond=lambda i: math_ops.less(i, 10), body=while_body, loop_vars=[i])
@test_util.build_as_function_and_v1_graph
def test_op_output_marked_as_seen(self):
"""Tests that any op output is marked as seen in context."""
context = self.create_test_xla_compile_context()
context.Enter()
op = constant_op.constant(1)
context.Exit()
self.assertIn(op.name, context._values)
@test_util.build_as_function_and_v1_graph
def test_op_is_in_context(self):
"""Tests that XLACompileContext is recognized as an XLA context."""
op1 = constant_op.constant(1)
context = self.create_test_xla_compile_context()
context.Enter()
op2 = constant_op.constant(2)
context.Exit()
self.assertFalse(control_flow_util.IsInXLAContext(op1.op))
self.assertTrue(control_flow_util.IsInXLAContext(op2.op))
@test_util.build_as_function_and_v1_graph
def test_op_prevent_feeding(self):
"""Tests that ops created inside XLACompileContext can not be fed."""
context = self.create_test_xla_compile_context()
context.Enter()
op = constant_op.constant(1)
context.Exit()
self.assertFalse(op.graph.is_feedable(op.op))
@test_util.build_as_function_and_v1_graph
def test_op_prevent_fetching(self):
"""Tests that ops created inside XLACompileContext can not be fetched."""
context = self.create_test_xla_compile_context()
context.Enter()
op = constant_op.constant(1)
context.Exit()
self.assertFalse(op.graph.is_fetchable(op.op))
class XlaCompileTest(test.TestCase):
@test_util.run_v2_only
def test_xla_compile_eager(self):
"""Tests that xla.compile raises proper exception when used eagerly."""
def computation(a, b):
return a + b
self.assertEqual(self.evaluate(xla.compile(computation, [1, 2])[0]), 3)
def test_xla_compile_in_function(self):
"""Tests that xla.compile works in tf.function."""
@def_function.function
def func_wrapper(a):
def compute(a):
return a + 1
return xla.compile(compute, [a])
self.assertEqual(self.evaluate(func_wrapper(1))[0], 2)
def test_xla_compile_write_variable_in_function(self):
"""Tests that xla.compile works with variable in tf.function."""
a = variable_scope.get_variable(
name='variable_a', use_resource=True, initializer=1)
@def_function.function
def func_wrapper():
def compute():
a.assign_add(1)
a.assign_sub(2)
return a.read_value()
return xla.compile(compute)
self.evaluate(a.initializer)
self.assertEqual(self.evaluate(func_wrapper())[0], 0)
class CheckFunctionArgumentCountTest(test.TestCase):
def test_simple(self):
"""Tests that arg checker works for functions with no varargs or defaults.
"""
def func(x, y, z):
return x + y + z
self.assertEqual(None, xla.check_function_argument_count(func, 3, None))
self.assertEqual('exactly 3 arguments',
xla.check_function_argument_count(func, 2, None))
queue = tpu_feed.InfeedQueue(2)
self.assertEqual(None, xla.check_function_argument_count(func, 1, queue))
self.assertEqual('exactly 3 arguments',
xla.check_function_argument_count(func, 2, queue))
def test_default_args(self):
"""Tests that arg checker works for a function with no varargs."""
def func(x, y, z=17):
return x + y + z
self.assertEqual(None, xla.check_function_argument_count(func, 3, None))
self.assertEqual(None, xla.check_function_argument_count(func, 2, None))
self.assertEqual('at least 2 arguments',
xla.check_function_argument_count(func, 1, None))
self.assertEqual('at most 3 arguments',
xla.check_function_argument_count(func, 4, None))
queue = tpu_feed.InfeedQueue(1)
self.assertEqual(None, xla.check_function_argument_count(func, 2, queue))
self.assertEqual(None, xla.check_function_argument_count(func, 1, queue))
self.assertEqual('at least 2 arguments',
xla.check_function_argument_count(func, 0, queue))
self.assertEqual('at most 3 arguments',
xla.check_function_argument_count(func, 4, queue))
def test_var_args(self):
"""Tests that arg checker works for a function with varargs."""
def func(x, y, *z):
return x + y + len(z)
self.assertEqual(None, xla.check_function_argument_count(func, 2, None))
self.assertEqual(None, xla.check_function_argument_count(func, 3, None))
self.assertEqual(None, xla.check_function_argument_count(func, 4, None))
self.assertEqual('at least 2 arguments',
xla.check_function_argument_count(func, 1, None))
queue = tpu_feed.InfeedQueue(1)
self.assertEqual(None, xla.check_function_argument_count(func, 1, queue))
self.assertEqual(None, xla.check_function_argument_count(func, 2, queue))
self.assertEqual(None, xla.check_function_argument_count(func, 3, queue))
self.assertEqual('at least 2 arguments',
xla.check_function_argument_count(func, 0, queue))
def test_var_args_and_defaults(self):
"""Tests that arg checker works for a function with varargs and defaults."""
def func(x, y, z=17, *q): # pylint: disable=keyword-arg-before-vararg
return x + y + z + len(q)
self.assertEqual(None, xla.check_function_argument_count(func, 2, None))
self.assertEqual(None, xla.check_function_argument_count(func, 3, None))
self.assertEqual(None, xla.check_function_argument_count(func, 4, None))
self.assertEqual(None, xla.check_function_argument_count(func, 5, None))
self.assertEqual('at least 2 arguments',
xla.check_function_argument_count(func, 1, None))
queue = tpu_feed.InfeedQueue(1)
self.assertEqual(None, xla.check_function_argument_count(func, 1, queue))
self.assertEqual(None, xla.check_function_argument_count(func, 2, queue))
self.assertEqual(None, xla.check_function_argument_count(func, 3, queue))
self.assertEqual(None, xla.check_function_argument_count(func, 4, queue))
self.assertEqual('at least 2 arguments',
xla.check_function_argument_count(func, 0, queue))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/xla/xla_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for controlling the Tensorflow/XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import tf_export
_XLA_SCOPE_KEY = ("__xla_scope",)
class _XlaScope(object):
"""Keeps track of previous XLA scope calls, and depth of current call."""
def __init__(self, count, depth):
self.count = count
self.depth = depth
@contextlib.contextmanager
@tf_export("xla.experimental.jit_scope")
def experimental_jit_scope(compile_ops=True, separate_compiled_gradients=False):
"""Enable or disable JIT compilation of operators within the scope.
NOTE: This is an experimental feature.
The compilation is a hint and only supported on a best-effort basis.
Example usage:
with tf.xla.experimental.jit_scope():
c = tf.matmul(a, b) # compiled
with tf.xla.experimental.jit_scope(compile_ops=False):
d = tf.matmul(a, c) # not compiled
with tf.xla.experimental.jit_scope(
compile_ops=lambda node_def: 'matmul' in node_def.op.lower()):
e = tf.matmul(a, b) + d # matmul is compiled, the addition is not.
Example of separate_compiled_gradients:
# In the example below, the computations for f, g and h will all be compiled
# in separate scopes.
with tf.xla.experimental.jit_scope(
separate_compiled_gradients=True):
f = tf.matmul(a, b)
g = tf.gradients([f], [a, b], name='mygrads1')
h = tf.gradients([f], [a, b], name='mygrads2')
Args:
compile_ops: Whether to enable or disable compilation in the scope.
Either a Python bool, or a callable that accepts the parameter
`node_def` and returns a python bool.
separate_compiled_gradients: If true put each gradient subgraph into a
separate compilation scope. This gives fine-grained control over which
portions of the graph will be compiled as a single unit. Compiling
gradients separately may yield better performance for some graphs.
The scope is named based on the scope of the forward computation as well
as the name of the gradients. As a result, the gradients will be compiled
in a scope that is separate from both the forward computation, and from
other gradients.
Raises:
RuntimeError: if called when eager execution is enabled.
Yields:
The current scope, enabling or disabling compilation.
"""
if context.executing_eagerly():
raise RuntimeError("xla.experimental.jit_scope is not supported when eager "
"execution is enabled. Try use it inside tf.function.")
if callable(compile_ops):
def xla_compile(node_def):
return attr_value_pb2.AttrValue(b=compile_ops(node_def))
else:
xla_compile = attr_value_pb2.AttrValue(b=compile_ops)
attrs = {
"_XlaCompile":
xla_compile,
"_XlaSeparateCompiledGradients":
attr_value_pb2.AttrValue(b=bool(separate_compiled_gradients))
}
# Find the singleton counter for the current scoped graph. If it
# doesn't exist, create one.
xla_scope_counter = ops.get_collection(_XLA_SCOPE_KEY)
if not xla_scope_counter:
xla_scope_counter = _XlaScope(0, 0)
ops.add_to_collection(_XLA_SCOPE_KEY, xla_scope_counter)
else:
xla_scope_counter = xla_scope_counter[0]
if xla_scope_counter.depth == 0:
# If we're at the root xla scope, we can increase the counter so
# future calls to jit_scope use a different scope value.
# If we're already within a scope, we'll be fusing using the scope
# controlled by the parent.
attrs["_XlaScope"] = attr_value_pb2.AttrValue(
s=("jit_scope_%d" % xla_scope_counter.count).encode())
xla_scope_counter.count += 1
xla_scope_counter.depth += 1
# pylint: disable=protected-access
with ops.get_default_graph()._attr_scope(attrs):
yield
# pylint: enable=protected-access
xla_scope_counter.depth -= 1
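# Editor's illustrative sketch (not part of the original module): a minimal,
# hedged example of driving `experimental_jit_scope` with both a boolean and a
# per-node callable `compile_ops`. The tensor values, the graph setup, and the
# helper name below are assumptions for demonstration only.
def _example_jit_scope_usage():
  """Builds a tiny graph exercising boolean and callable `compile_ops`."""
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import math_ops
  with ops.Graph().as_default():
    a = constant_op.constant([[1.0]])
    b = constant_op.constant([[2.0]])
    with experimental_jit_scope(compile_ops=True):
      c = math_ops.matmul(a, b)  # marked _XlaCompile=True
    with experimental_jit_scope(
        compile_ops=lambda node_def: 'matmul' in node_def.op.lower()):
      d = math_ops.matmul(a, c)  # MatMul matches the predicate: compiled
      e = math_ops.add(d, b)     # Add does not match: left uncompiled
    return c, d, e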
|
tensorflow-master
|
tensorflow/python/compiler/xla/jit.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for controlling the Tensorflow/XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.compiler.xla import jit
from tensorflow.python.compiler.xla import xla
# pylint: enable=unused-import
|
tensorflow-master
|
tensorflow/python/compiler/xla/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python.compiler.xla.jit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.compiler.xla import jit
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import function
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_REGISTERED_OPS = op_def_registry.get_registered_ops()
def enable_jit_nonstateful(node_def):
try:
return not _REGISTERED_OPS[node_def.op].is_stateful
except KeyError:
raise ValueError("Unregistered op being created: %s" % node_def)
class JITTest(test.TestCase, parameterized.TestCase):
def compute(self, use_jit, compute_fn):
random_seed.set_random_seed(1234)
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(use_jit):
r = compute_fn()
sess.run(variables.global_variables_initializer())
return (r, sess.run(r))
@test_util.run_v2_only
def testJITInEager(self):
with self.assertRaisesRegexp(
RuntimeError, "xla.experimental.jit_scope is not supported when eager "
"execution is enabled. Try use it inside tf.function."):
with jit.experimental_jit_scope(True):
constant_op.constant(1)
@test_util.build_as_function_and_v1_graph
def testJITCreateOpsLambda(self):
"""Test several ways of customizing the compilation attribute."""
def create_ops():
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(
-0.1, 0.1, seed=2)):
inputs = random_ops.random_uniform((1,), seed=1)
return inputs
v_false_1_t, v_false_1 = self.compute(False, create_ops)
_, v_false_2 = self.compute(False, create_ops)
v_true_1_t, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
_, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
v_all_true_t, _ = self.compute(True, create_ops)
self.assertFalse(v_false_1_t.op.get_attr("_XlaCompile"))
v_true_1_t_sampler_op = v_true_1_t.graph.get_operation_by_name(
"root/random_uniform/RandomUniform")
v_all_true_t_sampler_op = v_all_true_t.graph.get_operation_by_name(
"root/random_uniform/RandomUniform")
self.assertFalse(v_true_1_t_sampler_op.get_attr("_XlaCompile"))
self.assertTrue(v_all_true_t_sampler_op.get_attr("_XlaCompile"))
self.assertTrue(v_true_1_t.op.get_attr("_XlaCompile"))
self.assertTrue(v_all_true_t.op.get_attr("_XlaCompile"))
# Additionally ensure that where no JIT compilation happens on the
# random_uniform op, the output values are identical to the case
# where no JIT compilation happens anywhere.
self.assertAllClose(v_false_1, v_false_2)
self.assertAllClose(v_true_1, v_true_2)
self.assertAllClose(v_false_1, v_true_1)
@test_util.build_as_function_and_v1_graph
def testJITXlaScope(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True):
# XlaScope 0
a1 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope 1
a2 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope still 1, depth 1
a3 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope still 1, depth 2
a4 = constant_op.constant(1)
# XlaScope still 1, depth 1
a5 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope now 2, depth 0
a6 = constant_op.constant(1)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a3.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a4.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a5.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_2", a6.op.get_attr("_XlaScope"))
@test_util.build_as_function_and_v1_graph
def testJITVariableSeed(self):
"""Test that the stateful initializer is not marked for compilation.
XLA does not currently support seeded initialization and XLA initializers
    therefore return different values than non-XLA counterparts. Here we
    ensure that if we disable JIT compilation for the initializers, we get
    the same variable values as if no JIT compilation happened.
"""
def create_ops():
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(
-0.1, 0.1, seed=2)):
inputs = variable_scope.get_variable("var", (1,))
return inputs
_, v_false_1 = self.compute(False, create_ops)
_, v_false_2 = self.compute(False, create_ops)
_, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
_, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
self.assertAllClose(v_false_1, v_false_2)
self.assertAllClose(v_true_1, v_true_2)
self.assertAllClose(v_false_1, v_true_1)
@test_util.build_as_function_and_v1_graph
def testDefunNoJitScope(self):
with self.session(graph=ops.Graph()):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
# No enclosing jit scope so function sets its own value for _XlaScope.
self.assertEqual(b"function_mulop", func_attrs["_XlaScope"].s)
@test_util.build_as_function_and_v1_graph
def testDefunInheritsJitScope(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
# Ensure _XlaScope is inherited from enclosing context.
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
class CompilationEnabledInGradientTest(test.TestCase, parameterized.TestCase):
@test_util.build_as_function_and_v1_graph
def testCompilationInGradient(self):
with self.cached_session():
x = constant_op.constant([[3.]])
y_nc = math_ops.matmul(x, x, name="not_compiled")
with jit.experimental_jit_scope():
y_c = math_ops.matmul(y_nc, y_nc, name="compiled")
x_grads = gradients.gradients([y_c], [x])[0]
operations = x.graph.get_operations()
c_grad_ops = [
op for op in operations if "gradients/compiled" in op.name]
nc_grad_ops = [
op for op in operations if "gradients/not_compiled" in op.name]
self.assertGreater(len(c_grad_ops), 0)
self.assertGreater(len(nc_grad_ops), 0)
for cg in c_grad_ops:
self.assertTrue(cg.get_attr("_XlaCompile"))
for ncg in nc_grad_ops:
with self.assertRaisesRegexp(ValueError, "[Nn]o attr named"):
ncg.get_attr("_XlaCompile")
# d/dx (x ** 4) = 4 * (x ** 3)
self.assertAllClose([[108]], x_grads.eval())
@test_util.build_as_function_and_v1_graph
def testCompilationGradientScopeNames(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope():
# XlaScope 0
a1 = constant_op.constant([[1.]])
a1t = math_ops.matmul(a1, a1)
with jit.experimental_jit_scope():
# XlaScope 1
a2 = constant_op.constant([[1.]])
a2t = math_ops.matmul(a2, a2)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
grad_a1 = grad_a1.op.inputs[0]
grad_a2 = grad_a2.op.inputs[0]
self.assertTrue(grad_a1.op.get_attr("_XlaCompile"))
self.assertTrue(grad_a2.op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0", grad_a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", grad_a2.op.get_attr("_XlaScope"))
@test_util.build_as_function_and_v1_graph
def testCompilationSeparateGradientScopeNames(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True, separate_compiled_gradients=True):
# XlaScope 0
a1 = constant_op.constant([[1.]])
a1t = math_ops.matmul(a1, a1)
with jit.experimental_jit_scope(True, separate_compiled_gradients=True):
# XlaScope 1
a2 = constant_op.constant([[1.]])
a2t = math_ops.matmul(a2, a2)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
grad_a1 = grad_a1.op.inputs[0]
grad_a2 = grad_a2.op.inputs[0]
self.assertTrue(grad_a1.op.get_attr("_XlaCompile"))
self.assertTrue(grad_a2.op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0_grad_GA",
grad_a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1_grad_GB",
grad_a2.op.get_attr("_XlaScope"))
@test_util.build_as_function_and_v1_graph
def testPlaysNicelyWithDefun(self):
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(True):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
g_r = gradients.gradients(r, x, name="GA")[0]
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
# Ensure the gradient (SymbolicGradient) is compiled, with the same
# _XlaScope as the function itself.
grad_op = g_r.op.inputs[0].op
self.assertTrue(grad_op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0", grad_op.get_attr("_XlaScope"))
# Ensure the ops run: grad(x1*x1) = 2*x1
self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
@test_util.build_as_function_and_v1_graph
def testPlaysNicelyWithDefunSeparateGradientScope(self):
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(True):
@function.Defun(
compiled=True, noinline=True, separate_compiled_gradients=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
g_r = gradients.gradients(r, x, name="GA")[0]
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
# Ensure the gradient (SymbolicGradient) is compiled, with a different
# _XlaScope from the function itself.
grad_op = g_r.op.inputs[0].op
self.assertTrue(grad_op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0_grad_GA",
grad_op.get_attr("_XlaScope"))
# Ensure the ops run: grad(x1*x1) = 2*x1
self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/xla/jit_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""xla is an experimental library that provides XLA support APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.jit.ops import xla_ops
from tensorflow.compiler.jit.ops import xla_ops_grad # pylint: disable=unused-import
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.distribute import summary_op_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_XLA_COMPILE_ATTR = '_xla_compile_id'
_MAX_WARNING_LINES = 5
# Operations that indicate some error in the user's graph. For example, an XLA
# computation should not have any Placeholder op.
_BLACKLISTED_OPS = set([
'Placeholder',
])
# XLA doesn't currently support reading of intermediate tensors, thus some ops
# are not supported.
_UNSUPPORTED_OPS = set([
'AudioSummary',
'AudioSummaryV2',
'HistogramSummary',
'ImageSummary',
'MergeSummary',
'Print',
'ScalarSummary',
'TensorSummary',
'TensorSummaryV2',
])
@tf_export('xla.experimental.compile')
def compile(computation, inputs=None): # pylint: disable=redefined-builtin
"""Builds an operator that compiles and runs `computation` with XLA.
NOTE: In eager mode, `computation` will have `@tf.function` semantics.
Args:
computation: A Python function that builds a computation to apply to the
input. If the function takes n inputs, 'inputs' should be a list of n
tensors.
`computation` may return a list of operations and tensors. Tensors must
come before operations in the returned list. The return value of
`compile` is a list of tensors corresponding to the tensors from the
output of `computation`.
All `Operation`s returned from `computation` will be executed when
evaluating any of the returned output tensors.
inputs: A list of inputs or `None` (equivalent to an empty list). Each input
can be a nested structure containing values that are convertible to
      tensors. Note that passing an N-dimensional list of compatible values
      will result in an N-dimensional list of scalar tensors rather than a
      single rank-N tensor. If you need different behavior, convert part of
      inputs to tensors with `tf.convert_to_tensor`.
Returns:
Same data structure as if computation(*inputs) is called directly with some
exceptions for correctness. Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate into removing these special cases.
Raises:
RuntimeError: if called when eager execution is enabled.
"""
if context.executing_eagerly():
@def_function.function
def xla_compile_wrapper():
return _compile_internal(computation, inputs)
return xla_compile_wrapper()
return _compile_internal(computation, inputs)
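# Editor's illustrative sketch (not part of the original module): a minimal
# graph-mode use of `compile` above. The computation body, input values, and
# helper name are assumptions for demonstration only.
def _example_compile_usage():
  """Compiles a tiny two-output computation with xla.compile."""
  def computation(x, y):
    return x + y, x * y
  with ops.Graph().as_default():
    # The return value mirrors the structure of `computation`'s outputs.
    added, multiplied = compile(computation, inputs=[1.0, 2.0])
    return added, multiplied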
class XLACompileContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside an XLA computation cluster.
  THIS IS ONLY FOR TENSORFLOW INTERNAL IMPLEMENTATION, DO NOT USE DIRECTLY.
The primary role of `XLACompileContext` is to mark operators inside a
xla.compile() computation with attribute "_xla_compile_id=XYZ", where XYZ is
a unique name.
`ControlFlowContext` is used to perform the annotation since it integrates
with Tensorflow constructs like ResourceVariables. For example, if a
`ResourceVariable` is constructed inside a xla.compile() block, the
`ResourceVariable` implementation can use
`with ops.control_dependencies(None)` to build the variable's definition
outside the compiled computation.
"""
def __init__(self, name, pivot):
"""Builds a new XLACompileContext.
Args:
name: a unique name for the context, used to populate the
`_xla_compile_id` attribute.
pivot: a pivot node. Nodes in the XLACompileContext that do not have any
inputs will have a control dependency on the pivot node. This ensures
that nodes are correctly included in any enclosing control flow
contexts.
"""
super(XLACompileContext, self).__init__()
self._name = name
self._name_as_bytes = compat.as_bytes(name)
self._unsupported_ops = []
self._pivot = pivot
def report_unsupported_operations(self):
if self._unsupported_ops:
op_str = '\n'.join([
' %s (%s)' % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES]
])
logging.warning('%d unsupported operations found: \n%s',
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning('... and %d more',
len(self._unsupported_ops) - _MAX_WARNING_LINES)
def _RemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
internal_control_inputs = []
external_control_inputs = []
for x in op.control_inputs:
# pylint: disable=protected-access
is_internal_op = False
ctxt = x._get_control_flow_context()
while ctxt is not None:
if ctxt == self:
is_internal_op = True
break
ctxt = ctxt._outer_context
if is_internal_op:
internal_control_inputs.append(x)
else:
external_control_inputs.append(x)
# pylint: enable=protected-access
# pylint: disable=protected-access
op._remove_all_control_inputs()
op._add_control_inputs(internal_control_inputs)
# pylint: enable=protected-access
return internal_control_inputs, external_control_inputs
def AddOp(self, op):
"""Create op in XLACompileContext and notifies outer context recursively."""
# pylint: disable=protected-access
if op.type in _BLACKLISTED_OPS:
logging.error(
'Operation of type %s (%s) is not supported in XLA. Execution will '
'fail if this op is used in the graph. ', op.type, op.name)
# TODO(ycao): Automatically disable summaries instead of reporting them.
if op.type in _UNSUPPORTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
'Non-resource Variables are not supported inside XLA computations '
'(operator name: %s)' % op.name)
if _XLA_COMPILE_ATTR in op.node_def.attr:
raise ValueError('XLA compiled computations cannot be nested, (operator '
'name: %s)' % op.name)
op._set_attr(
_XLA_COMPILE_ATTR, attr_value_pb2.AttrValue(s=self._name_as_bytes))
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
# Remove any control edges from outer control flow contexts. These may cause
# mismatched frame errors. An example is when one of op's inputs is
# generated in a different While control flow context.
(internal_control_inputs,
external_control_inputs) = self._RemoveExternalControlEdges(op)
if not op.inputs:
# Add a control edge from the control pivot to this op.
if not internal_control_inputs:
# pylint: disable=protected-access
op._add_control_input(self._pivot)
# pylint: enable=protected-access
else:
for index in xrange(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x) # pylint: disable=protected-access
if external_control_inputs:
# Use an identity to pull control inputs as data inputs. Note that we
# ignore ops which don't have outputs. TODO(phawkins): fix that.
with ops.control_dependencies(None):
self.Enter()
external_control_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_control_inputs
if x.outputs
]
self.Exit()
# pylint: disable=protected-access
op._add_control_inputs(external_control_inputs)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
context = self
while context is not None:
# pylint: disable=protected-access
context._values.update(output_names)
context = context._outer_context
# pylint: enable=protected-access
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context.
result = self._external_values.get(val.name)
return val if result is None else result
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddInnerOp(self, op):
self.AddOp(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
# Define the gradient loop state associated with the XLACompileContext to
# be None as the XLACompileContext does not get nested nor does the
# grad_state outside the XLACompileContext affect the graph inside so the
# grad_state should be as if this is the top-level gradient state.
return None
@property
def back_prop(self):
"""Forwards to the enclosing while context, if any."""
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def _compile_internal(computation, inputs=None):
"""Builds graph operators that compiles and symbolically executes computation.
Args:
computation: A Python function that builds the computation to compile and
execute.
inputs: A list of inputs or `None` (equivalent to an empty list). Each input
can be a nested structure containing values that are convertible to
      tensors. Note that passing an N-dimensional list of compatible values
      will result in an N-dimensional list of scalar tensors rather than a
      single rank-N tensor. If you need different behavior, convert part of
      inputs to tensors with `tf.convert_to_tensor`.
Returns:
Same data structure as if computation(*inputs) is called directly with some
exceptions for correctness. Exceptions include: 1) None output 2) Single
value output 3) Operation-only outputs
Raises:
    ValueError: If any element in computation outputs is neither an Operation
      nor a value that can be converted to a tensor.
ValueError: If computation outputs is non-flat and contains any Operations.
TypeError: If `inputs` is not a list or tuple.
"""
if inputs is None:
inputs = []
if not isinstance(inputs, collections.Sequence):
    raise TypeError('inputs must be a list or tuple')
# Flatten inputs.
flat_inputs = nest.flatten(inputs)
# Converts inputs to Tensors.
flat_inputs = [ops.convert_to_tensor(x) for x in flat_inputs]
cluster_name = ops.get_default_graph().unique_name('cluster')
pivot = control_flow_ops.no_op(name=cluster_name + '/pivot')
context = XLACompileContext(name=cluster_name, pivot=pivot)
try:
context.Enter()
# Add identity ops so even unused inputs are 'consumed' by the
# computation.
flat_inputs = [
array_ops.identity(x, name='input_{}'.format(i))
for i, x in enumerate(flat_inputs)
]
# Re-pack flat_inputs in same structure as 'inputs'.
computation_inputs = nest.pack_sequence_as(
structure=inputs, flat_sequence=flat_inputs)
# Only resource variables work inside an XLA computation, so turn on
# resource variables for the computation.
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
vscope.set_use_resource(True)
with _disable_summary_context():
outputs = computation(*computation_inputs)
# Restore variable scope after computation.
vscope.set_use_resource(saved_use_resource)
outputs_is_flat = is_flat(outputs)
if outputs_is_flat:
output_tensors, control_deps = _postprocess_flat_outputs(outputs)
else:
output_tensors, control_deps = _postprocess_non_flat_outputs(outputs)
context.ExitResult(output_tensors)
finally:
context.report_unsupported_operations()
context.Exit()
# When XLA computation returns only operations and no tensors, a NoOp
# dependent on the operations in outputs is returned. Otherwise final
# outputs would be empty and there is no way to trigger returned
# operations.
if not output_tensors:
return control_flow_ops.group(control_deps, name='output_0')
output_tensors = [
xla_ops.xla_cluster_output(o, name='output{}'.format(i))
for i, o in enumerate(output_tensors)
]
with ops.control_dependencies(control_deps):
# Wraps the outputs in identity operators that carries control
# dependencies.
output_tensors = [
array_ops.identity(o, name='output_%d' % i)
for i, o in enumerate(output_tensors)
]
# If `computation` returned non-flat output structure, pack output tensors
# back into same structure.
if not outputs_is_flat:
output_tensors = nest.pack_sequence_as(
structure=outputs, flat_sequence=output_tensors)
return output_tensors
def is_flat(outputs):
"""Checks if outputs is a flat structure.
Following structures and values are considered flat:
1) None
2) A single object
3) A list or tuple of Tensors/Operations
The only structures that this function understands are sequences and
dictionaries. E.g. this means that if outputs contains a single
user-defined Object, it is considered to be flat. Errors are raised later on
if that Object cannot be converted to a Tensor.
Args:
outputs: Output from `computation` inside `xla.compile`.
Returns:
    A boolean indicating whether outputs is flat.
"""
# If outputs is a list or tuple, check if it has any nested structure. If
# there is, then outputs is non-flat.
if isinstance(outputs, collections.Sequence):
for o in outputs:
if isinstance(o, collections.Sequence) or isinstance(o, dict):
return False
# If outputs is a dict, it is non-flat.
if isinstance(outputs, dict):
return False
# Getting here means either outputs itself is a single non-structured value
# or it is a flat list of single non-structured values.
return True
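# Editor's illustrative sketch (not original code): how `is_flat` classifies a
# few representative outputs. Plain Python values stand in for Tensors and
# Operations here, since the function only inspects structure, not types.
def _example_is_flat():
  assert is_flat(None)             # None is considered flat
  assert is_flat(42)               # a single object is flat
  assert is_flat([1, 2, 3])        # a flat sequence is flat
  assert not is_flat([1, [2, 3]])  # a nested sequence is non-flat
  assert not is_flat({'a': 1})     # a dict is non-flat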
def _postprocess_flat_outputs(outputs):
"""Validates flat outputs and adds back device assignments.
Args:
outputs: Output from `computation` inside `xla.compile`.
Returns:
Tensors and Operations extracted from outputs.
"""
# Following code segment is to preserve legacy behavior. Previously we only
# supported flat outputs and thus for consistency it was nice to convert even
# single element into a tuple. But now that we support arbitrary output
# structure, this is no longer necessary.
# TODO(b/121383831): Migrate all legacy use cases and delete this special
# case.
# If the computation returns `None`, make it an empty tuple.
if outputs is None:
outputs = tuple()
# If the computation only returned one value, make it a tuple.
if not isinstance(outputs, collections.Sequence):
outputs = (outputs,)
# Append `no_op` here so that return value of this function always contains
# at least one op that can trigger XlaLaunch node.
outputs += (control_flow_ops.no_op(),)
try:
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
'XLA computation function return values must all either be Operations'
' or convertible to Tensors. Got error: "%s"' % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
'XLA computation function must return zero or more Tensor values '
'followed by zero or more Operations.')
new_output_tensors = []
for t in output_tensors:
with ops.device(t.device if t.device else ''):
new_output_tensors.append(array_ops.identity(t))
return new_output_tensors, output_operations
def _postprocess_non_flat_outputs(outputs):
"""Validates non-flat outputs and adds back device assignments.
Args:
outputs: Output from `computation` inside `xla.compile`.
Returns:
    Tensors extracted from outputs and an empty list, because Operations are
    not allowed in non-flat outputs.
"""
# Convert all non-Operation outputs to Tensors.
new_output_tensors = []
for o in nest.flatten(outputs):
if isinstance(o, ops.Operation):
raise ValueError(
'xla.compile does not support Operation as return value in non-flat '
'output structure. You can set returned Operations as control '
'dependencies of returned Tensors so Operations are triggered when '
'Tensors are evaluated. Operation found: "%s"' % o.name)
try:
o = ops.convert_to_tensor(o)
except Exception as e:
raise ValueError(
'XLA computation function return values must all either be '
'Operations or convertible to Tensors. Got error: "%s"' % str(e))
# Makes sure even pass-through inputs/outputs are touched in compile
# context by creating an Identity node inside compile context.
with ops.device(o.device if o.device else ''):
new_output_tensors.append(array_ops.identity(o))
return new_output_tensors, []
@contextlib.contextmanager
def _disable_summary_context():
"""Enters a context where all summary ops are skipped.
  Summaries are not yet supported in xla.compile(), so this context manager
  skips creating summary ops. This is a temporary workaround until XLA
  supports summary ops.
Yields:
None.
"""
original_skip_summary_func = summary_op_util.skip_summary
summary_op_util.skip_summary = lambda: True
try:
yield
finally:
summary_op_util.skip_summary = original_skip_summary_func
class _CapturedObject(object):
"""A placeholder to capture an object."""
def __init__(self):
self._object = None
def capture(self, o):
if self._object:
raise RuntimeError(
'InternalError: _CapturedObject can capture only once. Please file '
          'a bug.')
self._object = o
def get(self):
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
scaffold_fn = captured_scaffold_fn.get()
if not scaffold_fn:
return None
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
return scaffold
def check_function_argument_count(func, input_arity, infeed_queue):
"""Validate the number of input arguments to an XLA function.
Args:
func: the Python function that will be called to generate the body of an XLA
computation graph.
input_arity: the number of explicit arguments supplied by the caller.
infeed_queue: if not None, the infeed queue that will supply
additional arguments to the function.
Returns:
    None if the function can be called with the supplied number of
    arguments, or an error string if it cannot.
"""
def format_error(complaint, quantity):
    return '%s %d argument%s' % (complaint, quantity,
                                 '' if quantity == 1 else 's')
num_args_supplied = input_arity
if infeed_queue is not None:
num_args_supplied += infeed_queue.number_of_tuple_elements
arg_spec = tf_inspect.getargspec(func)
num_func_args = len(arg_spec.args)
if arg_spec.defaults is None:
num_func_defaults = 0
else:
num_func_defaults = len(arg_spec.defaults)
min_func_args = num_func_args - num_func_defaults
if num_args_supplied < min_func_args:
# The required number of arguments is not enough to call the function.
if num_func_defaults == 0 and arg_spec.varargs is None:
return format_error('exactly', num_func_args)
else:
return format_error('at least', min_func_args)
if arg_spec.varargs is None and num_args_supplied > num_func_args:
# The required number of arguments is too many to call the function.
if num_func_defaults == 0:
return format_error('exactly', num_func_args)
else:
return format_error('at most', num_func_args)
# Reaching here means either
# 1) There are varargs, func can accept any number of arguments greater than
# the minimum.
# 2) Number of supplied arguments falls in range of acceptable argument count
# of func.
return None
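# Editor's illustrative sketch (not original code): the expected results of
# `check_function_argument_count` for a function with one required and one
# defaulted argument; `None` means the call is valid. No infeed queue is used,
# and the helper name is an assumption for demonstration only.
def _example_argument_count_checks():
  def func(a, b=1):  # pylint: disable=unused-argument
    return None
  assert check_function_argument_count(func, 1, None) is None
  assert check_function_argument_count(func, 2, None) is None
  assert check_function_argument_count(func, 0, None) == 'at least 1 argument'
  assert check_function_argument_count(func, 3, None) == 'at most 2 arguments'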
|
tensorflow-master
|
tensorflow/python/compiler/xla/xla.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Exposes the Python wrapper conversion to trt_graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import platform
import tempfile
import six as _six
from tensorflow.compiler.tf2tensorrt import wrap_py_utils
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util.lazy_loader import LazyLoader
# Lazily load the op, since it's not available in cpu-only builds. Importing
# this at the top will cause tests that import TF-TRT to fail when they're
# built and run without CUDA/GPU.
gen_trt_ops = LazyLoader(
"gen_trt_ops", globals(),
"tensorflow.compiler.tf2tensorrt.ops.gen_trt_ops")
# Register TRT ops in python, so that when users import this module they can
# execute a TRT-converted graph without calling any of the methods in this
# module.
if wrap_py_utils.is_tensorrt_enabled():
if platform.system() == "Windows":
raise RuntimeError("Windows platform is not supported")
# This will call register_op_list() in
# tensorflow/python/framework/op_def_registry.py, but it doesn't register
# the op or the op kernel in C++ runtime.
gen_trt_ops.trt_engine_op # pylint: disable=pointless-statement
def _to_bytes(s):
"""Encode s if it is a sequence of chars."""
if isinstance(s, _six.text_type):
return s.encode("utf-8", errors="surrogateescape")
return s
def _to_string(s):
"""Decode s if it is a sequence of bytes."""
if isinstance(s, _six.binary_type):
return s.decode("utf-8")
return s
class GraphConverter(object):
"""Base class for offline converters to optimize SavedModels/GraphDefs.
A `GraphConverter` object encapsulates the environment to convert (optimize) a
TensorFlow SavedModel or GraphDef.
To create a custom GraphConverter:
```python
class MyGraphConverter(GraphConverter):
...
def get_rewriter_config(self):
my_rewriter_config = ...
return my_rewriter_config
```
Then to run the conversion without quantization calibration:
```python
my_converter = MyGraphConverter(input_saved_model_dir="my_dir")
converted_graph_def = my_converter.convert()
my_converter.save(output_saved_model_dir) # Optional
```
To run the conversion with quantization calibration:
```python
my_converter = MyGraphConverter(input_saved_model_dir="my_dir")
my_converter.convert()
# Run calibration 10 times.
converted_graph_def = my_converter.calibrate(
fetch_names=['output:0'],
num_runs=10,
feed_dict_fn=lambda: {'input:0': my_next_data()})
my_converter.save(output_saved_model_dir) # Optional
```
"""
# TODO(laigd): clean up the parameters.
def __init__(self,
input_saved_model_dir=None,
input_saved_model_tags=None,
input_saved_model_signature_key=None,
input_graph_def=None,
nodes_blacklist=None,
session_config=None):
"""Initialize the converter.
Args:
input_saved_model_dir: the directory to load the SavedModel which contains
        the input graph to transform. Used only when input_graph_def is None.
input_saved_model_tags: list of tags to load the SavedModel.
input_saved_model_signature_key: the key of the signature to optimize the
graph for.
input_graph_def: a GraphDef object containing a model to be transformed.
If set to None, the graph will be read from the SavedModel loaded from
input_saved_model_dir.
nodes_blacklist: list of node names to prevent the converter from
touching.
session_config: the ConfigProto used to create a Session. It's also used
as a template to create a RewriterConfig for conversion. If not
specified, a default ConfigProto will be used.
Raises:
ValueError: if the combination of the parameters is invalid.
"""
if input_graph_def and input_saved_model_dir:
raise ValueError(
"Can only specify one of input_graph_def and input_saved_model_dir")
if not input_graph_def and not input_saved_model_dir:
raise ValueError("Must specify one of input_graph_def and "
"input_saved_model_dir")
self._input_graph_def = input_graph_def
self._nodes_blacklist = nodes_blacklist
self._input_saved_model_dir = input_saved_model_dir
self._converted = False
self._grappler_meta_graph_def = None
self._input_saved_model_tags = (
input_saved_model_tags or [tag_constants.SERVING])
self._input_saved_model_signature_key = (
input_saved_model_signature_key or
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
self._session_config = session_config or config_pb2.ConfigProto()
# For calibration usage.
self._calibration_graph = None
self._calibration_sess = None
self._calibration_data_collected = False
def get_rewriter_config(self):
"""Returns a RewriterConfig proto for TRT transformation.
Returns:
A RewriterConfig proto which will be used to run the conversion using
Grappler.
"""
raise NotImplementedError("get_rewriter_config")
def _run_conversion(self):
"""Run Grappler's OptimizeGraph() tool to convert the graph."""
# Create custom ConfigProto for Grappler.
grappler_session_config = config_pb2.ConfigProto()
grappler_session_config.CopyFrom(self._session_config)
custom_rewriter_config = self.get_rewriter_config()
grappler_session_config.graph_options.rewrite_options.CopyFrom(
custom_rewriter_config)
# Run Grappler.
self._converted_graph_def = tf_optimizer.OptimizeGraph(
grappler_session_config,
self._grappler_meta_graph_def,
graph_id=b"tf_graph")
self._converted = True
def _add_nodes_blacklist(self):
if self._nodes_blacklist:
collection_def = self._grappler_meta_graph_def.collection_def["train_op"]
blacklist = collection_def.node_list.value
for i in self._nodes_blacklist:
if isinstance(i, ops.Tensor):
blacklist.append(_to_bytes(i.name))
else:
blacklist.append(_to_bytes(i))
def _convert_graph_def(self):
"""Convert the input GraphDef."""
graph = ops.Graph()
with graph.as_default():
importer.import_graph_def(self._input_graph_def, name="")
self._grappler_meta_graph_def = saver.export_meta_graph(
graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
self._add_nodes_blacklist()
self._run_conversion()
def _collections_to_keep(self, collection_keys):
# TODO(laigd): currently we use the collection key to filter out
# collections that depend on variable ops, but this may miss some
# other user-defined collections. A better way would be to use
# CollectionDef::NodeList for the filtering.
collections_to_remove = (
ops.GraphKeys._VARIABLE_COLLECTIONS + [
ops.GraphKeys.TRAIN_OP, ops.GraphKeys.WHILE_CONTEXT,
ops.GraphKeys.COND_CONTEXT
])
return [key for key in collection_keys if key not in collections_to_remove]
def _convert_saved_model(self):
"""Convert the input SavedModel."""
graph = ops.Graph()
with session.Session(graph=graph, config=self._session_config) as sess:
input_meta_graph_def = loader.load(sess, self._input_saved_model_tags,
self._input_saved_model_dir)
input_signature_def = input_meta_graph_def.signature_def[
self._input_saved_model_signature_key]
def _gather_names(tensor_info):
"""Get the node names from a TensorInfo."""
return set([tensor_info[key].name.split(":")[0] for key in tensor_info])
# Get input and outputs from all SignatureDef.
output_node_names = _gather_names(input_signature_def.inputs).union(
_gather_names(input_signature_def.outputs))
# Preserve nodes in collection
for collection_key in self._collections_to_keep(
input_meta_graph_def.collection_def):
for op in sess.graph.get_collection(collection_key):
if isinstance(op, ops.Operation):
output_node_names.add(op.name.split(":")[0])
# Freeze the variables in the SavedModel graph and copy the frozen
# graph over.
frozen_graph_def = graph_util.convert_variables_to_constants(
sess, sess.graph.as_graph_def(add_shapes=True),
list(output_node_names))
self._grappler_meta_graph_def = meta_graph_pb2.MetaGraphDef()
self._grappler_meta_graph_def.graph_def.CopyFrom(frozen_graph_def)
# Copy the collections that are not variables.
for collection_key in self._collections_to_keep(
input_meta_graph_def.collection_def):
self._grappler_meta_graph_def.collection_def[collection_key].CopyFrom(
input_meta_graph_def.collection_def[collection_key])
self._add_nodes_blacklist()
# Copy other information.
self._grappler_meta_graph_def.meta_info_def.CopyFrom(
input_meta_graph_def.meta_info_def)
self._grappler_meta_graph_def.signature_def[
self._input_saved_model_signature_key].CopyFrom(input_signature_def)
# TODO(laigd): maybe add back AssetFileDef.
self._run_conversion()
def convert(self):
"""Run the conversion.
Returns:
The converted GraphDef for TF 1.x, or the converted ConcreteFunction in TF
2.0+.
"""
assert not self._converted
if self._input_graph_def:
self._convert_graph_def()
else:
self._convert_saved_model()
return self._converted_graph_def
def calibrate(self,
fetch_names,
num_runs,
feed_dict_fn=None,
input_map_fn=None):
"""Run the calibration and return the calibrated GraphDef.
Args:
fetch_names: a list of output tensor name to fetch during calibration.
num_runs: number of runs of the graph during calibration.
feed_dict_fn: a function that returns a dictionary mapping input names (as
strings) in the GraphDef to be calibrated to values (e.g. Python list,
numpy arrays, etc). One and only one of `feed_dict_fn` and
`input_map_fn` should be specified.
input_map_fn: a function that returns a dictionary mapping input names (as
strings) in the GraphDef to be calibrated to Tensor objects. The values
of the named input tensors in the GraphDef to be calibrated will be
re-mapped to the respective `Tensor` values during calibration. One and
only one of `feed_dict_fn` and `input_map_fn` should be specified.
Raises:
ValueError: if the input combination is invalid.
RuntimeError: if this method is called in eager mode.
Returns:
The GraphDef after the calibration.
"""
assert self._converted
assert not self._calibration_sess
if context.executing_eagerly():
raise RuntimeError("Calibration for TF 2.0 is not supported yet.")
if (feed_dict_fn and input_map_fn) or (not feed_dict_fn and
not input_map_fn):
raise ValueError(
"Should specify one and only one of feed_dict_fn and input_map_fn.")
self._calibration_graph = ops.Graph()
with self._calibration_graph.as_default():
fetches = importer.import_graph_def(
self._converted_graph_def,
input_map=input_map_fn() if input_map_fn else None,
return_elements=fetch_names,
name="")
self._calibration_sess = session.Session(
graph=self._calibration_graph, config=self._session_config)
for _ in range(num_runs):
self._calibration_sess.run(
fetches, feed_dict=feed_dict_fn() if feed_dict_fn else None)
self.finalize_calibration()
return self._converted_graph_def
def finalize_calibration(self):
"""Clean up calibration resources and finalize the calibration.
Implementations need to close self._calibration_sess before returning.
"""
raise NotImplementedError("finalize_calibration")
def save(self, output_saved_model_dir):
"""Save the converted graph as a SavedModel.
Args:
output_saved_model_dir: construct a SavedModel using the converted
GraphDef and save it to the specified directory. This option only works
when the input graph is loaded from a SavedModel, i.e. when
input_saved_model_dir is specified and input_graph_def is None in
__init__().
Raises:
ValueError: if the input to the converter is a GraphDef instead of a
SavedModel.
"""
assert self._converted
if self._input_graph_def:
raise ValueError(
"Not able to save to a SavedModel since input is a GraphDef")
def _restore_collections(dest_graph, src_meta_graph_def, collection_keys):
"""Restores collections that we need to keep."""
scope = ""
for key in collection_keys:
collection_def = src_meta_graph_def.collection_def[key]
kind = collection_def.WhichOneof("kind")
if kind is None:
tf_logging.error(
"Cannot identify data type for collection %s. Skipping.", key)
continue
from_proto = ops.get_from_proto_function(key)
if from_proto and kind == "bytes_list":
proto_type = ops.get_collection_proto_type(key)
          # It is assumed that there are no variable keys in these collections.
for value in collection_def.bytes_list.value:
proto = proto_type()
proto.ParseFromString(value)
try:
new_value = from_proto(proto, import_scope=scope)
            except Exception:  # pylint: disable=broad-except
continue
dest_graph.add_to_collection(key, new_value)
else:
field = getattr(collection_def, kind)
if kind == "node_list":
for value in field.value:
name = ops.prepend_name_scope(value, scope)
              # Since the graph has been optimized, the node may no longer
              # exist.
              try:
                col_op = dest_graph.as_graph_element(name)
              except (TypeError, ValueError, KeyError):
                continue
dest_graph.add_to_collection(key, col_op)
elif kind == "int64_list":
# NOTE(opensource): This force conversion is to work around the
# fact that Python2 distinguishes between int and long, while
# Python3 has only int.
for value in field.value:
dest_graph.add_to_collection(key, int(value))
else:
for value in field.value:
dest_graph.add_to_collection(key,
ops.prepend_name_scope(value, scope))
# Write the transformed graphdef as SavedModel.
saved_model_builder = builder.SavedModelBuilder(output_saved_model_dir)
with ops.Graph().as_default():
importer.import_graph_def(self._converted_graph_def, name="")
_restore_collections(
ops.get_default_graph(), self._grappler_meta_graph_def,
self._collections_to_keep(
self._grappler_meta_graph_def.collection_def))
# We don't use any specific converter here.
with session.Session(config=self._session_config) as sess:
saved_model_builder.add_meta_graph_and_variables(
sess,
self._input_saved_model_tags,
signature_def_map=self._grappler_meta_graph_def.signature_def)
# Ignore other meta graphs from the input SavedModel.
saved_model_builder.save()
class TrtPrecisionMode(object):
FP32 = "FP32"
FP16 = "FP16"
INT8 = "INT8"
@staticmethod
def supported_precision_modes():
return [TrtPrecisionMode.FP32, TrtPrecisionMode.FP16, TrtPrecisionMode.INT8]
# Use a large enough number as the default max_workspace_size for TRT engines,
# so it can produce reasonable performance results with the default.
DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES = 1 << 30
# TrtConversionParams encapsulates the parameters that are used for TF-TRT
# conversion.
TrtConversionParams = collections.namedtuple(
"TrtConversionParams",
[
# A template RewriterConfig proto used to create a TRT-enabled
# RewriterConfig. If None, it will use a default one.
"rewriter_config_template",
# The maximum GPU temporary memory which the TRT engine can use at
# execution time. This corresponds to the 'workspaceSize' parameter of
# nvinfer1::IBuilder::setMaxWorkspaceSize().
"max_workspace_size_bytes",
# One of TrtPrecisionMode.supported_precision_modes().
"precision_mode",
# The minimum number of nodes required for a subgraph to be replaced by
# TRTEngineOp.
"minimum_segment_size",
# Whether to generate dynamic TRT ops which will build the TRT network
# and engine at run time.
"is_dynamic_op",
# Max number of cached TRT engines in dynamic TRT ops. If the number of
# cached engines is already at max but none of them can serve the input,
# the TRTEngineOp will fall back to run the TF function based on which
# the TRTEngineOp is created.
"maximum_cached_engines",
# This argument is ignored if precision_mode is not INT8. If set to
# True, a calibration graph will be created to calibrate the missing
# ranges. The calibration graph must be converted to an inference graph
# by running calibration with calibrate(). If set to False, quantization
        # nodes will be expected for every tensor in the graph (excluding those
# which will be fused). If a range is missing, an error will occur.
# Please note that accuracy may be negatively affected if there is a
# mismatch between which tensors TRT quantizes and which tensors were
# trained with fake quantization.
"use_calibration",
# If set to True, it will create a FunctionDef for each subgraph that is
# converted to TRT op, and if TRT ops fail to execute at runtime, it'll
# invoke that function as a fallback.
"use_function_backup",
# Max size for the input batch.
# This option is deprecated in TF 2.0.
"max_batch_size",
# A list of batch sizes used to create cached engines, only used when
# is_dynamic_op is True. The length of the list should be <=
# maximum_cached_engines, and the dynamic TRT op will use this list to
# determine the batch sizes of the cached engines, instead of making the
# decision on the fly. This is useful when we know the most common batch
# size(s) the application is going to generate.
# This option is deprecated in TF 2.0.
"cached_engine_batches",
])
DEFAULT_TRT_CONVERSION_PARAMS = TrtConversionParams(
rewriter_config_template=None,
max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
precision_mode=TrtPrecisionMode.FP32,
minimum_segment_size=3,
is_dynamic_op=False,
maximum_cached_engines=1,
use_calibration=True,
use_function_backup=True,
max_batch_size=1,
cached_engine_batches=None)
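# Editor's illustrative sketch (not original code): because TrtConversionParams
# is a namedtuple, callers can derive a variant with `_replace` and hand it to
# get_tensorrt_rewriter_config() defined below. The FP16 mode, cache size, and
# helper name are assumptions for demonstration only.
def _example_trt_params_override():
  fp16_params = DEFAULT_TRT_CONVERSION_PARAMS._replace(
      precision_mode=TrtPrecisionMode.FP16,
      maximum_cached_engines=4)
  return get_tensorrt_rewriter_config(conversion_params=fp16_params)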
_TRT_ENGINE_CACHE_CONTAINER_NAME = "TF-TRT-Engine-Cache"
_TRT_ENGINE_OP_NAME = "TRTEngineOp"
def _check_conversion_params(conversion_params):
"""Validate the provided TrtConversionParams.
Args:
conversion_params: a TrtConversionParams instance.
Raises:
TypeError: if any of the parameters are of unexpected type.
ValueError: if any of the parameters are of unexpected value.
"""
supported_precision_modes = TrtPrecisionMode.supported_precision_modes()
if conversion_params.precision_mode not in supported_precision_modes:
raise ValueError(
("precision mode '{}' is not supported."
"It should be one of {}").format(conversion_params.precision_mode,
supported_precision_modes))
if conversion_params.cached_engine_batches:
if not isinstance(conversion_params.cached_engine_batches, list):
raise TypeError("cached_engine_batches should be a list.")
if len(conversion_params.cached_engine_batches
) > conversion_params.maximum_cached_engines:
raise ValueError("cached_engine_batches should not contain more than "
"maximum_cached_engines items.")
def _check_trt_version_compatibility():
"""Check compatibility of TensorRT version.
Raises:
RuntimeError: if the TensorRT library version is incompatible.
"""
compiled_version = wrap_py_utils.get_linked_tensorrt_version()
loaded_version = wrap_py_utils.get_loaded_tensorrt_version()
tf_logging.info("Linked TensorRT version: %s" % str(compiled_version))
tf_logging.info("Loaded TensorRT version: %s" % str(loaded_version))
version_mismatch = False
if loaded_version[0] < compiled_version[0]:
tf_logging.error(
"TensorRT version mismatch. Tensorflow was compiled against " +
"TensorRT %s but library loaded from environment is TensorRT %s" %
(".".join([str(x) for x in compiled_version]),
".".join([str(x) for x in loaded_version])) +
". Please make sure that correct version of TensorRT " +
"is available in the system and added to ldconfig or LD_LIBRARY_PATH")
raise RuntimeError("Incompatible TensorRT library version")
for i in zip(loaded_version, compiled_version):
if i[0] != i[1]:
tf_logging.warn("TensorRT mismatch. Compiled against version " +
"%s, but loaded %s. Things may not work" %
(".".join([str(x) for x in compiled_version]),
".".join([str(x) for x in loaded_version])))
version_mismatch = True
break
if not version_mismatch:
tf_logging.info("Running against TensorRT version %s" %
".".join([str(x) for x in loaded_version]))
def get_tensorrt_rewriter_config(
conversion_params=DEFAULT_TRT_CONVERSION_PARAMS, is_v2=False):
"""Returns a RewriterConfig proto for TRT transformation.
Args:
conversion_params: a TrtConversionParams instance.
is_v2: whether we're getting a RewriterConfig for TF 2.0.
Returns:
A RewriterConfig proto which sets a TensorRTOptimizer to run Grappler.
Raises:
TypeError: if any of the parameters are of unexpected type.
ValueError: if any of the parameters are of unexpected value.
"""
if conversion_params.rewriter_config_template is not None and not isinstance(
conversion_params.rewriter_config_template,
rewriter_config_pb2.RewriterConfig):
raise TypeError(
"rewriter_config_template should be a RewriterConfig proto.")
_check_conversion_params(conversion_params)
rewriter_config_with_trt = rewriter_config_pb2.RewriterConfig()
if conversion_params.rewriter_config_template is None:
# Layout optimizer may add Const nodes followed by Reshape nodes, thus we
# need to run constant folding again.
rewriter_config_with_trt.optimizers.extend(
["constfold", "layout", "constfold"])
rewriter_config_with_trt.meta_optimizer_iterations = (
rewriter_config_pb2.RewriterConfig.ONE)
else:
rewriter_config_with_trt.CopyFrom(
conversion_params.rewriter_config_template)
optimizer = rewriter_config_with_trt.custom_optimizers.add()
  # Add a constfold optimizer to clean up the unused Const nodes.
rewriter_config_with_trt.custom_optimizers.add().name = "constfold"
optimizer.name = "TensorRTOptimizer"
optimizer.parameter_map[
"minimum_segment_size"].i = conversion_params.minimum_segment_size
optimizer.parameter_map[
"max_workspace_size_bytes"].i = conversion_params.max_workspace_size_bytes
optimizer.parameter_map["precision_mode"].s = _to_bytes(
conversion_params.precision_mode)
optimizer.parameter_map[
"maximum_cached_engines"].i = conversion_params.maximum_cached_engines
optimizer.parameter_map[
"use_calibration"].b = conversion_params.use_calibration
optimizer.parameter_map[
"use_function_backup"].b = conversion_params.use_function_backup
if is_v2:
    # Static mode (a.k.a. pre-generating TRT engines and making them node
    # attributes) is deprecated in TF 2.0.
optimizer.parameter_map["is_dynamic_op"].b = True
else:
optimizer.parameter_map[
"max_batch_size"].i = conversion_params.max_batch_size
optimizer.parameter_map["is_dynamic_op"].b = conversion_params.is_dynamic_op
if conversion_params.cached_engine_batches:
optimizer.parameter_map["cached_engine_batches"].list.i.extend(
conversion_params.cached_engine_batches)
return rewriter_config_with_trt
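# A hedged usage sketch: the returned RewriterConfig can be attached to a
# ConfigProto so that Grappler runs the TensorRTOptimizer, mirroring what
# TrtGraphConverterV2._run_conversion() below does via
# tf_optimizer.OptimizeGraph().
#
#   rewriter_config = get_tensorrt_rewriter_config(
#       conversion_params=DEFAULT_TRT_CONVERSION_PARAMS)
#   config = config_pb2.ConfigProto()
#   config.graph_options.rewrite_options.CopyFrom(rewriter_config)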
class TrtGraphConverter(GraphConverter):
"""A GraphConverter for TRT transformation."""
# TODO(laigd): use TrtConversionParams here.
def __init__(self,
input_saved_model_dir=None,
input_saved_model_tags=None,
input_saved_model_signature_key=None,
input_graph_def=None,
nodes_blacklist=None,
session_config=None,
max_batch_size=1,
max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
precision_mode=TrtPrecisionMode.FP32,
minimum_segment_size=3,
is_dynamic_op=False,
maximum_cached_engines=1,
cached_engine_batches=None,
use_calibration=True,
use_function_backup=True):
"""Initialize the converter.
Args:
      input_saved_model_dir: the directory to load the SavedModel which
        contains the input graph to transform. Used only when input_graph_def
        is None.
input_saved_model_tags: list of tags to load the SavedModel.
input_saved_model_signature_key: the key of the signature to optimize the
graph for.
input_graph_def: a GraphDef object containing a model to be transformed.
If set to None, the graph will be read from the SavedModel loaded from
input_saved_model_dir.
nodes_blacklist: list of node names to prevent the converter from
touching.
session_config: the ConfigProto used to create a Session. It's also used
as a template to create a TRT-enabled ConfigProto for conversion. If not
specified, a default ConfigProto will be used.
max_batch_size: max size for the input batch.
max_workspace_size_bytes: the maximum GPU temporary memory which the TRT
engine can use at execution time. This corresponds to the
'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
precision_mode: one of TrtPrecisionMode.supported_precision_modes().
minimum_segment_size: the minimum number of nodes required for a subgraph
to be replaced by TRTEngineOp.
is_dynamic_op: whether to generate dynamic TRT ops which will build the
TRT network and engine at run time.
maximum_cached_engines: max number of cached TRT engines in dynamic TRT
ops. If the number of cached engines is already at max but none of them
can serve the input, the TRTEngineOp will fall back to run the TF
function based on which the TRTEngineOp is created.
cached_engine_batches: a list of batch sizes used to create cached
engines, only used when is_dynamic_op is True. The length of the list
should be <= maximum_cached_engines, and the dynamic TRT op will use
this list to determine the batch sizes of the cached engines, instead of
making the decision on the fly. This is useful when we know the most
common batch size(s) the application is going to generate.
use_calibration: this argument is ignored if precision_mode is not INT8.
If set to True, a calibration graph will be created to calibrate the
missing ranges. The calibration graph must be converted to an inference
graph by running calibration with calibrate(). If set to False,
quantization nodes will be expected for every tensor in the graph
        (excluding those which will be fused). If a range is missing, an error
will occur. Please note that accuracy may be negatively affected if
there is a mismatch between which tensors TRT quantizes and which
tensors were trained with fake quantization.
      use_function_backup: if set to True, a FunctionDef will be created for
        each subgraph that is converted to a TRT op, and if a TRT op fails to
        execute at runtime, that function will be invoked as a fallback.
Raises:
ValueError: if the combination of the parameters is invalid.
"""
super(TrtGraphConverter, self).__init__(
input_saved_model_dir=input_saved_model_dir,
input_saved_model_tags=input_saved_model_tags,
input_saved_model_signature_key=input_saved_model_signature_key,
input_graph_def=input_graph_def,
nodes_blacklist=nodes_blacklist,
session_config=session_config)
_check_trt_version_compatibility()
self._need_calibration = (
precision_mode == TrtPrecisionMode.INT8 and use_calibration)
if self._need_calibration and not is_dynamic_op:
tf_logging.warn(
"INT8 precision mode with calibration is supported with "
"dynamic TRT ops only. Disregarding is_dynamic_op parameter.")
is_dynamic_op = True
# TODO(laigd): consider provide a mechanism to remove the fallback path
# after calibration is done.
if self._need_calibration and not use_function_backup:
raise ValueError(
"Calibration requires enabling fallback to TF function execution.")
# TODO(laigd):
# - Verify in int8 mode that maximum_cached_engines and
# cached_engine_batches are set appropriately.
# - If it fails to build the int8 engine it should return error.
rewriter_config_template = None
if (session_config and session_config.HasField("graph_options") and
session_config.graph_options.HasField("rewrite_options")):
rewriter_config_template = session_config.graph_options.rewrite_options
self._conversion_params = TrtConversionParams(
rewriter_config_template=rewriter_config_template,
max_workspace_size_bytes=max_workspace_size_bytes,
precision_mode=precision_mode,
minimum_segment_size=minimum_segment_size,
is_dynamic_op=is_dynamic_op,
maximum_cached_engines=maximum_cached_engines,
use_calibration=use_calibration,
use_function_backup=use_function_backup,
max_batch_size=max_batch_size,
cached_engine_batches=cached_engine_batches)
_check_conversion_params(self._conversion_params)
def get_rewriter_config(self):
return get_tensorrt_rewriter_config(
conversion_params=self._conversion_params)
def finalize_calibration(self):
assert self._need_calibration
assert self._converted
assert not self._calibration_data_collected
# TODO(laigd): a better way would be to use self._calibration_sess to list
# all the devices, add one get_calibration_data for each device, and
    # fetch each such op for every resource until it's found. This can work
# even when the device of the TRTEngineOp is empty or not fully specified.
# Maps device name to the corresponding get_calibration_data.
device_to_get_resource_op_map = {}
with self._calibration_graph.as_default():
resource_name_input = array_ops.placeholder(dtypes.string)
for node in self._converted_graph_def.node:
if node.op == _TRT_ENGINE_OP_NAME:
# Adds the get_calibration_data op for the device if not done before.
# We only add one such op for each device.
          # TODO(laigd): what if the device is empty?
if node.device not in device_to_get_resource_op_map:
with self._calibration_graph.device(node.device):
serialized_resources_output = (
gen_trt_ops.get_calibration_data_op(resource_name_input))
device_to_get_resource_op_map[node.device] = (
serialized_resources_output)
# Get the calibration resource.
calibration_result = self._calibration_sess.run(
device_to_get_resource_op_map[node.device],
feed_dict={resource_name_input: node.name})
node.attr["calibration_data"].s = calibration_result
self._calibration_data_collected = True
self._calibration_sess.close()
def save(self, output_saved_model_dir):
"""Save the converted graph as a SavedModel."""
if self._need_calibration:
assert self._calibration_data_collected
super(TrtGraphConverter, self).save(output_saved_model_dir)
class TRTEngineResource(tracking.TrackableResource):
"""Class to track the serialized engines resource."""
def __init__(self, resource_name, filename, maximum_cached_engines):
super(TRTEngineResource, self).__init__()
self._resource_name = resource_name
# Track the serialized engine file in the SavedModel.
self._filename = self._track_trackable(
tracking.TrackableAsset(filename), "_serialized_trt_engine_filename")
self._maximum_cached_engines = maximum_cached_engines
def _create_resource(self):
return gen_trt_ops.create_trt_engine_cache(
container=_TRT_ENGINE_CACHE_CONTAINER_NAME,
resource_name=self._resource_name,
max_cached_engines_count=self._maximum_cached_engines)
def _initialize(self):
gen_trt_ops.populate_trt_engine_cache(self.resource_handle, self._filename)
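# Note on the lifecycle (following the TrackableResource contract): on save,
# the engine file tracked via TrackableAsset is copied into the SavedModel's
# assets directory; on load, _create_resource() creates an empty engine cache
# and _initialize() repopulates it from the serialized file.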
class TrtGraphConverterV2(object):
"""An offline converter for TF-TRT transformation for TF 2.0 SavedModels.
To run the conversion without quantization calibration (e.g. for FP32/FP16
precision modes):
```python
params = DEFAULT_TRT_CONVERSION_PARAMS._replace(precision_mode='FP16')
converter = TrtGraphConverterV2(
input_saved_model_dir="my_dir", conversion_params=params)
converter.convert()
converter.save(output_saved_model_dir)
```
  As a result, a TF-TRT converted SavedModel will be generated and saved to
  `output_saved_model_dir`. The SavedModel will have its TRT-compatible
  subgraphs replaced by TRTEngineOps, but no TRT engines will be built until
  execution time. We can also build the TRT engines offline by running the
  converted function with some input data:
```python
params = DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode='FP16',
# Set this to a large enough number so it can cache all the TRT engines.
maximum_cached_engines=16)
converter = TrtGraphConverterV2(
input_saved_model_dir="my_dir", conversion_params=params)
converted_func = converter.convert()
for data in my_input_data:
    converted_func(data)
converter.save(output_saved_model_dir)
```
  This way, for each unique input shape to a TRTEngineOp that cannot be
  handled by any previously generated TRT engine, a new engine will be
  generated and serialized to the output SavedModel in `output_saved_model_dir`.
  This is useful for applications that cannot afford to build TRT engines at
  runtime but have access to input data similar to what is used in production
  (for example, data that results in the same input shapes to the
  TRTEngineOps). Also, the generated TRT engines are platform specific, so we
  need to run `converted_func` in an environment similar to production (at
  least with the same type of GPU).
TODO(laigd/hinsu): running conversion with calibration in INT8 mode should
follow exactly the same steps.
"""
def __init__(self,
input_saved_model_dir=None,
input_saved_model_tags=None,
input_saved_model_signature_key=None,
conversion_params=DEFAULT_TRT_CONVERSION_PARAMS):
"""Initialize the converter.
Args:
      input_saved_model_dir: the directory to load the SavedModel which
        contains the input graph to transform.
input_saved_model_tags: list of tags to load the SavedModel.
input_saved_model_signature_key: the key of the signature to optimize the
graph for.
conversion_params: a TrtConversionParams instance.
Raises:
ValueError: if the combination of the parameters is invalid.
"""
assert context.executing_eagerly()
_check_trt_version_compatibility()
_check_conversion_params(conversion_params)
self._conversion_params = conversion_params
self._input_saved_model_dir = input_saved_model_dir
self._input_saved_model_tags = (
input_saved_model_tags or [tag_constants.SERVING])
self._input_saved_model_signature_key = (
input_saved_model_signature_key or
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
self._need_calibration = (
conversion_params.precision_mode == TrtPrecisionMode.INT8 and
conversion_params.use_calibration)
if (self._need_calibration and not conversion_params.is_dynamic_op):
raise ValueError("INT8 precision mode with calibration is not supported "
"with static TensorRT ops. Set is_dynamic_op to True.")
self._converted = False
def _run_conversion(self, meta_graph_def):
"""Run Grappler's OptimizeGraph() tool to convert the graph.
Args:
meta_graph_def: the MetaGraphDef instance to run the optimizations on.
Returns:
The optimized GraphDef.
"""
rewriter_config = get_tensorrt_rewriter_config(
conversion_params=self._conversion_params, is_v2=True)
grappler_session_config = config_pb2.ConfigProto()
grappler_session_config.graph_options.rewrite_options.CopyFrom(
rewriter_config)
return tf_optimizer.OptimizeGraph(
grappler_session_config, meta_graph_def, graph_id=b"tf_graph")
# TODO(laigd): provide a utility function to optimize a ConcreteFunction and
# use it here (b/124792963).
def convert(self):
"""Convert the input SavedModel in 2.0 format.
Returns:
The TF-TRT converted Function.
"""
assert not self._converted
self._saved_model = load.load(self._input_saved_model_dir,
self._input_saved_model_tags)
func = self._saved_model.signatures[self._input_saved_model_signature_key]
frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)
grappler_meta_graph_def = saver.export_meta_graph(
graph_def=frozen_func.graph.as_graph_def(), graph=frozen_func.graph)
# Add a collection 'train_op' so that Grappler knows the outputs.
fetch_collection = meta_graph_pb2.CollectionDef()
for array in frozen_func.inputs + frozen_func.outputs:
fetch_collection.node_list.value.append(array.name)
grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
fetch_collection)
# Run TRT optimizer in Grappler to convert the graph.
self._converted_graph_def = self._run_conversion(grappler_meta_graph_def)
self._converted_func = wrap_function.function_from_graph_def(
self._converted_graph_def,
[tensor.name for tensor in frozen_func.inputs],
[tensor.name for tensor in frozen_func.outputs])
self._converted = True
# Wrap the converted ConcreteFunction in a Function so it can accept numpy
# arrays as input.
@def_function.function
def wrapper_func(*args, **kwargs):
return self._converted_func(*args, **kwargs)
return wrapper_func
def save(self, output_saved_model_dir):
"""Save the converted SavedModel.
Args:
      output_saved_model_dir: directory to save the converted SavedModel.
"""
assert self._converted
# Serialize the TRT engines in the cache if any, and create trackable
# resource to track them.
engine_asset_dir = tempfile.mkdtemp()
resource_map = {}
def _serialize_and_track_engine(canonical_engine_name):
"""Serialize TRT engines in the cache and track them."""
# Don't dump the same cache twice.
if canonical_engine_name in resource_map:
return
filename = os.path.join(engine_asset_dir,
"trt-serialized-engine." + canonical_engine_name)
try:
gen_trt_ops.dump_trt_engine_cache(
container=_TRT_ENGINE_CACHE_CONTAINER_NAME,
resource_name=canonical_engine_name,
filename=filename,
delete_cache_after_dump=True)
except errors.NotFoundError:
        # If the user hasn't run the function to populate the engine cache,
        # that's fine, and we don't need to track any serialized TRT engines.
return
resource_map[canonical_engine_name] = TRTEngineResource(
canonical_engine_name, filename,
self._conversion_params.maximum_cached_engines)
    # Remove all scope prefixes in the node name. In TF 2.0, the same concrete
    # function can be initialized multiple times with different prefixes, and
    # this will result in the same TRTEngineOp being initialized multiple times
    # with different caches and duplicate TRT engines.
    # TODO(laigd): this may be caused by the fact that TRTEngineOp is not
    # stateful; need to investigate.
    # TODO(laigd): we rely on the fact that all functions are fully inlined
    # before the TF-TRT optimizer is called, as otherwise it may generate the
    # same name when optimizing a different function graph. Fix this.
canonical_engine_name = lambda node: node.name.split("/")[-1]
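    # For example (hypothetical names): both "PartitionedCall/TRTEngineOp_0"
    # and "TRTEngineOp_0" canonicalize to "TRTEngineOp_0", so the engine
    # cache behind them is serialized and tracked only once.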
for node in self._converted_graph_def.node:
if node.op == _TRT_ENGINE_OP_NAME:
_serialize_and_track_engine(canonical_engine_name(node))
for func in self._converted_graph_def.library.function:
for node in func.node_def:
if node.op == _TRT_ENGINE_OP_NAME:
_serialize_and_track_engine(canonical_engine_name(node))
self._saved_model.trt_engine_resources = resource_map
# Rewrite the signature map using the optimized ConcreteFunction.
signatures = {
key: value for key, value in self._saved_model.signatures.items()
}
signatures[self._input_saved_model_signature_key] = self._converted_func
save.save(self._saved_model, output_saved_model_dir, signatures)
# TODO(laigd): use TrtConversionParams here.
def create_inference_graph(
input_graph_def,
outputs,
max_batch_size=1,
max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
precision_mode=TrtPrecisionMode.FP32,
minimum_segment_size=3,
is_dynamic_op=False,
maximum_cached_engines=1,
cached_engine_batches=None,
input_saved_model_dir=None,
input_saved_model_tags=None,
input_saved_model_signature_key=None,
output_saved_model_dir=None,
session_config=None):
"""Python wrapper for the TRT transformation.
Args:
input_graph_def: a GraphDef object containing a model to be transformed. If
set to None, the graph will be read from the SavedModel loaded from
input_saved_model_dir.
outputs: list of tensors or node names for the model outputs. Only used when
input_graph_def is not None.
max_batch_size: max size for the input batch.
max_workspace_size_bytes: the maximum GPU temporary memory which the TRT
engine can use at execution time. This corresponds to the 'workspaceSize'
parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
precision_mode: one of TrtPrecisionMode.supported_precision_modes().
minimum_segment_size: the minimum number of nodes required for a subgraph to
be replaced by TRTEngineOp.
is_dynamic_op: whether to generate dynamic TRT ops which will build the TRT
network and engine at run time.
maximum_cached_engines: max number of cached TRT engines in dynamic TRT ops.
If the number of cached engines is already at max but none of them can
serve the input, the TRTEngineOp will fall back to run the TF function
based on which the TRTEngineOp is created.
cached_engine_batches: a list of batch sizes used to create cached engines,
only used when is_dynamic_op is True. The length of the list should be <=
maximum_cached_engines, and the dynamic TRT op will use this list to
determine the batch sizes of the cached engines, instead of making the
decision on the fly. This is useful when we know the most common batch
size(s) the application is going to generate.
    input_saved_model_dir: the directory to load the SavedModel which contains
      the input graph to transform. Used only when input_graph_def is None.
input_saved_model_tags: list of tags to load the SavedModel.
input_saved_model_signature_key: the key of the signature to optimize the
graph for.
output_saved_model_dir: if not None, construct a SavedModel using the
returned GraphDef and save it to the specified directory. This option only
works when the input graph is loaded from a SavedModel, i.e. when
input_saved_model_dir is specified and input_graph_def is None.
session_config: the ConfigProto used to create a Session. It's also used as
a template to create a TRT-enabled ConfigProto for conversion. If not
specified, a default ConfigProto will be used.
Returns:
A GraphDef transformed from input_graph_def (or the SavedModel graph def
loaded from input_saved_model_dir, if input_graph_def is not present), where
all TRT compatible subgraphs are replaced with TRTEngineOps, and a TF
function is added for each of the subgraphs.
If is_dynamic_op is True, each TRTEngineOp will contain a serialized
subgraph GraphDef, which will be converted to a TRT engine at execution time
and the TRT engine will be cached for future usage. A new TRT engine will be
created each time when none of the cached engines match the input shapes. If
it fails to execute the TRT engine or the number of cached engines reaches
maximum_cached_engines, the op will fall back to call the corresponding TF
function.
If is_dynamic_op is False, each TRTEngineOp will contain a serialized TRT
engine created from the corresponding subgraph. No more engines will be
created on the fly, and the op will fall back to call the corresponding TF
function when it fails to execute the engine.
Raises:
ValueError: if the combination of the parameters is invalid.
"""
trt_converter = TrtGraphConverter(
input_saved_model_dir=input_saved_model_dir,
input_saved_model_tags=input_saved_model_tags,
input_saved_model_signature_key=input_saved_model_signature_key,
input_graph_def=input_graph_def,
nodes_blacklist=outputs,
session_config=session_config,
max_batch_size=max_batch_size,
max_workspace_size_bytes=max_workspace_size_bytes,
precision_mode=precision_mode,
minimum_segment_size=minimum_segment_size,
is_dynamic_op=is_dynamic_op,
maximum_cached_engines=maximum_cached_engines,
cached_engine_batches=cached_engine_batches,
use_calibration=False)
converted_graph_def = trt_converter.convert()
if output_saved_model_dir:
trt_converter.save(output_saved_model_dir)
return converted_graph_def
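# A hedged usage sketch of the wrapper above (paths are hypothetical),
# converting a SavedModel and writing the converted one back to disk:
#
#   converted_graph_def = create_inference_graph(
#       input_graph_def=None,
#       outputs=None,
#       input_saved_model_dir="/tmp/my_saved_model",
#       output_saved_model_dir="/tmp/my_trt_saved_model",
#       is_dynamic_op=True,
#       maximum_cached_engines=2)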
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/trt_convert.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import is_tensorrt_enabled
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.compiler.tensorrt import trt_convert
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.tools import saved_model_utils
from tensorflow.python.training.tracking import tracking
_SAVED_MODEL_SIGNATURE_KEY = "mypredict"
class TrtConvertTest(test_util.TensorFlowTestCase):
"""Class to test Tensorflow-TensorRT integration python API."""
# Use a small max_workspace_size for tests so they don't consume too much GPU
# memory.
_TRT_MAX_WORKSPACE_SIZE_BYTES = 2 << 20
def testGetTensorrtRewriterConfig(self):
"""Test case for TrtGraphConverter.get_tensorrt_rewriter_config()."""
if not is_tensorrt_enabled():
return
conversion_params = trt_convert.DEFAULT_TRT_CONVERSION_PARAMS._replace(
max_batch_size=128,
max_workspace_size_bytes=1234,
precision_mode="INT8",
minimum_segment_size=10,
is_dynamic_op=True,
maximum_cached_engines=2,
cached_engine_batches=[1, 128])
rewriter_cfg = trt_convert.get_tensorrt_rewriter_config(
conversion_params=conversion_params)
self.assertEqual(["constfold", "layout", "constfold"],
rewriter_cfg.optimizers)
self.assertEqual(rewriter_config_pb2.RewriterConfig.ONE,
rewriter_cfg.meta_optimizer_iterations)
trt_optimizer = None
for optimizer in rewriter_cfg.custom_optimizers:
if optimizer.name == "TensorRTOptimizer":
        self.assertIsNone(trt_optimizer)
trt_optimizer = optimizer
    self.assertIsNotNone(trt_optimizer)
for key in [
"minimum_segment_size", "max_batch_size", "is_dynamic_op",
"max_workspace_size_bytes", "precision_mode", "maximum_cached_engines",
"cached_engine_batches"
]:
      self.assertIn(key, trt_optimizer.parameter_map)
self.assertEqual(10, trt_optimizer.parameter_map["minimum_segment_size"].i)
self.assertEqual(128, trt_optimizer.parameter_map["max_batch_size"].i)
self.assertEqual(True, trt_optimizer.parameter_map["is_dynamic_op"].b)
self.assertEqual(1234,
trt_optimizer.parameter_map["max_workspace_size_bytes"].i)
self.assertEqual(
trt_convert._to_bytes("INT8"),
trt_optimizer.parameter_map["precision_mode"].s)
self.assertEqual(2, trt_optimizer.parameter_map["maximum_cached_engines"].i)
self.assertEqual(
[1, 128], trt_optimizer.parameter_map["cached_engine_batches"].list.i)
def _GetConfigProto(self):
"""Get ConfigProto for session creation."""
config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(allow_growth=True))
return config
@classmethod
def _GetGraph(cls, inp, var):
"""Get the graph for testing."""
    # The graph computes (input+1)^2; it looks like:
#
# input (Placeholder) v1 (Variable)
# | \ /
# \ +
# \ / \
# * |
# \ /
# +
# |
# output (Identity)
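    #
    # For instance, with inp == 1.0 and var == 1.0: add = 2.0, mul = 2.0,
    # add_1 = 4.0, which matches (1.0 + 1.0)**2.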
add = inp + var
mul = inp * add
add = mul + add
out = array_ops.identity(add, name="output")
return out
def _GetModelForV2(self):
class SimpleModel(tracking.AutoTrackable):
def __init__(self):
self.v = None
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None, 1, 1], dtype=dtypes.float32)
])
def run(self, inp):
if self.v is None:
self.v = variables.Variable([[[1.0]]], dtype=dtypes.float32)
return TrtConvertTest._GetGraph(inp, self.v)
return SimpleModel()
def _GetGraphForV1(self):
g = ops.Graph()
with g.as_default():
with g.device("/GPU:0"):
inp = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 1, 1], name="input")
var = variables.Variable([[[1.0]]], dtype=dtypes.float32, name="v1")
out = TrtConvertTest._GetGraph(inp, var)
return g, var, inp, out
def _GetGraphDef(self):
"""Get the graph def for testing."""
g, var, _, _ = self._GetGraphForV1()
with self.session(graph=g, config=self._GetConfigProto()) as sess:
sess.run(var.initializer)
graph_def = graph_util.convert_variables_to_constants(
sess, g.as_graph_def(add_shapes=True), ["output"])
node_name_to_op = {node.name: node.op for node in graph_def.node}
self.assertEqual(
{
"v1": "Const",
"add/ReadVariableOp": "Identity",
"input": "Placeholder",
"add": "Add",
"mul": "Mul",
"add_1": "Add",
"output": "Identity"
}, node_name_to_op)
return graph_def
def _WriteInputSavedModel(self, input_saved_model_dir):
"""Write the saved model as an input for testing."""
g, var, inp, out = self._GetGraphForV1()
signature_def = signature_def_utils.build_signature_def(
inputs={"myinput": utils.build_tensor_info(inp)},
outputs={"myoutput": utils.build_tensor_info(out)},
method_name=signature_constants.PREDICT_METHOD_NAME)
saved_model_builder = builder.SavedModelBuilder(input_saved_model_dir)
with self.session(graph=g, config=self._GetConfigProto()) as sess:
sess.run(var.initializer)
saved_model_builder.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
signature_def_map={_SAVED_MODEL_SIGNATURE_KEY: signature_def})
saved_model_builder.save()
def _ConvertGraph(self,
input_saved_model_dir=None,
output_saved_model_dir=None,
need_calibration=False,
max_batch_size=1,
minimum_segment_size=3,
is_dynamic_op=False,
maximum_cached_engines=1,
use_function_backup=False):
"""Helper method to convert a GraphDef or SavedModel using TF-TRT."""
converter = trt_convert.TrtGraphConverter(
input_saved_model_dir=input_saved_model_dir,
input_saved_model_signature_key=_SAVED_MODEL_SIGNATURE_KEY,
input_graph_def=None if input_saved_model_dir else self._GetGraphDef(),
nodes_blacklist=None if input_saved_model_dir else ["output"],
session_config=self._GetConfigProto(),
max_batch_size=max_batch_size,
max_workspace_size_bytes=TrtConvertTest._TRT_MAX_WORKSPACE_SIZE_BYTES,
precision_mode=(trt_convert.TrtPrecisionMode.INT8 if need_calibration
else trt_convert.TrtPrecisionMode.FP32),
minimum_segment_size=minimum_segment_size,
is_dynamic_op=is_dynamic_op,
maximum_cached_engines=maximum_cached_engines,
use_function_backup=use_function_backup)
output_graph_def = converter.convert()
if need_calibration:
class CalibrationData(object):
def __init__(self):
self._data = 0
def next(self):
self._data += 1
return {"input:0": [[[self._data]]]}
output_graph_def = converter.calibrate(
fetch_names=["output:0"],
num_runs=10,
feed_dict_fn=CalibrationData().next)
if output_saved_model_dir is not None:
converter.save(output_saved_model_dir=output_saved_model_dir)
return output_graph_def
def _TestTrtGraphConverter(self,
input_saved_model_dir=None,
output_saved_model_dir=None,
need_calibration=False,
is_dynamic_op=False):
"""General method to test trt_convert.TrtGraphConverter()."""
output_graph_def = self._ConvertGraph(
input_saved_model_dir=input_saved_model_dir,
output_saved_model_dir=output_saved_model_dir,
need_calibration=need_calibration,
is_dynamic_op=is_dynamic_op,
use_function_backup=need_calibration)
graph_defs_to_verify = [output_graph_def]
if output_saved_model_dir:
saved_model_graph_def = saved_model_utils.get_meta_graph_def(
output_saved_model_dir, tag_constants.SERVING).graph_def
self.assertIsInstance(saved_model_graph_def, graph_pb2.GraphDef)
graph_defs_to_verify.append(saved_model_graph_def)
for graph_def in graph_defs_to_verify:
node_name_to_op = {node.name: node.op for node in graph_def.node}
self.assertEqual(
{
"input": "Placeholder",
"TRTEngineOp_0": "TRTEngineOp",
"output": "Identity"
}, node_name_to_op)
if need_calibration:
trt_engine_nodes = [
node for node in graph_def.node if node.op == "TRTEngineOp"
]
self.assertNotEmpty(trt_engine_nodes)
for node in trt_engine_nodes:
          self.assertNotEmpty(node.attr["calibration_data"].s)
# Run the calibrated graph.
# TODO(laigd): consider having some input where the answer is different.
with ops.Graph().as_default():
importer.import_graph_def(graph_def, name="")
with self.session(config=self._GetConfigProto()) as sess:
for test_data in range(10):
self.assertEqual(
(test_data + 1.0)**2,
sess.run("output:0", feed_dict={"input:0": [[[test_data]]]}))
@test_util.deprecated_graph_mode_only
def testTrtGraphConverter_BasicConversion(self):
"""Test case for trt_convert.TrtGraphConverter()."""
if not is_tensorrt_enabled():
return
tmp_dir = self.get_temp_dir()
input_saved_model_dir = os.path.join(tmp_dir, "in_dir1")
self._WriteInputSavedModel(input_saved_model_dir)
for need_calibration in [False, True]:
# Use GraphDef as input.
self._TestTrtGraphConverter()
# Use SavedModel as input.
output_saved_model_dir = os.path.join(
tmp_dir, "out_dir1%s" % ("_int8" if need_calibration else ""))
self._TestTrtGraphConverter(
input_saved_model_dir=input_saved_model_dir,
output_saved_model_dir=output_saved_model_dir,
need_calibration=need_calibration)
@test_util.run_v2_only
def testTrtGraphConverter_BasicConversion_v2(self):
"""Test case for trt_convert.TrtGraphConverter()."""
if not is_tensorrt_enabled():
return
np_input = np.random.random_sample([4, 1, 1]).astype(np.float32)
# Create a model and save it.
input_saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
root = self._GetModelForV2()
expected_output = root.run(np_input)
save.save(root, input_saved_model_dir,
{_SAVED_MODEL_SIGNATURE_KEY: root.run})
# Run TRT conversion.
converter = trt_convert.TrtGraphConverterV2(
input_saved_model_dir=input_saved_model_dir,
input_saved_model_signature_key=_SAVED_MODEL_SIGNATURE_KEY,
conversion_params=trt_convert.DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode=trt_convert.TrtPrecisionMode.FP32,
is_dynamic_op=True,
maximum_cached_engines=2,
use_function_backup=False))
converted_func = converter.convert()
def _check_trt_ops(graph_def):
trt_op_names = [
node.name for node in graph_def.node if node.op == "TRTEngineOp"
]
for func in graph_def.library.function:
for node in func.node_def:
if node.op == "TRTEngineOp":
trt_op_names.append(node.name)
self.assertEqual(1, len(trt_op_names))
self.assertIn("TRTEngineOp_0", trt_op_names[0])
# Verify the converted GraphDef and ConcreteFunction.
self.assertIsInstance(converted_func, def_function.Function)
converted_concrete_func = converted_func.get_concrete_function(
tensor_spec.TensorSpec(shape=[None, 1, 1], dtype=dtypes.float32))
_check_trt_ops(converted_concrete_func.graph.as_graph_def())
# Save the converted model without any TRT engine cache.
output_saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
converter.save(output_saved_model_dir)
unexpected_asset_file = os.path.join(
output_saved_model_dir, "assets/trt-serialized-engine.TRTEngineOp_0")
self.assertFalse(os.path.exists(unexpected_asset_file))
# Run the converted function to populate the engine cache.
output_with_trt = converted_func(np_input)
self.assertEqual(1, len(output_with_trt))
self.assertAllClose(
expected_output, output_with_trt[0], atol=1e-6, rtol=1e-6)
# Save the converted model again with serialized engine cache.
output_saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
converter.save(output_saved_model_dir)
expected_asset_file = os.path.join(
output_saved_model_dir, "assets/trt-serialized-engine.TRTEngineOp_0")
self.assertTrue(os.path.exists(expected_asset_file))
self.assertTrue(os.path.getsize(expected_asset_file))
# Load and verify the converted model.
#
    # TODO(laigd): the name of the new input_signature of the
    # `root_with_trt.run` function is an empty string (originally it was
    # None); investigate why.
root_with_trt = load.load(output_saved_model_dir)
# TODO(laigd): `root_with_trt.run` is still using the original graph without
# trt. Consider changing that.
# _check_trt_ops(
# root_with_trt.run.get_concrete_function().graph.as_graph_def())
converted_signature = root_with_trt.signatures[_SAVED_MODEL_SIGNATURE_KEY]
_check_trt_ops(converted_signature.graph.as_graph_def())
output_with_trt = converted_signature(ops.convert_to_tensor(np_input))
    # The output of running the converted signature is a dict for
    # compatibility with the V1 SavedModel signature mechanism.
    output_with_trt = output_with_trt[list(output_with_trt.keys())[0]]
self.assertAllClose(expected_output, output_with_trt, atol=1e-6, rtol=1e-6)
def _TestRun(self,
sess,
batch_size,
use_function_backup=False,
expect_engine_is_run=True):
try:
result = sess.run(
"output:0", feed_dict={"input:0": [[[1.0]]] * batch_size})
self.assertAllEqual([[[4.0]]] * batch_size, result)
except errors.OpError as e:
# This should happen only when fallback path is disabled and TRT engine
# fails to run.
self.assertTrue(not use_function_backup and not expect_engine_is_run)
self.assertIn("Fallback path is disabled, for TRTEngineOp_0", str(e))
@test_util.deprecated_graph_mode_only
def testTrtGraphConverter_MinimumSegmentSize(self):
if not is_tensorrt_enabled():
return
output_graph_def = self._ConvertGraph(minimum_segment_size=5)
node_name_to_op = {node.name: node.op for node in output_graph_def.node}
self.assertEqual(
{
"add/ReadVariableOp": "Const",
"input": "Placeholder",
"add": "Add",
"mul": "Mul",
"add_1": "Add",
"output": "Identity"
}, node_name_to_op)
@test_util.deprecated_graph_mode_only
def testTrtGraphConverter_DynamicOp(self):
if not is_tensorrt_enabled():
return
tmp_dir = self.get_temp_dir()
input_saved_model_dir = os.path.join(tmp_dir, "in_dir2")
output_saved_model_dir = os.path.join(tmp_dir, "out_dir2")
self._WriteInputSavedModel(input_saved_model_dir)
output_graph_def = self._ConvertGraph(
input_saved_model_dir=input_saved_model_dir,
output_saved_model_dir=output_saved_model_dir,
is_dynamic_op=True,
maximum_cached_engines=2,
use_function_backup=False) # Disallow fallback.
# Test the output GraphDef.
with ops.Graph().as_default():
importer.import_graph_def(output_graph_def, name="")
with self.session(config=self._GetConfigProto()) as sess:
# Run with batch size 1, a new engine is created and cached.
self._TestRun(sess, 1)
# Run with batch size 2, a new engine is created and cached.
self._TestRun(sess, 2)
# Run with batch size 3, since the number of cached engines has reached
# the max, it should evict an old engine and create a new one.
self._TestRun(sess, 3)
# Test the output SavedModel
with ops.Graph().as_default():
with self.session(config=self._GetConfigProto()) as sess:
loader.load(sess, [tag_constants.SERVING], output_saved_model_dir)
# Run with batch size 1, a new engine is created and cached.
self._TestRun(sess, 1)
# Run with batch size 2, a new engine is created and cached.
self._TestRun(sess, 2)
# Run with batch size 3, since the number of cached engines has reached
# the max, it should evict an old engine and create a new one.
self._TestRun(sess, 3)
def _TestStaticOp(self, use_function_backup):
if not is_tensorrt_enabled():
return
tmp_dir = self.get_temp_dir()
input_saved_model_dir = os.path.join(tmp_dir, "in_dir3")
output_saved_model_dir = os.path.join(tmp_dir, "out_dir3")
self._WriteInputSavedModel(input_saved_model_dir)
output_graph_def = self._ConvertGraph(
input_saved_model_dir=input_saved_model_dir,
output_saved_model_dir=output_saved_model_dir,
        maximum_cached_engines=2,  # This is a no-op, added just for testing.
use_function_backup=use_function_backup)
# Test the output GraphDef.
with ops.Graph().as_default():
importer.import_graph_def(output_graph_def, name="")
with self.session(config=self._GetConfigProto()) as sess:
# Run with batch size 1, the default engine embedded in the graphdef
# will be used.
self._TestRun(
sess,
1,
use_function_backup=use_function_backup,
expect_engine_is_run=True)
        # Run with batch size 2, which exceeds max_batch_size; the op should
        # try to fall back to the TF function.
self._TestRun(
sess,
2,
use_function_backup=use_function_backup,
expect_engine_is_run=False)
# Test the output SavedModel
with ops.Graph().as_default():
with self.session(config=self._GetConfigProto()) as sess:
loader.load(sess, [tag_constants.SERVING], output_saved_model_dir)
# Run with batch size 1, the default engine embedded in the graphdef
# will be used.
self._TestRun(
sess,
1,
use_function_backup=use_function_backup,
expect_engine_is_run=True)
        # Run with batch size 2, which exceeds max_batch_size; the op should
        # try to fall back to the TF function.
self._TestRun(
sess,
2,
use_function_backup=use_function_backup,
expect_engine_is_run=False)
@test_util.deprecated_graph_mode_only
def testTrtGraphConverter_StaticOp_NoFallback(self):
self._TestStaticOp(use_function_backup=False)
@test_util.deprecated_graph_mode_only
def testTrtGraphConverter_StaticOp_WithFallback(self):
self._TestStaticOp(use_function_backup=True)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/trt_convert_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Exposes the python wrapper for TensorRT graph transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.python.compiler.tensorrt import trt_convert as trt
# pylint: enable=unused-import,line-too-long
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic tests for TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class SimpleSingleEngineTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing single segment."""
dtype = inp.dtype
conv_filter = constant_op.constant(
[[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
name="weights",
dtype=dtype)
conv = nn.conv2d(
input=inp,
filter=conv_filter,
strides=[1, 2, 2, 1],
padding="SAME",
name="conv")
bias = constant_op.constant([4., 1.5, 2., 3., 5., 7.],
name="bias",
dtype=dtype)
added = nn.bias_add(conv, bias, name="bias_add")
relu = nn.relu(added, "relu")
identity = array_ops.identity(relu, "identity")
pool = nn_ops.max_pool(
identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
return array_ops.squeeze(pool, name="output_0")
def GetParams(self):
# TODO(aaroey): test graph with different dtypes.
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[100, 6, 6, 6]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"weights", "conv", "bias", "bias_add", "relu", "identity",
"max_pool"
]
}
class SimpleMultiEnginesTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
dtype = inp.dtype
conv_filter = constant_op.constant(
[[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
name="weights",
dtype=dtype)
conv = nn.conv2d(
input=inp,
filter=conv_filter,
strides=[1, 2, 2, 1],
padding="SAME",
name="conv")
c1 = constant_op.constant(
np.random.randn(12, 12, 6), dtype=dtype, name="c1")
p = math_ops.mul(conv, c1, name="mul")
c2 = constant_op.constant(
np.random.randn(12, 12, 6), dtype=dtype, name="c2")
q = math_ops.div(conv, c2, name="div")
edge = self.trt_incompatible_op(q, name="incompatible")
edge = math_ops.div(edge, edge, name="div1")
r = math_ops.add(edge, edge, name="add")
p = math_ops.sub(p, edge, name="sub")
q = math_ops.mul(q, edge, name="mul1")
s = math_ops.add(p, q, name="add1")
s = math_ops.sub(s, r, name="sub1")
return array_ops.squeeze(s, name="output_0")
def GetParams(self):
# TODO(aaroey): test graph with different dtypes.
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[100, 12, 12, 6]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"add", "add1", "c1", "div1", "mul", "mul1", "sub", "sub1"
],
"TRTEngineOp_1": ["c2", "conv", "div", "weights"]
}
def GetConversionParams(self, run_params):
"""Return a ConversionParams for test."""
return super(
SimpleMultiEnginesTest, self
).GetConversionParams(run_params)._replace(
# Disable layout optimizer, since it'll add Transpose(Const, Const) to
# the graph and breaks the conversion check.
rewriter_config_template=trt_test.OptimizerDisabledRewriterConfig())
class SimpleMultiEnginesTest2(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing two segment."""
n = inp
for i in range(2):
c = constant_op.constant(1.0, name="c%d" % i)
n = math_ops.add(n, c, name="add%d" % i)
n = math_ops.mul(n, n, name="mul%d" % i)
edge = self.trt_incompatible_op(n, name="incompatible")
with ops.control_dependencies([edge]):
c = constant_op.constant(1.0, name="c2")
n = math_ops.add(n, c, name="add2")
n = math_ops.mul(n, n, name="mul2")
c = constant_op.constant(1.0, name="c3")
n = math_ops.add(n, c, name="add3")
n = math_ops.mul(n, n, name="mul3")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["c0", "c1", "add0", "add1", "mul0", "mul1"],
"TRTEngineOp_1": ["c2", "c3", "add2", "add3", "mul2", "mul3"]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
# Disable the test in fp16 mode since multiple matmul and add ops together
# can cause overflow.
return ((run_params.precision_mode != "FP16") and
not (trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.use_calibration))
class ConstInputTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
n = inp
c = constant_op.constant(1.0, name="c")
    # Adds a control dependency from the constant op to a trt incompatible op,
    # and adds a control dependency from the trt incompatible op to all other
    # ops, to make sure the constant op cannot be contracted with any trt
    # segment that depends on it.
with ops.control_dependencies([c]):
d = self.trt_incompatible_op(n, name="incompatible")
with ops.control_dependencies([d]):
n = math_ops.add(n, c, name="add")
n = math_ops.mul(n, n, name="mul")
n = math_ops.add(n, n, name="add1")
n = self.trt_incompatible_op(n, name="incompatible1")
with ops.control_dependencies([d]):
n = math_ops.add(n, c, name="add2")
n = math_ops.mul(n, n, name="mul1")
n = math_ops.add(n, n, name="add3")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["add", "add1", "mul"],
"TRTEngineOp_1": ["add2", "add3", "mul1"]
}
class ConstDataInputSingleEngineTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing single segment."""
n = inp
c = constant_op.constant(1.0, name="c")
n = math_ops.add(n, c, name="add")
n = math_ops.mul(n, n, name="mul")
n = math_ops.add(n, n, name="add1")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {"TRTEngineOp_0": ["c", "add", "add1", "mul"]}
class ConstDataInputMultipleEnginesTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
n = inp
c = constant_op.constant(1.0, name="c")
n = math_ops.add(n, c, name="add")
n = math_ops.mul(n, n, name="mul")
n = math_ops.add(n, n, name="add1")
n = self.trt_incompatible_op(n, name="incompatible1")
n = math_ops.add(n, c, name="add2")
n = math_ops.mul(n, n, name="mul1")
n = math_ops.add(n, n, name="add3")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["add2", "add3", "mul1"],
# Why segment ["add", "add1", "mul"] was assigned segment id 1
# instead of 0: the parent node of this segment is actually const
# node 'c', but it's removed later since it's const output of the
# segment which is not allowed.
"TRTEngineOp_1": ["add", "add1", "mul"]
}
class ControlDependencyTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
c1 = constant_op.constant(1.0, name="c1")
c2 = constant_op.constant(1.0, name="c2")
d1 = constant_op.constant(1.0, name="d1")
d2 = self.trt_incompatible_op(inp, name="d2")
with ops.control_dependencies([d1, d2]):
add = math_ops.add(inp, c1, name="add")
with ops.control_dependencies([d1, d2]):
mul = math_ops.mul(add, add, name="mul")
with ops.control_dependencies([d1, d2]):
add1 = math_ops.add(mul, mul, name="add1")
edge = self.trt_incompatible_op(add1, name="incompatible")
with ops.control_dependencies([d1, d2, add, mul]):
add2 = math_ops.add(edge, c2, name="add2")
with ops.control_dependencies([d1, d2, add1, mul]):
mul1 = math_ops.mul(add2, add2, name="mul1")
with ops.control_dependencies([d1, d2, add, add1]):
add3 = math_ops.add(mul1, mul1, name="add3")
return array_ops.squeeze(add3, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["c1", "add", "add1", "mul"],
"TRTEngineOp_1": ["c2", "add2", "add3", "mul1"]
}
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/base_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import get_linked_tensorrt_version
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _GraphFn(x, add_quantization_nodes):
def _Quantize(x, r):
if add_quantization_nodes:
x = gen_array_ops.fake_quant_with_min_max_vars(x, -r, r)
return x
x = _Quantize(x, 10.0)
x = x + 5
x = _Quantize(x, 15.0)
x = x - 5
x = _Quantize(x, 10.0)
x = x * 0.1
x = _Quantize(x, 1.0)
w = constant_op.constant(np.ones((8, 1)), dtype=dtypes.float32)
x = math_ops.matmul(x, w)
x = _Quantize(x, 10.0)
return array_ops.identity(x, name="output_0")
def _GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[8, 8]], [[8, 1]])
class QuantizationMissingAllRangesTest(trt_test.TfTrtIntegrationTestBase):
"""Create a graph containing single segment with no quantization ranges."""
def GraphFn(self, x):
return _GraphFn(x, add_quantization_nodes=False)
def GetParams(self):
return _GetParams(self)
def ShouldRunTest(self, run_params):
if get_linked_tensorrt_version()[0] < 5:
return False
# Only test static engine mode, with or without calibration.
return (trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.convert_online and not run_params.dynamic_engine)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
if run_params.use_calibration:
# In static engine mode with calibration, it should build a calibration
# engine.
return ["TRTEngineOp_0"]
# In static engine mode without calibration, the engine building will fail
# since no quantization ranges are set, which results in no TRT nodes.
return []
class QuantizationWithRangesTest(trt_test.TfTrtIntegrationTestBase):
"""Create a graph containing single segment with no quantization ranges."""
def GraphFn(self, x):
return _GraphFn(x, add_quantization_nodes=True)
def GetParams(self):
return _GetParams(self)
def ShouldRunTest(self, run_params):
if get_linked_tensorrt_version()[0] < 5:
return False
# Test static/dynamic engine with/without calibration.
return (trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.convert_online)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01
class NonQuantizedPrecisionsWithRangesTest(trt_test.TfTrtIntegrationTestBase):
"""Create a graph containing single segment with no quantization ranges."""
def GraphFn(self, x):
return _GraphFn(x, add_quantization_nodes=True)
def GetParams(self):
return _GetParams(self)
def ShouldRunTest(self, run_params):
# Only test FP32/FP16 mode.
return not trt_test.IsQuantizationMode(run_params.precision_mode)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
    # The fake quant ops are not supported in FP32/FP16 mode, and will split
    # the graph into four TRT segments.
return ["TRTEngineOp_0", "TRTEngineOp_1", "TRTEngineOp_2", "TRTEngineOp_3"]
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/quantization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.platform import test
class ConcatenationTest(trt_test.TfTrtIntegrationTestBase):
"""Testing Concatenation in TF-TRT conversion."""
def GraphFn(self, x):
dtype = x.dtype
# scale
a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
r1 = x / a
a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
r2 = a / x
a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype)
r3 = a + x
a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype)
r4 = x * a
a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
r5 = x - a
a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
r6 = a - x
a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
r7 = x - a
a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
r8 = a - x
a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
r9 = gen_math_ops.maximum(x, a)
a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
r10 = gen_math_ops.minimum(a, x)
a = constant_op.constant(np.random.randn(3), dtype=dtype)
r11 = x * a
a = constant_op.constant(np.random.randn(1), dtype=dtype)
r12 = a * x
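    # For these rank-4 tensors, axis=3 and axis=-1 refer to the same (last)
    # dimension, so all three concatenations below run over the channel axis.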
concat1 = array_ops.concat([r1, r2, r3, r4, r5, r6], axis=-1)
concat2 = array_ops.concat([r7, r8, r9, r10, r11, r12], axis=3)
x = array_ops.concat([concat1, concat2], axis=-1)
return gen_array_ops.reshape(x, [2, -1], name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 3, 3, 1]],
[[2, 126]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/concatenation_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class VGGBlockTest(trt_test.TfTrtIntegrationTestBase):
"""Single vgg layer test in TF-TRT conversion."""
def GraphFn(self, x):
dtype = x.dtype
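    # A single VGG-style block: inference-mode batch norm (fixed
    # mean/variance), then conv -> bias -> ReLU -> max-pool. The expected
    # engine list below shows TF-TRT should convert this whole block into a
    # single engine.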
x, _, _ = nn_impl.fused_batch_norm(
x, [1.0, 1.0], [0.0, 0.0],
mean=[0.5, 0.5],
variance=[1.0, 1.0],
is_training=False)
e = constant_op.constant(
np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
conv = nn.conv2d(
input=x, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
t = nn.bias_add(conv, b, name="biasAdd")
relu = nn.relu(t, "relu")
idty = array_ops.identity(relu, "ID")
v = nn_ops.max_pool(
idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
return array_ops.squeeze(v, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[5, 8, 8, 2]],
[[5, 2, 2, 6]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/vgg_block_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class UnaryTest(trt_test.TfTrtIntegrationTestBase):
"""Test for unary operations in TF-TRT."""
def GraphFn(self, x1, x2):
x = x1
q = math_ops.abs(x)
q = q + 1.0
q = gen_math_ops.exp(q)
q = gen_math_ops.log(q)
q = array_ops.squeeze(q, axis=-2)
q = math_ops.abs(q)
q = q + 2.2
q = gen_math_ops.sqrt(q)
q = gen_math_ops.rsqrt(q)
q = math_ops.negative(q)
q = array_ops.squeeze(q, axis=3)
q = math_ops.abs(q)
q = q + 3.0
a = gen_math_ops.reciprocal(q)
x = constant_op.constant(np.random.randn(5, 8, 12), dtype=x.dtype)
q = math_ops.abs(x)
q = q + 2.0
q = gen_math_ops.exp(q)
q = gen_math_ops.log(q)
q = math_ops.abs(q)
q = q + 2.1
q = gen_math_ops.sqrt(q)
q = gen_math_ops.rsqrt(q)
q = math_ops.negative(q)
q = math_ops.abs(q)
q = q + 4.0
b = gen_math_ops.reciprocal(q)
# TODO(jie): this one will break, broadcasting on batch.
x = x2
q = math_ops.abs(x)
q = q + 5.0
q = gen_math_ops.exp(q)
q = array_ops.squeeze(q, axis=[-1, -2, 3])
q = gen_math_ops.log(q)
q = math_ops.abs(q)
q = q + 5.1
q = gen_array_ops.reshape(q, [12, 5, 1, 1, 8, 1, 12])
q = array_ops.squeeze(q, axis=[5, 2, 3])
q = gen_math_ops.sqrt(q)
q = math_ops.abs(q)
q = q + 5.2
q = gen_math_ops.rsqrt(q)
q = math_ops.negative(q)
q = math_ops.abs(q)
q = q + 5.3
c = gen_math_ops.reciprocal(q)
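    # Combine the three unary chains (a from x1, b from a constant, c from
    # x2) into a single result.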
q = a * b
q = q / c
return array_ops.squeeze(q, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32,
[[12, 5, 8, 1, 1, 12], [12, 5, 8, 1, 12, 1, 1]],
[[12, 5, 8, 12]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/unary_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BatchMatMulTest(trt_test.TfTrtIntegrationTestBase):
"""Testing conversion of BatchMatMul in TF-TRT conversion."""
def GraphFn(self, inp, inp1, inp2):
dtype = inp.dtype
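    # Three BatchMatMul paths (x1, x2, x3); the trt_incompatible_op calls
    # around the x3 path force the graph into multiple TRT segments, matching
    # the three expected engines below.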
b = constant_op.constant(np.random.randn(12, 5, 12, 7), dtype=dtype)
x1 = math_ops.matmul(inp, b)
c = constant_op.constant(np.random.randn(5, 1, 1), dtype=dtype)
x1 = x1 + c
x2 = math_ops.matmul(inp, inp1)
d = constant_op.constant(np.random.randn(5, 1, 1), dtype=dtype)
x2 = x2 * d
e = self.trt_incompatible_op(inp)
e = gen_array_ops.reshape(e, [12, 40, 12])
x3 = math_ops.matmul(e, inp2)
f = constant_op.constant(np.random.randn(40, 1), dtype=dtype)
x3 = x3 + f
x3 = gen_array_ops.reshape(x3, [12, 5, 8, 7])
x3 = self.trt_incompatible_op(x3)
out = x1 + x2 + x3
return array_ops.squeeze(out, name="output_0")
def GetParams(self):
# TODO(aaroey): test graph with different dtypes.
return self.BuildParams(self.GraphFn, dtypes.float32,
[[12, 5, 8, 12], [12, 5, 12, 7], [12, 12, 7]],
[[12, 5, 8, 7]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0", "TRTEngineOp_1", "TRTEngineOp_2"]
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/batch_matmul_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to test TF-TRT INT8 conversion without calibration on Mnist model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class DynamicInputShapesTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, x):
conv_filter1 = constant_op.constant(
np.ones([3, 3, 1, 8]), name="weights1", dtype=dtypes.float32)
bias1 = constant_op.constant(np.random.randn(8), dtype=dtypes.float32)
x = nn.conv2d(
input=x,
filter=conv_filter1,
strides=[1, 1, 1, 1],
padding="SAME",
name="conv")
x = nn.bias_add(x, bias1)
x = nn.relu(x)
conv_filter2 = constant_op.constant(
np.ones([3, 3, 8, 1]), name="weights2", dtype=dtypes.float32)
bias2 = constant_op.constant(np.random.randn(1), dtype=dtypes.float32)
x = nn.conv2d(
input=x,
filter=conv_filter2,
strides=[1, 1, 1, 1],
padding="SAME",
name="conv")
x = nn.bias_add(x, bias2)
return array_ops.identity(x, name="output")
def GetParams(self):
# TODO(laigd): we should test the following cases:
# - batch size is not changed, other dims are changing
# - batch size is decreasing, other dims are identical
# - batch size is decreasing, other dims are changing
# - batch size is increasing, other dims are identical
# - batch size is increasing, other dims are changing
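    # The shapes below exercise several of these cases. Because the input spec
    # leaves all non-channel dimensions unknown (None), each new shape may
    # require a new cached engine (up to the limit of 10 set in
    # GetConversionParams below).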
input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],
[[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],
[[1, 224, 224, 1]], [[1, 128, 224, 1]]]
expected_output_dims = input_dims
return trt_test.TfTrtIntegrationTestParams(
graph_fn=self.GraphFn,
input_specs=[
tensor_spec.TensorSpec([None, None, None, 1], dtypes.float32,
"input")
],
output_specs=[
tensor_spec.TensorSpec([None, None, None, 1], dtypes.float32,
"output")
],
input_dims=input_dims,
expected_output_dims=expected_output_dims)
def GetConversionParams(self, run_params):
"""Return a ConversionParams for test."""
conversion_params = super(DynamicInputShapesTest,
self).GetConversionParams(run_params)
return conversion_params._replace(
maximum_cached_engines=10,
        # Disable the layout optimizer, since it will convert BiasAdd with
        # NHWC format to NCHW format under four-dimensional input.
rewriter_config_template=trt_test.OptimizerDisabledRewriterConfig())
def ExpectedEnginesToBuild(self, run_params):
return ["TRTEngineOp_0"]
def ShouldRunTest(self, run_params):
return (run_params.dynamic_engine and
not trt_test.IsQuantizationMode(run_params.precision_mode))
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-03 if run_params.precision_mode == "FP32" else 1.e-01
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-03 if run_params.precision_mode == "FP32" else 1.e-01
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/dynamic_input_shapes_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class NeighboringEngineTest(trt_test.TfTrtIntegrationTestBase):
"""Neighboring node wiring tests in TF-TRT conversion."""
def GraphFn(self, x):
dtype = x.dtype
e = constant_op.constant(
np.random.normal(.3, 0.05, [3, 2, 3, 4]), name="weights", dtype=dtype)
conv = nn.conv2d(
input=x,
filter=e,
data_format="NCHW",
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
b = constant_op.constant(
np.random.normal(1.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
t = math_ops.mul(conv, b, name="mul")
e = self.trt_incompatible_op(conv, name="incompatible")
t = math_ops.sub(t, e, name="sub")
return array_ops.squeeze(t, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 3, 7, 5]],
[[2, 4, 5, 4]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["bias", "mul", "sub"],
"TRTEngineOp_1": ["weights", "conv"]
}
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/neighboring_engine_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class MemoryAlignmentTest(trt_test.TfTrtIntegrationTestBase):
"""Testing conversion of BatchMatMul in TF-TRT conversion."""
def GraphFn(self, inp):
dtype = inp.dtype
e1 = constant_op.constant(
np.random.randn(1, 1, 3, 5), name="kernel_1", dtype=dtype)
e2 = constant_op.constant(
np.random.randn(1, 1, 5, 10), name="kernel_2", dtype=dtype)
conv = nn.conv2d(
input=inp,
filter=e1,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
out = nn.conv2d(
input=conv,
filter=e2,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv_2")
return array_ops.squeeze(out, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 15, 15, 3]],
[[2, 15, 15, 10]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-06 if run_params.precision_mode == "FP32" else 1.e-02
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 0.1
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/memory_alignment_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class RankTwoTest(trt_test.TfTrtIntegrationTestBase):
"""Test for rank 2 input in TF-TRT."""
def GraphFn(self, x1, x2):
# Two paths: first with rank 2 input, second with rank 4 input.
outputs = []
xs = [x1, x2]
for i in range(2):
x = xs[i]
c = constant_op.constant(1.0, name="c%d_1" % i)
q = math_ops.add(x, c, name="add%d_1" % i)
q = math_ops.abs(q, name="abs%d_1" % i)
c = constant_op.constant(2.2, name="c%d_2" % i)
q = math_ops.add(q, c, name="add%d_2" % i)
q = math_ops.abs(q, name="abs%d_2" % i)
c = constant_op.constant(3.0, name="c%d_3" % i)
q = math_ops.add(q, c, name="add%d_3" % i)
if i == 0:
axis = constant_op.constant(-1, dtype=dtypes.int32, name="axis")
for j in range(2):
q = array_ops.expand_dims(q, axis, name="expand%d_%d" % (i, j))
q = self.trt_incompatible_op(q)
q = gen_math_ops.reciprocal(q, name="reciprocal%d" % i)
outputs.append(q)
# Combine both paths
q = math_ops.add(outputs[0], outputs[1], name="add")
return array_ops.squeeze(q, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32,
[[12, 5], [12, 5, 2, 2]], [[12, 5, 2, 2]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"add0_1", "add0_2", "add0_3", "c0_1", "c0_2", "c0_3", "abs0_1",
"abs0_2", "expand0_0", "expand0_1", "axis"
],
"TRTEngineOp_1": [
"add", "add1_1", "add1_2", "add1_3", "c1_1", "c1_2", "c1_3",
"abs1_1", "abs1_2", "reciprocal0", "reciprocal1"
],
}
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/rank_two_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import test
class BinaryTensorWeightBroadcastTest(trt_test.TfTrtIntegrationTestBase):
"""Tests for scale & elementwise layers in TF-TRT."""
def _ConstOp(self, shape):
return constant_op.constant(np.random.randn(*shape), dtype=dtypes.float32)
def GraphFn(self, x):
for weights_shape in [
(1,), # scale
(24, 1, 1), # scale
(24, 24, 20), # scale
(20,), # elementwise
(1, 24, 1, 1), # elementwise
(1, 24, 24, 1), # elementwise
(1, 24, 24, 20), # elementwise
(24, 20), # elementwise
]:
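      # Each weights shape yields two broadcast additions separated by
      # TRT-incompatible ops, so every iteration should produce two small
      # engines (8 shapes x 2 = 16 engines, matching the expected list below).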
a = self._ConstOp(weights_shape)
f = x + a
x = self.trt_incompatible_op(f)
a = self._ConstOp(weights_shape)
f = a + x
x = self.trt_incompatible_op(f)
return gen_array_ops.reshape(x, [5, -1], name="output_0")
def GetParams(self):
# TODO(aaroey): test graph with different dtypes.
return self.BuildParams(self.GraphFn, dtypes.float32, [[10, 24, 24, 20]],
[[5, 23040]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_%d" % i for i in range(16)]
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/binary_tensor_weight_broadcast_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This test checks a situation where the same tensor is considered as an output
multiple times because it has been duplicated by 2+ indentity ops. Previously,
the tensor would be renamed multiple times, overwriting the output binding name
which resulted in a runtime error when the binding would not be found.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class IdentityTest(trt_test.TfTrtIntegrationTestBase):
"""Testing engine with the same tensor repeated as output via identity."""
def _ConstOp(self, shape):
return constant_op.constant(np.random.randn(*shape), dtype=dtypes.float32)
def GraphFn(self, x):
b = self._ConstOp((32, 4))
x1 = math_ops.matmul(x, b)
b = self._ConstOp((1, 4))
x1 = x1 + b
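    # Expose the same tensor as three outputs: twice directly and once through
    # an extra identity, reproducing the duplicated-output-binding scenario
    # described in the module docstring.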
out1 = array_ops.identity(x1, name='output_0')
out2 = array_ops.identity(x1, name='output_1')
iden1 = array_ops.identity(x1)
out3 = array_ops.identity(iden1, name='output_2')
return [out1, out2, out3]
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 32]],
[[100, 4]] * 3)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ['TRTEngineOp_0']
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/identity_output_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class ConstBroadcastTest(trt_test.TfTrtIntegrationTestBase):
"""Test for Constant broadcasting in TF-TRT."""
def GraphFn(self, x):
dtype = x.dtype
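    # Three ways of building a constant filter that must be broadcast or
    # reshaped: a scalar expanded to the filter shape, a flat vector reshaped,
    # and an array that already has the filter shape.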
filt1 = constant_op.constant(
0.3, shape=(3, 3, 2, 1), dtype=dtype, name='filt1')
y1 = nn.conv2d(x, filt1, strides=[1, 1, 1, 1], padding='SAME', name='y1')
z1 = nn.relu(y1, name='z1')
filt2 = constant_op.constant(
np.random.randn(9), shape=(3, 3, 1, 1), dtype=dtype, name='filt2')
y2 = nn.conv2d(z1, filt2, strides=[1, 1, 1, 1], padding='SAME', name='y2')
z2 = nn.relu(y2, name='z')
filt3 = constant_op.constant(
np.random.randn(3, 3, 1, 1),
shape=(3, 3, 1, 1),
dtype=dtype,
name='filt3')
y3 = nn.conv2d(z2, filt3, strides=[1, 1, 1, 1], padding='SAME', name='y3')
return nn.relu(y3, name='output_0')
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[5, 12, 12, 2]],
[[5, 12, 12, 1]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ['TRTEngineOp_0']
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-04 if run_params.precision_mode == 'FP32' else 1.e-02
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-04 if run_params.precision_mode == 'FP32' else 1.e-02
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/const_broadcast_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to test TF-TensorRT conversion of CombinedNMS op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import get_linked_tensorrt_version
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.platform import test
class CombinedNmsTest(trt_test.TfTrtIntegrationTestBase):
"""Test for CombinedNMS op in TF-TRT."""
def GraphFn(self, boxes, scores):
max_output_size_per_class = 3
max_total_size = 3
score_threshold = 0.1
iou_threshold = 0.5
    # Scalar NMS parameters, passed in as constant tensors.
max_output_size_per_class_tensor = constant_op.constant(
max_output_size_per_class,
dtype=dtypes.int32,
name='max_output_size_per_class')
max_total_size_tensor = constant_op.constant(
max_total_size, dtype=dtypes.int32, name='max_total_size')
iou_threshold_tensor = constant_op.constant(
iou_threshold, dtype=dtypes.float32, name='iou_threshold')
score_threshold_tensor = constant_op.constant(
score_threshold, dtype=dtypes.float32, name='score_threshold')
nms_output = image_ops_impl.combined_non_max_suppression(
boxes,
scores,
max_output_size_per_class_tensor,
max_total_size_tensor,
iou_threshold_tensor,
score_threshold_tensor,
name='combined_nms')
return [
array_ops.identity(output, name=('output_%d' % i))
for i, output in enumerate(nms_output)
]
def GetParams(self):
# Parameters
q = 1
batch_size = 1
num_boxes = 200
num_classes = 2
max_total_size = 3
boxes_shape = [batch_size, num_boxes, q, 4]
scores_shape = [batch_size, num_boxes, num_classes]
nmsed_boxes_shape = [batch_size, max_total_size, 4]
nmsed_scores_shape = [batch_size, max_total_size]
nmsed_classes_shape = [batch_size, max_total_size]
valid_detections_shape = [batch_size]
return self.BuildParams(self.GraphFn, dtypes.float32,
[boxes_shape, scores_shape], [
nmsed_boxes_shape, nmsed_scores_shape,
nmsed_classes_shape, valid_detections_shape
])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
'TRTEngineOp_0': [
'combined_nms/CombinedNonMaxSuppression',
'max_output_size_per_class', 'max_total_size', 'iou_threshold',
'score_threshold'
]
}
def ShouldRunTest(self, run_params):
# There is no CombinedNonMaxSuppression op for GPU at the moment, so
# calibration will fail.
# TODO(laigd): fix this.
if trt_test.IsQuantizationMode(run_params.precision_mode):
return False
# Only run for TRT 5.1 and above.
ver = get_linked_tensorrt_version()
return ver[0] > 5 or (ver[0] == 5 and ver[1] >= 1)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/combined_nms_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
def conv2d_layer(inputs,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1)):
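  """Builds a Conv2D op from Keras-style arguments (test helper)."""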
dtype = inputs.dtype
c_axis = -1 if data_format == "channels_last" else 1
nchan = inputs.shape[c_axis]
weights_shape = (kernel_size[0], kernel_size[1], nchan, filters)
weights = constant_op.constant(np.random.randn(*weights_shape), dtype=dtype)
padding = padding.upper()
if data_format == "channels_last":
strides = [1] + list(strides) + [1]
dilations = [1] + list(dilation_rate) + [1]
data_format = "NHWC"
else:
strides = [1, 1] + list(strides)
dilations = [1, 1] + list(dilation_rate)
data_format = "NCHW"
return gen_nn_ops.conv2d(
inputs,
weights,
strides=strides,
padding=padding,
dilations=dilations,
data_format=data_format)
def div_round_up(n, d):
return (n - 1) // d + 1
def build_graph(inp,
dtype,
num_filters,
data_format,
kernel_sizes,
dilation_rates,
padding="same"):
results = []
for kernel_size in kernel_sizes:
for dilation_rate in dilation_rates:
result = conv2d_layer(inp, num_filters, kernel_size, (1, 1), padding,
data_format, dilation_rate)
results.append(result)
output = sum(results)
return array_ops.identity(output, name="output_0")
class Conv2DNCHWTest(trt_test.TfTrtIntegrationTestBase):
"""Testing conversion of Conv2D (data_format=NCHW) in TF-TRT conversion."""
def GraphFn(self, inp):
np.random.seed(1234)
return build_graph(
inp=inp,
dtype=dtypes.float32,
num_filters=5,
data_format="channels_first",
kernel_sizes=[(3, 3), (3, 2)],
dilation_rates=[(1, 1), (2, 3)])
def GetParams(self):
# TODO(aaroey): test graph with different dtypes.
return self.BuildParams(self.GraphFn, dtypes.float32, [[13, 3, 7, 11]],
[[13, 5, 7, 11]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
class Conv2DNHWCTest(trt_test.TfTrtIntegrationTestBase):
"""Testing conversion of Conv2D (data_format=NCHW) in TF-TRT conversion."""
def GraphFn(self, inp):
np.random.seed(1234)
return build_graph(
inp=inp,
dtype=dtypes.float32,
num_filters=5,
data_format="channels_last",
kernel_sizes=[(3, 3), (3, 2)],
dilation_rates=[(1, 1), (2, 3)])
def GetParams(self):
# TODO(aaroey): test graph with different dtypes.
return self.BuildParams(self.GraphFn, dtypes.float32, [[13, 7, 11, 3]],
[[13, 7, 11, 5]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
class Conv2DStridedNCHWTest(trt_test.TfTrtIntegrationTestBase):
"""Testing conversion of strided Conv2D (data_format=NCHW)."""
def GraphFn(self, inp):
np.random.seed(1234)
num_filters = 5
output = inp
output = conv2d_layer(
output,
num_filters, (3, 2),
strides=(2, 2),
padding="same",
data_format="channels_first")
output = conv2d_layer(
output,
num_filters, (3, 3),
strides=(2, 2),
dilation_rate=(2, 3),
padding="same",
data_format="channels_first")
return array_ops.identity(output, name="output_0")
def GetParams(self):
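    # Two stride-2 SAME convolutions halve the spatial dims twice:
    # 7 -> 4 -> 2 and 11 -> 6 -> 3, giving the expected output shape below.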
return self.BuildParams(self.GraphFn, dtypes.float32, [[13, 3, 7, 11]],
[[13, 5, 2, 3]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
class Conv2DTransposeTest(trt_test.TfTrtIntegrationTestBase):
  """Testing conversion of conv2d_transpose (AKA Conv2DBackpropInput)."""
def GraphFn(self, inp):
np.random.seed(1234)
dtype = inp.dtype
n, c, h, w = 13, 3, 7, 11
num_filters = 8
weights_shape = [2, 2, num_filters, c]
weights = constant_op.constant(np.random.randn(*weights_shape), dtype=dtype)
output_shape = constant_op.constant([n, num_filters, h * 2, w * 2],
dtype=dtypes.int32)
output = nn_ops.conv2d_transpose(
inp,
weights,
output_shape,
strides=[1, 1, 2, 2],
padding="SAME",
data_format="NCHW")
return array_ops.identity(output, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[13, 3, 7, 11]],
[[13, 8, 14, 22]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/conv2d_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test LRUCache by running different input batch sizes on same network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class LRUCacheTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, x):
conv_filter = constant_op.constant(
np.random.randn(3, 3, 2, 1), dtype=dtypes.float32)
x = nn.conv2d(
input=x,
filter=conv_filter,
strides=[1, 1, 1, 1],
padding="SAME",
name="conv")
bias = constant_op.constant(
np.random.randn(1, 10, 10, 1), dtype=dtypes.float32)
x = math_ops.add(x, bias)
x = nn.relu(x)
return array_ops.identity(x, name="output")
def GetParams(self):
dtype = dtypes.float32
input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
[[2, 10, 10, 2]]]
expected_output_dims = [[[1, 10, 10, 1]], [[2, 10, 10, 1]], [[4, 10, 10,
1]],
[[2, 10, 10, 1]]]
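    # Batch sizes run 1, 2, 4, then 2 again, exercising how the LRU engine
    # cache handles repeated and varying batch sizes.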
return trt_test.TfTrtIntegrationTestParams(
graph_fn=self.GraphFn,
input_specs=[
tensor_spec.TensorSpec([None, 10, 10, 2], dtypes.float32, "input")
],
output_specs=[
tensor_spec.TensorSpec([None, 10, 10, 1], dtypes.float32, "output")
],
input_dims=input_dims,
expected_output_dims=expected_output_dims)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
def ShouldRunTest(self, run_params):
return (run_params.dynamic_engine and
not trt_test.IsQuantizationMode(run_params.precision_mode))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/lru_cache_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import itertools
import os
import tempfile
import warnings
import numpy as np
import six
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import is_tensorrt_enabled
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.compiler.tensorrt import trt_convert
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
from tensorflow.python.tools import saved_model_utils
TfTrtIntegrationTestParams = namedtuple(
"TfTrtIntegrationTestParams",
[
# A function that creates the TF graph for testing.
"graph_fn",
# A list of specifications for input tensors.
"input_specs",
# A list of specifications for output tensors.
"output_specs",
        # A list of lists of input shapes. Each shape must match the
        # corresponding element in `input_specs`.
        "input_dims",
        # A list of lists of expected output shapes. Each shape must match the
        # corresponding element in `output_specs`.
        "expected_output_dims"
])
RunParams = namedtuple(
"RunParams",
[
# Whether to run the conversion online with RewriterConfig, or offline
# with TrtGraphConverter.
"convert_online",
"precision_mode",
"dynamic_engine",
"use_calibration",
"test_name",
])
FP32 = "FP32"
FP16 = "FP16"
INT8 = "INT8"
PRECISION_MODES = [FP32, FP16, INT8]
def IsQuantizationMode(mode):
return mode == "INT8"
def IsQuantizationWithCalibration(params):
return IsQuantizationMode(params.precision_mode) and params.use_calibration
class GraphState(object):
ORIGINAL = 0
CALIBRATE = 1
INFERENCE = 2
def OptimizerDisabledRewriterConfig():
"""Returns a RewriterConfig with all default Grappler optimizers disabled."""
rewriter_config = rewriter_config_pb2.RewriterConfig()
# Turn off all default Grappler optimizers.
off = rewriter_config_pb2.RewriterConfig.OFF
rewriter_config.layout_optimizer = off
rewriter_config.constant_folding = off
rewriter_config.shape_optimization = off
rewriter_config.remapping = off
rewriter_config.arithmetic_optimization = off
rewriter_config.dependency_optimization = off
rewriter_config.loop_optimization = off
rewriter_config.function_optimization = off
rewriter_config.debug_stripper = off
rewriter_config.disable_model_pruning = True
rewriter_config.scoped_allocator_optimization = off
rewriter_config.memory_optimization = (
rewriter_config_pb2.RewriterConfig.NO_MEM_OPT)
rewriter_config.pin_to_host_optimization = off
rewriter_config.auto_parallel.enable = False
# Run only once for each enabled optimizer.
rewriter_config.meta_optimizer_iterations = (
rewriter_config_pb2.RewriterConfig.ONE)
return rewriter_config
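# Example usage: tests that are sensitive to Grappler rewrites, such as
# DynamicInputShapesTest earlier in this collection, plug this config in via
#   conversion_params._replace(
#       rewriter_config_template=OptimizerDisabledRewriterConfig())
# so that effectively only the TensorRT optimizer touches the graph.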
class TfTrtIntegrationTestBase(test_util.TensorFlowTestCase):
"""Class to test Tensorflow-TensorRT integration."""
@property
def trt_incompatible_op(self):
return math_ops.erf
@property
def precision_modes(self):
return ["FP32", "FP16", "INT8"]
# str is bytes in py2, but unicode in py3.
def _ToUnicode(self, s):
if six.PY2:
if isinstance(s, unicode):
return s
return s.decode("utf-8")
else:
if isinstance(s, str):
return s
return s.decode("utf-8")
def _ToBytes(self, s):
if six.PY2:
if isinstance(s, unicode):
return s.encode("utf-8")
return s
else:
if isinstance(s, str):
return s.encode("utf-8")
return s
def _ToString(self, s):
if six.PY2:
if isinstance(s, unicode):
return s.encode("utf-8")
return s
else:
if isinstance(s, str):
return s
return s.decode("utf-8")
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TfTrtIntegrationTestBase, self).__init__(methodName)
self._trt_test_params = None
def setUp(self):
"""Setup method."""
super(TfTrtIntegrationTestBase, self).setUp()
warnings.simplefilter("always")
def BuildParams(self, graph_fn, dtype, input_shapes, output_shapes):
"""Build test parameters when not considering dynamic shapes."""
def _Validate(shapes):
# Make sure all the shapes are fully specified.
for shape in shapes:
assert all(shape)
_Validate(input_shapes)
_Validate(output_shapes)
return TfTrtIntegrationTestParams(
graph_fn=graph_fn,
        # Unset the batch dim of the specs to make sure TRT can tolerate
        # changes to it.
input_specs=[
tensor_spec.TensorSpec([None] + shape[1:], dtype, "input_%d" % i)
for i, shape in enumerate(input_shapes)
],
output_specs=[
tensor_spec.TensorSpec([None] + shape[1:], dtype, "output_%d" % i)
for i, shape in enumerate(output_shapes)
],
input_dims=[input_shapes],
expected_output_dims=[output_shapes])
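  # Example: ConcatenationTest earlier in this collection calls
  #   self.BuildParams(self.GraphFn, dtypes.float32, [[2, 3, 3, 1]], [[2, 126]])
  # to declare one float32 input of shape [2, 3, 3, 1] and one output of shape
  # [2, 126], with the batch dimension left dynamic in the generated specs.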
def GetParams(self):
"""Return a TfTrtIntegrationTestParams for test, implemented by subclass."""
raise NotImplementedError()
def GetConversionParams(self, run_params):
"""Return a TrtConversionParams for test."""
batch_list = []
for dims_list in self._GetParamsCached().input_dims:
assert dims_list
# Each list of shapes should have same batch size.
input_batches = [dims[0] for dims in dims_list]
assert max(input_batches) == min(input_batches)
batch_list.append(input_batches[0])
    conversion_params = trt_convert.TrtConversionParams(
        rewriter_config_template=None,
        max_workspace_size_bytes=1 << 25,
        precision_mode=run_params.precision_mode,
        minimum_segment_size=2,
        is_dynamic_op=run_params.dynamic_engine,
        maximum_cached_engines=1,
        use_calibration=run_params.use_calibration,
        use_function_backup=False,
        # We use the minimum of all the batch sizes, so when multiple different
        # input shapes are provided it'll always create new engines in the
        # cache, and we can therefore test the cache behavior.
        max_batch_size=min(batch_list),
        cached_engine_batches=None)
return conversion_params._replace(
use_function_backup=IsQuantizationWithCalibration(conversion_params))
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
# This setting combination requires quantization nodes to be present in
# order to build the engine.
return (run_params.use_calibration or
not IsQuantizationMode(run_params.precision_mode))
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build, implemented by subclass."""
raise NotImplementedError()
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-02
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-02
def _GetParamsCached(self):
if self._trt_test_params is None:
self._trt_test_params = self.GetParams()
return self._trt_test_params
def _GetGPUOptions(self):
gpu_options = config_pb2.GPUOptions()
gpu_options.allow_growth = True
return gpu_options
def _GetConfigProto(self, run_params, graph_state):
"""Get config proto based on specific settings."""
conversion_params = self.GetConversionParams(run_params)
if graph_state == GraphState.INFERENCE and run_params.convert_online:
rewriter_cfg = trt_convert.get_tensorrt_rewriter_config(conversion_params)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_cfg)
else:
graph_options = config_pb2.GraphOptions()
if conversion_params.rewriter_config_template is not None:
graph_options.rewrite_options.CopyFrom(
conversion_params.rewriter_config_template)
config = config_pb2.ConfigProto(
gpu_options=self._GetGPUOptions(), graph_options=graph_options)
return config
def _GetFeedNames(self):
params = self._GetParamsCached()
    # Construct the feed tensor names by appending :0 to the node names.
return [spec.name + ":0" for spec in params.input_specs]
def _GetFetchNames(self):
params = self._GetParamsCached()
    # Construct the fetch tensor names by appending :0 to the node names.
return [spec.name + ":0" for spec in params.output_specs]
def _GetFeedDict(self, inputs_data):
return {name: data for name, data in zip(self._GetFeedNames(), inputs_data)}
def _RunGraph(self,
run_params,
saved_model_dir,
inputs_data,
config,
graph_state,
num_runs=2):
"""Run given graphdef multiple times."""
params = self._GetParamsCached()
for data in inputs_data:
assert len(params.input_specs) == len(data)
fetches = self._GetFetchNames()
g = ops.Graph()
with g.as_default():
with self.session(graph=g, config=config, use_gpu=True) as sess:
loader.load(sess, [tag_constants.SERVING], saved_model_dir)
vals = []
        # Run once for each set of input shapes.
for expected_shapes, current_input_data in zip(
params.expected_output_dims, inputs_data):
val = None
for _ in range(num_runs):
new_val = sess.run(fetches, self._GetFeedDict(current_input_data))
self.assertEqual(len(expected_shapes), len(new_val))
for expected_shape, actual_val in zip(expected_shapes, new_val):
self.assertEqual(list(expected_shape), list(actual_val.shape))
if val is not None:
self.assertAllClose(val, new_val, atol=1.e-06, rtol=1.e-06)
val = new_val
vals.append(val)
return vals
def _CreateConverter(self, saved_model_dir, session_config,
conversion_params):
"""Return a TrtGraphConverter."""
converter = trt_convert.TrtGraphConverter(
input_saved_model_dir=saved_model_dir,
session_config=session_config,
max_batch_size=conversion_params.max_batch_size,
max_workspace_size_bytes=conversion_params.max_workspace_size_bytes,
precision_mode=conversion_params.precision_mode,
minimum_segment_size=conversion_params.minimum_segment_size,
is_dynamic_op=conversion_params.is_dynamic_op,
maximum_cached_engines=conversion_params.maximum_cached_engines,
cached_engine_batches=conversion_params.cached_engine_batches,
use_calibration=conversion_params.use_calibration,
use_function_backup=conversion_params.use_function_backup)
return converter
def _GetCalibratedInferGraph(self, run_params, saved_model_dir, inputs_data):
"""Return trt converted graphdef in INT8 mode."""
conversion_params = self.GetConversionParams(run_params)
logging.info(conversion_params)
assert conversion_params.precision_mode == "INT8"
assert conversion_params.is_dynamic_op
assert conversion_params.maximum_cached_engines == 1
assert not conversion_params.cached_engine_batches
assert conversion_params.use_calibration
    # We only support calibrating a single engine.
# TODO(aaroey): fix this.
assert len(inputs_data) == 1
session_config = self._GetConfigProto(run_params, GraphState.CALIBRATE)
logging.info("Running calibration graph, config:\n%s", str(session_config))
converter = self._CreateConverter(saved_model_dir, session_config,
conversion_params)
int8_gdef = converter.convert()
self._VerifyGraphDef(run_params, saved_model_dir, int8_gdef,
GraphState.CALIBRATE)
converter.calibrate(
fetch_names=self._GetFetchNames(),
num_runs=5,
feed_dict_fn=lambda: self._GetFeedDict(inputs_data[0]))
trt_saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
converter.save(trt_saved_model_dir)
return trt_saved_model_dir
def _GetInferGraph(self, run_params, saved_model_dir):
"""Return trt converted graphdef."""
conversion_params = self.GetConversionParams(run_params)
logging.info(conversion_params)
session_config = self._GetConfigProto(run_params, GraphState.INFERENCE)
logging.info("Creating TRT graph for inference, config\n%s",
str(session_config))
converter = self._CreateConverter(saved_model_dir, session_config,
conversion_params)
converter.convert()
trt_saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
converter.save(trt_saved_model_dir)
return trt_saved_model_dir
def _WriteGraph(self, run_params, gdef, graph_state):
if graph_state == GraphState.ORIGINAL:
label = "Original"
elif graph_state == GraphState.CALIBRATE:
label = "CalibEngine"
elif graph_state == GraphState.INFERENCE:
label = "InferEngine"
graph_name = (
self.__class__.__name__ + "_" + run_params.test_name + "_" + label +
".pbtxt")
temp_dir = os.getenv("TRT_TEST_TMPDIR", self.get_temp_dir())
if temp_dir:
logging.info("Writing graph to %s/%s", temp_dir, graph_name)
graph_io.write_graph(gdef, temp_dir, graph_name)
def _VerifyConnections(self, expected_engines, original_gdef, converted_gdef):
old_to_new_node_map = {
self._ToString(node.name): self._ToString(node.name)
for node in original_gdef.node
}
for engine_name, node_names in expected_engines.items():
for node_name in node_names:
old_to_new_node_map[node_name] = engine_name
name_to_node_map = {
self._ToString(node.name): node for node in original_gdef.node
}
def _InputName(inp):
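      # Split an input name like "^node:1" into its control-dependency prefix
      # ("^", if any) and the bare node name, dropping the output port suffix.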
inp = self._ToString(inp)
prefix = ""
if inp[0] == "^":
prefix = "^"
inp = inp[1:]
parts = inp.split(":")
if len(parts) > 1 and parts[-1].isdigit():
inp = inp[:-len(parts[-1]) - 1]
return (prefix, inp)
# Compute the expected mapping from each node to its input nodes.
expected_input_map = {}
removed_const_nodes = set([
self._ToString(node.name)
for node in original_gdef.node
if node.op == "Const"
])
for node in original_gdef.node:
name_str = self._ToString(node.name)
target_node_name = old_to_new_node_map[name_str]
is_engine_op = (target_node_name != name_str)
if target_node_name not in expected_input_map:
expected_input_map[target_node_name] = set()
input_set = expected_input_map[target_node_name]
for inp in node.input:
(prefix, inp_name) = _InputName(inp)
mapped_input = old_to_new_node_map[inp_name]
# Add the input only if it's outside the segment (note that it could be
# in a different engine).
if not is_engine_op or (mapped_input != target_node_name and
name_to_node_map[inp_name].op != "Const"):
input_set.add(prefix + mapped_input)
if mapped_input in removed_const_nodes:
removed_const_nodes.remove(mapped_input)
# Remove const nodes that have no outputs.
expected_input_map = {
k: v
for k, v in expected_input_map.items()
if k not in removed_const_nodes
}
# Compute the actual mapping from each node to its input nodes.
actual_input_map = {}
for node in converted_gdef.node:
name_str = self._ToString(node.name)
actual_input_map[name_str] = set()
input_set = actual_input_map[name_str]
for inp in node.input:
(prefix, node_name) = _InputName(inp)
input_set.add(prefix + node_name)
self.assertEqual(
expected_input_map,
actual_input_map,
msg="\nexpected:\n%s\nvs actual:\n%s" %
(sorted(expected_input_map.items()), sorted(actual_input_map.items())))
def _GetGraphDef(self, gdef_or_saved_model_dir):
if isinstance(gdef_or_saved_model_dir, str):
return saved_model_utils.get_meta_graph_def(
gdef_or_saved_model_dir, tag_constants.SERVING).graph_def
assert isinstance(gdef_or_saved_model_dir, graph_pb2.GraphDef)
return gdef_or_saved_model_dir
def _VerifyGraphDef(self, run_params, original_gdef_or_saved_model_dir,
gdef_or_saved_model_dir_to_verify, graph_state):
original_gdef = self._GetGraphDef(original_gdef_or_saved_model_dir)
gdef_to_verify = self._GetGraphDef(gdef_or_saved_model_dir_to_verify)
self._WriteGraph(run_params, gdef_to_verify, graph_state)
expected_engines = self.ExpectedEnginesToBuild(run_params)
num_engines = 0
functions = [f.signature.name for f in gdef_to_verify.library.function]
for node in gdef_to_verify.node:
if node.op == "TRTEngineOp":
logging.info("Found TRTEngineOp: " + node.name)
num_engines += 1
segment_funcdef_name = node.attr["segment_funcdef_name"].s
function_name = node.name + "_native_segment"
if IsQuantizationWithCalibration(run_params):
self.assertNotEmpty(segment_funcdef_name, node.name)
self.assertIn(function_name, functions)
else:
self.assertEmpty(segment_funcdef_name, node.name)
self.assertNotIn(function_name, functions)
self.assertIn(node.name, expected_engines)
self.assertTrue(len(node.attr["serialized_segment"].s), node.name)
self.assertEqual(
self._ToBytes(run_params.precision_mode),
node.attr["precision_mode"].s, node.name)
is_dynamic_engine = not node.attr["static_engine"].b
self.assertEqual(run_params.dynamic_engine, is_dynamic_engine,
node.name)
self.assertEqual(node.attr["use_calibration"].b,
run_params.use_calibration, node.name)
has_calibration_data = len(node.attr["calibration_data"].s)
if (IsQuantizationWithCalibration(run_params) and
graph_state == GraphState.INFERENCE):
self.assertTrue(has_calibration_data, node.name)
else:
self.assertFalse(has_calibration_data, node.name)
if graph_state == GraphState.ORIGINAL:
self.assertEqual(0, num_engines)
else:
self.assertEqual(num_engines, len(expected_engines))
if isinstance(expected_engines, dict):
self._VerifyConnections(expected_engines, original_gdef, gdef_to_verify)
# TODO(aaroey): consider verifying the corresponding TF function.
def _MakeSavedModel(self, run_params):
"""Write the saved model as an input for testing."""
params = self._GetParamsCached()
g = ops.Graph()
with g.as_default():
inputs = []
for spec in params.input_specs:
inp = array_ops.placeholder(
dtype=spec.dtype, shape=spec.shape, name=spec.name)
inputs.append(inp)
outputs = params.graph_fn(*inputs)
if not isinstance(outputs, list) and not isinstance(outputs, tuple):
outputs = [outputs]
for spec, output in zip(params.output_specs, outputs):
assert spec.name == output.name.split(":")[0]
signature_def = signature_def_utils.build_signature_def(
inputs={inp.op.name: utils.build_tensor_info(inp) for inp in inputs},
outputs={out.op.name: utils.build_tensor_info(out) for out in outputs},
method_name=signature_constants.PREDICT_METHOD_NAME)
saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
saved_model_builder = builder.SavedModelBuilder(saved_model_dir)
with self.session(
graph=g, config=self._GetConfigProto(run_params,
GraphState.ORIGINAL)) as sess:
saved_model_builder.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature_def
})
saved_model_builder.save()
return saved_model_dir
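  # Layout note (an illustrative aside, not part of the original file): the
  # directory returned above holds a single MetaGraphDef tagged SERVING whose
  # default serving signature maps each placeholder and output name to a
  # TensorInfo; _GetGraphDef() reads it back via
  # saved_model_utils.get_meta_graph_def(dir, tag_constants.SERVING).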
def RunTest(self, run_params):
if not self.ShouldRunTest(run_params):
return
saved_model_dir = self._MakeSavedModel(run_params)
np.random.seed(12345) # Fix the seed so the test is deterministic.
inputs_data = []
input_specs = self._GetParamsCached().input_specs
for dim_list in self._GetParamsCached().input_dims:
assert len(input_specs) == len(dim_list)
current_input_data = []
for spec, np_shape in zip(input_specs, dim_list):
np_dtype = spec.dtype.as_numpy_dtype()
        # Multiply the input by some constant to avoid an all-zeros input for
        # integer types.
        scale = 10.0 if np.issubdtype(np_dtype, np.integer) else 1.0
        # TODO(laigd): add debug options. E.g. we can set the input data to be
        # continuous natural numbers:
        # seq = np.arange(np.prod(np_shape))
        # seq.resize(np_shape)
        # current_input_data.append(scale * seq.astype(np_dtype))
current_input_data.append(
(scale * np.random.random_sample(np_shape)).astype(np_dtype))
inputs_data.append(current_input_data)
# Verify original graph.
self._VerifyGraphDef(run_params, saved_model_dir, saved_model_dir,
GraphState.ORIGINAL)
# Run original graph without trt to get reference result.
config_no_trt = self._GetConfigProto(run_params, GraphState.ORIGINAL)
logging.info("Running original graph w/o trt, config:\n%s",
str(config_no_trt))
ref_result = self._RunGraph(run_params, saved_model_dir, inputs_data,
config_no_trt, GraphState.ORIGINAL)
# Run calibration if necessary.
if IsQuantizationWithCalibration(run_params):
infer_saved_model_dir = self._GetCalibratedInferGraph(
run_params, saved_model_dir, inputs_data)
self._VerifyGraphDef(run_params, saved_model_dir, infer_saved_model_dir,
GraphState.INFERENCE)
elif not run_params.convert_online:
infer_saved_model_dir = self._GetInferGraph(run_params, saved_model_dir)
self._VerifyGraphDef(run_params, saved_model_dir, infer_saved_model_dir,
GraphState.INFERENCE)
else:
infer_saved_model_dir = saved_model_dir
# Run inference.
infer_config = self._GetConfigProto(run_params, GraphState.INFERENCE)
logging.info("Running final inference graph, config:\n%s",
str(infer_config))
result = self._RunGraph(run_params, infer_saved_model_dir, inputs_data,
infer_config, GraphState.INFERENCE)
self.assertAllClose(
ref_result,
result,
atol=self.ExpectedAbsoluteTolerance(run_params),
rtol=self.ExpectedRelativeTolerance(run_params))
def testIdempotence(self):
    # Test that applying the TensorRT optimizer or offline conversion tools
    # multiple times to the same graph results in the same graph.
#
# TODO(aaroey): implement this.
pass
def _AddTests(test_class):
"""Adds test methods to TfTrtIntegrationTestBase."""
def _GetTestConfigs():
"""Returns the config combinations to run the test."""
convert_online, convert_offline = True, False
dynamic_engine, static_engine = True, False
use_calibration, no_calibration = True, False
    # Add all possible test cases and let the derived test class decide
    # whether to run specific ones with ShouldRunTest().
#
# Note: INT8 without calibration behaves like FP32/FP16.
opts = list(
itertools.product([FP32, FP16, INT8], [convert_online, convert_offline],
[dynamic_engine, static_engine], [no_calibration]))
# We always run calibration with offline tool.
# TODO(aaroey): static calibration engine is not supported yet.
opts.append((INT8, convert_offline, dynamic_engine, use_calibration))
return opts
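  # Illustration (not in the original file): the product above yields
  # 3 precisions x 2 conversion modes x 2 engine types = 12 no-calibration
  # combinations, e.g. (FP32, True, True, False) for online conversion with
  # a dynamic engine; the single INT8-with-calibration tuple appended at the
  # end brings the total to 13 test configurations.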
def _GetTest(run_params):
"""Gets a single test method based on the parameters."""
@test_util.deprecated_graph_mode_only
def _Test(self):
logging.info(
"Running TFv1 test %s with parameters: convert_online=%s, "
"precision_mode=%s, dynamic_engine=%s",
"testTfTrt_" + run_params.test_name, run_params.convert_online,
run_params.precision_mode, run_params.dynamic_engine)
self.RunTest(run_params)
return _Test
opts = _GetTestConfigs()
for (precision_mode, convert_online, dynamic_engine, use_calibration) in opts:
conversion = "OnlineConversion" if convert_online else "OfflineConversion"
engine_type = "DynamicEngine" if dynamic_engine else "StaticEngine"
calibration_type = "UseCalibration" if use_calibration else "NoCalibration"
test_name = "%s_%s_%s_%s" % (conversion, engine_type, precision_mode,
calibration_type)
run_params = RunParams(
convert_online=convert_online,
precision_mode=precision_mode,
dynamic_engine=dynamic_engine,
test_name=test_name,
use_calibration=use_calibration)
setattr(test_class, "testTfTrt_" + test_name, _GetTest(run_params))
if is_tensorrt_enabled():
_AddTests(TfTrtIntegrationTestBase)
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class BiasaddMatMulTest(trt_test.TfTrtIntegrationTestBase):
"""Testing conversion of BiasAdd MatMul in TF-TRT conversion."""
def _ConstOp(self, shape):
return constant_op.constant(np.random.randn(*shape), dtype=dtypes.float32)
def GraphFn(self, x):
input_matrix_rows = 4
input_matrix_columns = 144
b = self._ConstOp((input_matrix_columns, 4))
x1 = math_ops.matmul(x, b)
b = self._ConstOp((1, 4))
x1 = x1 + b
b = self._ConstOp((input_matrix_rows, 144))
x2 = self.trt_incompatible_op(x)
x2 = math_ops.matmul(x2, b, transpose_a=True)
x2 = gen_array_ops.reshape(x2, [4, -1])
x2 = self.trt_incompatible_op(x2)
b = self._ConstOp((4, input_matrix_columns))
x3 = math_ops.matmul(x, b, transpose_b=True)
b = self._ConstOp((16, input_matrix_rows))
x4 = self.trt_incompatible_op(x)
x4 = math_ops.matmul(x4, b, transpose_b=True, transpose_a=True)
x4 = gen_array_ops.reshape(x4, [4, -1])
x4 = self.trt_incompatible_op(x4)
# Note that tf.nn.bias_add supports up to 5 dimensions.
b = self._ConstOp((input_matrix_columns, 48))
x5 = math_ops.matmul(x, b)
b = self._ConstOp((48,))
x5 = nn.bias_add(x5, b)
x5 = gen_array_ops.reshape(x5, [4, -1])
x6 = gen_array_ops.reshape(x, [4, 24, 6])
b = self._ConstOp((6,))
x6 = nn.bias_add(x6, b, data_format="NHWC")
x6 = gen_array_ops.reshape(x6, [4, -1])
x7 = gen_array_ops.reshape(x, [4, 12, 4, 3])
b = self._ConstOp((3,))
x7 = nn.bias_add(x7, b, data_format="NHWC")
x7 = gen_array_ops.reshape(x7, [4, -1])
x8 = gen_array_ops.reshape(x, [4, 4, 3, 2, 6])
b = self._ConstOp((6,))
x8 = nn.bias_add(x8, b, data_format="NHWC")
x8 = gen_array_ops.reshape(x8, [4, -1])
x9 = gen_array_ops.reshape(x, [4, 12, 3, 2, 2])
b = self._ConstOp((12,))
x9 = nn.bias_add(x9, b, data_format="NCHW")
x9 = gen_array_ops.reshape(x9, [4, -1])
x10 = gen_array_ops.reshape(x, [4, 3, 4, 12])
b = self._ConstOp((3,))
x10 = nn.bias_add(x10, b, data_format="NCHW")
x10 = gen_array_ops.reshape(x10, [4, -1])
x11 = gen_array_ops.reshape(x, [4, 6, 24])
b = self._ConstOp((6,))
x11 = nn.bias_add(x11, b, data_format="NCHW")
x11 = gen_array_ops.reshape(x11, [4, -1])
out = array_ops.concat([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11],
axis=-1)
return array_ops.squeeze(out, name="output_0")
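  # Width check (an illustrative aside, not in the original file): the concat
  # above has 4 (x1) + 5184 (x2) + 4 (x3) + 576 (x4) + 48 (x5) + 6 * 144
  # (x6..x11) = 6680 columns, matching the [[4, 6680]] output shape declared
  # in GetParams below.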
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[4, 144]],
[[4, 6680]])
def GetConversionParams(self, run_params):
"""Return a ConversionParams for test."""
conversion_params = super(BiasaddMatMulTest,
self).GetConversionParams(run_params)
return conversion_params._replace(
max_batch_size=4,
maximum_cached_engines=1,
        # Disable layout optimizer, since it will convert BiasAdd with NHWC
        # format to NCHW format for four-dimensional inputs.
rewriter_config_template=trt_test.OptimizerDisabledRewriterConfig())
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/biasadd_matmul_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class MultiConnectionNeighborEngineTest(trt_test.TfTrtIntegrationTestBase):
"""Test for multi connection neighboring nodes wiring tests in TF-TRT."""
def GraphFn(self, x):
dtype = x.dtype
e = constant_op.constant(
np.random.normal(.05, .005, [3, 2, 3, 4]), name="weights", dtype=dtype)
conv = nn.conv2d(
input=x,
filter=e,
data_format="NCHW",
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
b = constant_op.constant(
np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
t = conv + b
b = constant_op.constant(
np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
q = conv - b
edge = self.trt_incompatible_op(q)
b = constant_op.constant(
np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
d = b + conv
edge3 = self.trt_incompatible_op(d)
edge1 = self.trt_incompatible_op(conv)
t = t - edge1
q = q + edge
t = t + q
t = t + d
t = t - edge3
return array_ops.squeeze(t, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 3, 7, 5]],
[[2, 4, 5, 4]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0", "TRTEngineOp_1"]
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/multi_connection_neighbor_engine_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class TopKTest(trt_test.TfTrtIntegrationTestBase):
"""Testing Top-K in TF-TRT conversion."""
def GraphFn(self, x):
k = 5
k_tensor = constant_op.constant(k, dtype=dtypes.int32, name="Const")
values, indices = nn_ops.top_k(x, k_tensor, name="TopK")
values = array_ops.identity(values, name="output_0")
indices = array_ops.identity(indices, name="output_1")
return values, indices
def GetParams(self):
k = 5
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 100]],
[[100, k], [100, k]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {"TRTEngineOp_0": ["Const", "TopK"]}
class TopKOutputTypeTest(trt_test.TfTrtIntegrationTestBase):
"""Testing that output type of engine using Top-K is set correctly."""
def GraphFn(self, x):
k = 5
k_tensor = constant_op.constant(k, dtype=dtypes.int32, name="Const")
values, indices = nn_ops.top_k(x, k_tensor, name="TopK")
# Reshape will act as a layer between the TopK output and the engine
# output, requiring the output tensor of reshape to be set explicitly to
# int32.
indices = array_ops.reshape(indices, [100, 1, 5], name="Reshape")
values = array_ops.identity(values, name="output_0")
indices = array_ops.identity(indices, name="output_1")
return values, indices
def GetParams(self):
k = 5
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 100]],
[[100, k], [100, 1, k]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {"TRTEngineOp_0": ["Const", "TopK", "Reshape", "Reshape/shape"]}
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/topk_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test conversion of graphs involving INT32 tensors and operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class ExcludeUnsupportedInt32Test(trt_test.TfTrtIntegrationTestBase):
"""Test exclusion of ops which are not supported in INT32 mode by TF-TRT"""
def _ConstOp(self, shape, dtype):
return constant_op.constant(np.random.randn(*shape), dtype=dtype)
def GraphFn(self, x):
dtype = x.dtype
b = self._ConstOp((4, 10), dtype)
x = math_ops.matmul(x, b)
b = self._ConstOp((10,), dtype)
x = nn.bias_add(x, b)
return array_ops.identity(x, name='output_0')
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.int32, [[100, 4]], [[100, 10]])
def GetConversionParams(self, run_params):
"""Return a ConversionParams for test."""
conversion_params = super(ExcludeUnsupportedInt32Test,
self).GetConversionParams(run_params)
return conversion_params._replace(
max_batch_size=100,
maximum_cached_engines=1,
        # Disable layout optimizer, since it will convert BiasAdd with NHWC
        # format to NCHW format for four-dimensional inputs.
rewriter_config_template=trt_test.OptimizerDisabledRewriterConfig())
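  # Illustrative note (not in the original file): MatMul and BiasAdd on INT32
  # inputs are among the ops TF-TRT cannot convert, so no segment is formed
  # and ExpectedEnginesToBuild below returns an empty list.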
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return []
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/int32_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic tests for TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ReshapeTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
outputs = []
    # Here we test two types of reshapes: ones that change the batch dimension
    # and ones that do not. Note that we're not able to test reshaping to a
    # scalar, since TRT requires the input tensor to be of rank at least 2, so
    # a reshape with a scalar input will be filtered out of the segment before
    # conversion.
    #
    # The reshapes below happen at the batch dimension, so conversion should
    # fail.
for shape in [[2, 50, 24, 24, 2], [-1, 50, 24, 24, 2], [2, 50, -1, 24, 2]]:
incompatible_reshape = array_ops.reshape(inp, shape)
reshape_back = array_ops.reshape(incompatible_reshape, [-1, 24, 24, 2])
outputs.append(self.trt_incompatible_op(reshape_back))
# Add another block with many reshapes that don't change the batch
# dimension.
compatible_reshape = array_ops.reshape(
inp, [-1, 24 * 24, 2], name="reshape-0")
compatible_reshape = array_ops.reshape(
compatible_reshape, [100, 24, -1], name="reshape-1")
compatible_reshape = array_ops.reshape(
compatible_reshape, [100, 24 * 2, 24], name="reshape-2")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 24, 24 * 2], name="reshape-3")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 6, 4, 24, 2], name="reshape-4")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 6, 4, 6, 4, 2, 1], name="reshape-5")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 24, 24, 2], name="reshape-6")
outputs.append(self.trt_incompatible_op(compatible_reshape))
return math_ops.add_n(outputs, name="output_0")
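  # Illustration (not in the original file): reshaping [100, 24, 24, 2] to
  # [2, 50, 24, 24, 2] splits the batch of 100 into 2 x 50 and is therefore
  # incompatible, while the "reshape-*" chain above always keeps the leading
  # dimension at 100 (or a -1 that infers to 100) and converts cleanly.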
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[100, 24, 24, 2]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["reshape-%d" % i for i in range(7)] +
["reshape-%d/shape" % i for i in range(7)]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
return (not trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.dynamic_engine)
class TransposeTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
# Add a block with compatible transposes.
compatible_transpose = array_ops.transpose(
inp, [0, 3, 1, 2], name="transpose-1")
compatible_transpose = array_ops.transpose(
compatible_transpose, [0, 2, 3, 1], name="transposeback")
# Add an incompatible op so the first block will not be in the same
# subgraph where the following block belongs.
bridge = self.trt_incompatible_op(compatible_transpose)
# Add a block with incompatible transposes.
#
    # Note: by default Grappler runs the TRT optimizer twice. On the first
    # pass it groups the two transpose ops below into the same segment and
    # then fails the conversion due to the expected batch dimension problem.
    # On the second pass, since the input of the bridge op is TRTEngineOp_0,
    # shape inference fails, which again causes the conversion to fail.
    # TODO(laigd): support shape inference, make the TRT optimizer run only
    # once, and fix this.
incompatible_transpose = array_ops.transpose(
bridge, [2, 1, 0, 3], name="transpose-2")
excluded_transpose = array_ops.transpose(
incompatible_transpose, [0, 2, 3, 1], name="transpose-3")
return array_ops.identity(excluded_transpose, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[24, 100, 2, 24]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"transpose-1", "transpose-1/perm", "transposeback",
"transposeback/perm"
]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
return (not trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.dynamic_engine)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/reshape_transpose_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class VGGBlockNCHWTest(trt_test.TfTrtIntegrationTestBase):
"""Single vgg layer in NCHW unit tests in TF-TRT."""
def GraphFn(self, x):
dtype = x.dtype
x, _, _ = nn_impl.fused_batch_norm(
x, [1.0, 1.0], [0.0, 0.0],
mean=[0.5, 0.5],
variance=[1.0, 1.0],
data_format="NCHW",
is_training=False)
e = constant_op.constant(
np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
conv = nn.conv2d(
input=x,
filter=e,
data_format="NCHW",
strides=[1, 1, 2, 2],
padding="SAME",
name="conv")
b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
t = nn.bias_add(conv, b, data_format="NCHW", name="biasAdd")
relu = nn.relu(t, "relu")
idty = array_ops.identity(relu, "ID")
v = nn_ops.max_pool(
idty, [1, 1, 2, 2], [1, 1, 2, 2],
"VALID",
data_format="NCHW",
name="max_pool")
return array_ops.squeeze(v, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[5, 2, 8, 8]],
[[5, 6, 2, 2]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/vgg_block_nchw_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to test TF-TRT INT8 conversion without calibration on Mnist model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import get_linked_tensorrt_version
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import is_tensorrt_enabled
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import data
from tensorflow.python import keras
from tensorflow.python.compiler.tensorrt import trt_convert
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.model_fn import ModeKeys
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import saver
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.checkpoint_management import latest_checkpoint
from tensorflow.python.training.training_util import get_global_step
INPUT_NODE_NAME = 'input'
OUTPUT_NODE_NAME = 'output'
class QuantizationAwareTrainingMNISTTest(test_util.TensorFlowTestCase):
def _BuildGraph(self, x):
def _Quantize(x, r):
x = gen_array_ops.quantize_and_dequantize_v2(x, -r, r)
return x
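    # Illustrative note (inferred from the _Run docstring below, not part of
    # the original file): quantize_and_dequantize_v2 simulates INT8
    # quantization during training by clipping values to [-r, r] and rounding
    # them to a discrete grid, so the trained weights already absorb the
    # quantization error that TF-TRT INT8 mode introduces at inference time.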
def _DenseLayer(x, num_inputs, num_outputs, quantization_range, name):
"""Dense layer with quantized outputs.
Args:
x: input to the dense layer
num_inputs: number of input columns of x
num_outputs: number of output columns
quantization_range: the min/max range for quantization
name: name of the variable scope
Returns:
The output of the layer.
"""
with variable_scope.variable_scope(name):
kernel = variable_scope.get_variable(
'kernel',
shape=[num_inputs, num_outputs],
dtype=dtypes.float32,
initializer=keras.initializers.glorot_uniform())
bias = variable_scope.get_variable(
'bias',
shape=[num_outputs],
dtype=dtypes.float32,
initializer=keras.initializers.zeros())
x = math_ops.matmul(x, kernel)
x = _Quantize(x, quantization_range)
x = nn.bias_add(x, bias)
x = _Quantize(x, quantization_range)
return x
x = _Quantize(x, 1)
# Conv + Bias + Relu6
x = layers.conv2d(x, filters=32, kernel_size=3, use_bias=True)
x = nn.relu6(x)
# Conv + Bias + Relu6
x = layers.conv2d(x, filters=64, kernel_size=3, use_bias=True)
x = nn.relu6(x)
# Reduce
x = math_ops.reduce_mean(x, [1, 2])
x = _Quantize(x, 6)
# FC1
x = _DenseLayer(x, 64, 512, 6, name='dense')
x = nn.relu6(x)
# FC2
x = _DenseLayer(x, 512, 10, 25, name='dense_1')
x = array_ops.identity(x, name=OUTPUT_NODE_NAME)
return x
def _GetGraphDef(self, use_trt, max_batch_size, model_dir):
"""Get the frozen mnist GraphDef.
Args:
use_trt: whether use TF-TRT to convert the graph.
max_batch_size: the max batch size to apply during TF-TRT conversion.
model_dir: the model directory to load the checkpoints.
Returns:
The frozen mnist GraphDef.
"""
graph = ops.Graph()
with self.session(graph=graph) as sess:
with graph.device('/GPU:0'):
x = array_ops.placeholder(
shape=(None, 28, 28, 1), dtype=dtypes.float32, name=INPUT_NODE_NAME)
self._BuildGraph(x)
# Load weights
mnist_saver = saver.Saver()
checkpoint_file = latest_checkpoint(model_dir)
mnist_saver.restore(sess, checkpoint_file)
# Freeze
graph_def = graph_util.convert_variables_to_constants(
sess, sess.graph_def, output_node_names=[OUTPUT_NODE_NAME])
# Convert with TF-TRT
if use_trt:
logging.info('Number of nodes before TF-TRT conversion: %d',
len(graph_def.node))
converter = trt_convert.TrtGraphConverter(
input_graph_def=graph_def,
nodes_blacklist=[OUTPUT_NODE_NAME],
max_batch_size=max_batch_size,
precision_mode='INT8',
# There is a 2GB GPU memory limit for each test, so we set
# max_workspace_size_bytes to 256MB to leave enough room for TF
# runtime to allocate GPU memory.
max_workspace_size_bytes=1 << 28,
minimum_segment_size=2,
use_calibration=False,
use_function_backup=False)
graph_def = converter.convert()
logging.info('Number of nodes after TF-TRT conversion: %d',
len(graph_def.node))
num_engines = len(
[1 for n in graph_def.node if str(n.op) == 'TRTEngineOp'])
self.assertEqual(1, num_engines)
return graph_def
def _Run(self, is_training, use_trt, batch_size, num_epochs, model_dir):
"""Train or evaluate the model.
Args:
is_training: whether to train or evaluate the model. In training mode,
quantization will be simulated where the quantize_and_dequantize_v2 are
placed.
use_trt: if true, use TRT INT8 mode for evaluation, which will perform
real quantization. Otherwise use native TensorFlow which will perform
simulated quantization. Ignored if is_training is True.
batch_size: batch size.
num_epochs: how many epochs to train. Ignored if is_training is False.
model_dir: where to save or load checkpoint.
Returns:
The Estimator evaluation result.
"""
# Get dataset
train_data, test_data = mnist.load_data()
def _PreprocessFn(x, y):
x = math_ops.cast(x, dtypes.float32)
x = array_ops.expand_dims(x, axis=2)
x = 2.0 * (x / 255.0) - 1.0
y = math_ops.cast(y, dtypes.int32)
return x, y
def _EvalInputFn():
mnist_x, mnist_y = test_data
dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
dataset = dataset.apply(
data.experimental.map_and_batch(
map_func=_PreprocessFn,
batch_size=batch_size,
num_parallel_calls=8))
dataset = dataset.repeat(count=1)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def _TrainInputFn():
mnist_x, mnist_y = train_data
dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
dataset = dataset.shuffle(2 * len(mnist_x))
dataset = dataset.apply(
data.experimental.map_and_batch(
map_func=_PreprocessFn,
batch_size=batch_size,
num_parallel_calls=8))
dataset = dataset.repeat(count=num_epochs)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def _ModelFn(features, labels, mode):
if is_training:
logits_out = self._BuildGraph(features)
else:
graph_def = self._GetGraphDef(use_trt, batch_size, model_dir)
logits_out = importer.import_graph_def(
graph_def,
input_map={INPUT_NODE_NAME: features},
return_elements=[OUTPUT_NODE_NAME + ':0'],
name='')[0]
loss = losses.sparse_softmax_cross_entropy(
labels=labels, logits=logits_out)
summary.scalar('loss', loss)
classes_out = math_ops.argmax(logits_out, axis=1, name='classes_out')
accuracy = metrics.accuracy(
labels=labels, predictions=classes_out, name='acc_op')
summary.scalar('accuracy', accuracy[1])
if mode == ModeKeys.EVAL:
return EstimatorSpec(
mode, loss=loss, eval_metric_ops={'accuracy': accuracy})
elif mode == ModeKeys.TRAIN:
optimizer = AdamOptimizer(learning_rate=1e-2)
train_op = optimizer.minimize(loss, global_step=get_global_step())
return EstimatorSpec(mode, loss=loss, train_op=train_op)
config_proto = config_pb2.ConfigProto()
config_proto.gpu_options.allow_growth = True
estimator = Estimator(
model_fn=_ModelFn,
model_dir=model_dir if is_training else None,
config=RunConfig(session_config=config_proto))
if is_training:
estimator.train(_TrainInputFn)
results = estimator.evaluate(_EvalInputFn)
logging.info('accuracy: %s', str(results['accuracy']))
return results
# To generate the checkpoint, set a different model_dir and call self._Run()
  # by setting is_training=True and num_epochs=100, e.g.:
# model_dir = '/tmp/quantization_mnist'
# self._Run(
# is_training=True,
# use_trt=False,
# batch_size=128,
# num_epochs=100,
# model_dir=model_dir)
def testEval(self):
if not is_tensorrt_enabled():
return
model_dir = test.test_src_dir_path('python/compiler/tensorrt/test/testdata')
accuracy_tf_native = self._Run(
is_training=False,
use_trt=False,
batch_size=128,
num_epochs=None,
model_dir=model_dir)['accuracy']
logging.info('accuracy_tf_native: %f', accuracy_tf_native)
self.assertAllClose(0.9662, accuracy_tf_native, rtol=3e-3, atol=3e-3)
if get_linked_tensorrt_version()[0] < 5:
return
accuracy_tf_trt = self._Run(
is_training=False,
use_trt=True,
batch_size=128,
num_epochs=None,
model_dir=model_dir)['accuracy']
logging.info('accuracy_tf_trt: %f', accuracy_tf_trt)
self.assertAllClose(0.9675, accuracy_tf_trt, rtol=1e-3, atol=1e-3)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/compiler/tensorrt/test/quantization_mnist_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.summary import plugin_asset
class _UnnamedPluginAsset(plugin_asset.PluginAsset):
"""An example asset with a dummy serialize method provided, but no name."""
def assets(self):
return {}
class _ExamplePluginAsset(_UnnamedPluginAsset):
"""Simple example asset."""
plugin_name = "_ExamplePluginAsset"
class _OtherExampleAsset(_UnnamedPluginAsset):
"""Simple example asset."""
plugin_name = "_OtherExampleAsset"
class _ExamplePluginThatWillCauseCollision(_UnnamedPluginAsset):
plugin_name = "_ExamplePluginAsset"
class PluginAssetTest(test_util.TensorFlowTestCase):
def testGetPluginAsset(self):
epa = plugin_asset.get_plugin_asset(_ExamplePluginAsset)
self.assertIsInstance(epa, _ExamplePluginAsset)
epa2 = plugin_asset.get_plugin_asset(_ExamplePluginAsset)
self.assertIs(epa, epa2)
opa = plugin_asset.get_plugin_asset(_OtherExampleAsset)
self.assertIsNot(epa, opa)
def testUnnamedPluginFails(self):
with self.assertRaises(ValueError):
plugin_asset.get_plugin_asset(_UnnamedPluginAsset)
def testPluginCollisionDetected(self):
plugin_asset.get_plugin_asset(_ExamplePluginAsset)
with self.assertRaises(ValueError):
plugin_asset.get_plugin_asset(_ExamplePluginThatWillCauseCollision)
def testGetAllPluginAssets(self):
epa = plugin_asset.get_plugin_asset(_ExamplePluginAsset)
opa = plugin_asset.get_plugin_asset(_OtherExampleAsset)
self.assertItemsEqual(plugin_asset.get_all_plugin_assets(), [epa, opa])
def testRespectsGraphArgument(self):
g1 = ops.Graph()
g2 = ops.Graph()
e1 = plugin_asset.get_plugin_asset(_ExamplePluginAsset, g1)
e2 = plugin_asset.get_plugin_asset(_ExamplePluginAsset, g2)
self.assertEqual(e1, plugin_asset.get_all_plugin_assets(g1)[0])
self.assertEqual(e2, plugin_asset.get_all_plugin_assets(g2)[0])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/summary/plugin_asset_test.py
|
tensorflow-master
|
tensorflow/python/summary/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for writing summary data, for use in analysis and visualization.
See the [Summaries and
TensorBoard](https://www.tensorflow.org/guide/summaries_and_tensorboard) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import json_format as _json_format
# exports Summary, SummaryDescription, Event, TaggedRunMetadata, SessionLog
# pylint: disable=unused-import
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.framework.summary_pb2 import SummaryDescription
from tensorflow.core.framework.summary_pb2 import SummaryMetadata as _SummaryMetadata
from tensorflow.core.util.event_pb2 import Event
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.core.util.event_pb2 import TaggedRunMetadata
# pylint: enable=unused-import
from tensorflow.python.distribute import summary_op_util as _distribute_summary_op_util
from tensorflow.python.eager import context as _context
from tensorflow.python.framework import constant_op as _constant_op
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import gen_logging_ops as _gen_logging_ops
from tensorflow.python.ops import gen_summary_ops as _gen_summary_ops # pylint: disable=unused-import
from tensorflow.python.ops import summary_op_util as _summary_op_util
# exports FileWriter, FileWriterCache
# pylint: disable=unused-import
from tensorflow.python.summary.writer.writer import FileWriter
from tensorflow.python.summary.writer.writer_cache import FileWriterCache
# pylint: enable=unused-import
from tensorflow.python.util import compat as _compat
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['summary.scalar'])
def scalar(name, tensor, collections=None, family=None):
"""Outputs a `Summary` protocol buffer containing a single scalar value.
The generated Summary has a Tensor.proto containing the input Tensor.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric Tensor containing a single value.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
      which controls the tab name used for display on TensorBoard.
Returns:
    A scalar `Tensor` of type `string`, which contains a `Summary` protobuf.
Raises:
ValueError: If tensor has the wrong shape or type.
"""
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.scalar_summary(tags=tag, values=tensor, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
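# Example usage of scalar() (an illustrative sketch, not part of the original
# file):
#   loss = _constant_op.constant(0.25)
#   loss_summary = scalar('loss', loss)
# Evaluating `loss_summary` in a tf.compat.v1.Session yields a serialized
# Summary proto holding a single value whose simple_value is 0.25.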
@tf_export(v1=['summary.image'])
def image(name, tensor, max_outputs=3, collections=None, family=None):
"""Outputs a `Summary` protocol buffer with images.
The summary has up to `max_outputs` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_outputs: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
      which controls the tab name used for display on TensorBoard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.image_summary(
tag=tag, tensor=tensor, max_images=max_outputs, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
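# Normalization example (an illustrative aside, not part of the original
# file): for an all-positive float input with values in [0.0, 2.0], the first
# rule above rescales so that 2.0 maps to 255, so a pixel of 1.0 renders at
# intensity 127.5 before integer rounding.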
@tf_export(v1=['summary.histogram'])
def histogram(name, values, collections=None, family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
Adding a histogram summary makes it possible to visualize your data's
distribution in TensorBoard. You can see a detailed explanation of the
TensorBoard histogram dashboard
[here](https://www.tensorflow.org/get_started/tensorboard_histograms).
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
      which controls the tab name used for display on TensorBoard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[values],
default_name='HistogramSummary') as (tag, scope):
val = _gen_logging_ops.histogram_summary(
tag=tag, values=values, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.audio'])
def audio(name, tensor, sample_rate, max_outputs=3, collections=None,
family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
    generated sequentially as '*name*/audio/0', '*name*/audio/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
      which controls the tab name used for display on TensorBoard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family=family, values=[tensor]) as (tag, scope):
sample_rate = _ops.convert_to_tensor(
sample_rate, dtype=_dtypes.float32, name='sample_rate')
val = _gen_logging_ops.audio_summary_v2(
tag=tag, tensor=tensor, max_outputs=max_outputs,
sample_rate=sample_rate, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.text'])
def text(name, tensor, collections=None):
"""Summarizes textual data.
Text data summarized via this plugin will be visible in the Text Dashboard
in TensorBoard. The standard TensorBoard Text Dashboard will render markdown
in the strings, and will automatically organize 1d and 2d tensors into tables.
If a tensor with more than 2 dimensions is provided, a 2d subarray will be
displayed along with a warning message. (Note that this behavior is not
  intrinsic to the text summary API, but rather to the default TensorBoard text
plugin.)
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: a string-type Tensor to summarize.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
A TensorSummary op that is configured so that TensorBoard will recognize
that it contains textual data. The TensorSummary is a scalar `Tensor` of
type `string` which contains `Summary` protobufs.
Raises:
ValueError: If tensor has the wrong type.
"""
if tensor.dtype != _dtypes.string:
raise ValueError('Expected tensor %s to have dtype string, got %s' %
(tensor.name, tensor.dtype))
summary_metadata = _SummaryMetadata(
plugin_data=_SummaryMetadata.PluginData(plugin_name='text'))
t_summary = tensor_summary(
name=name,
tensor=tensor,
summary_metadata=summary_metadata,
collections=collections)
return t_summary
@tf_export(v1=['summary.tensor_summary'])
def tensor_summary(name,
tensor,
summary_description=None,
collections=None,
summary_metadata=None,
family=None,
display_name=None):
"""Outputs a `Summary` protocol buffer with a serialized tensor.proto.
Args:
name: A name for the generated node. If display_name is not set, it will
also serve as the tag name in TensorBoard. (In that case, the tag
name will inherit tf name scopes.)
tensor: A tensor of any type and shape to serialize.
summary_description: A long description of the summary sequence. Markdown
is supported.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
summary_metadata: Optional SummaryMetadata proto (which describes which
plugins may use the summary value).
family: Optional; if provided, used as the prefix of the summary tag,
which controls the name used for display on TensorBoard when
display_name is not set.
display_name: A string used to name this data in TensorBoard. If this is
not set, then the node name will be used instead.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if summary_metadata is None:
summary_metadata = _SummaryMetadata()
if summary_description is not None:
summary_metadata.summary_description = summary_description
if display_name is not None:
summary_metadata.display_name = display_name
serialized_summary_metadata = summary_metadata.SerializeToString()
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.tensor_summary_v2(
tensor=tensor,
tag=tag,
name=scope,
serialized_summary_metadata=serialized_summary_metadata)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
@tf_export(v1=['summary.merge'])
def merge(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
Raises:
RuntimeError: If called with eager mode enabled.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
@end_compatibility
"""
# pylint: enable=line-too-long
if _context.executing_eagerly():
raise RuntimeError(
'Merging tf.summary.* ops is not compatible with eager execution. '
'Use tf.contrib.summary instead.')
if _distribute_summary_op_util.skip_summary():
return _constant_op.constant('')
name = _summary_op_util.clean_tag(name)
with _ops.name_scope(name, 'Merge', inputs):
val = _gen_logging_ops.merge_summary(inputs=inputs, name=name)
_summary_op_util.collect(val, collections, [])
return val
@tf_export(v1=['summary.merge_all'])
def merge_all(key=_ops.GraphKeys.SUMMARIES, scope=None, name=None):
"""Merges all summaries collected in the default graph.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
    scope: Optional scope used to filter the summary ops, using `re.match`.
    name: A name for the operation (optional).
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
@end_compatibility
"""
if _context.executing_eagerly():
raise RuntimeError(
'Merging tf.summary.* ops is not compatible with eager execution. '
'Use tf.contrib.summary instead.')
summary_ops = _ops.get_collection(key, scope=scope)
if not summary_ops:
return None
else:
return merge(summary_ops, name=name)
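# Typical graph-mode pattern (an illustrative sketch, not part of the original
# file):
#   merged = merge_all()
#   summary_str = sess.run(merged)
#   writer.add_summary(summary_str, global_step)
# where `sess` is a tf.compat.v1.Session and `writer` is the FileWriter
# re-exported above.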
@tf_export(v1=['summary.get_summary_description'])
def get_summary_description(node_def):
"""Given a TensorSummary node_def, retrieve its SummaryDescription.
When a Summary op is instantiated, a SummaryDescription of associated
metadata is stored in its NodeDef. This method retrieves the description.
Args:
node_def: the node_def_pb2.NodeDef of a TensorSummary op
Returns:
a summary_pb2.SummaryDescription
Raises:
ValueError: if the node is not a summary op.
@compatibility(eager)
Not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
@end_compatibility
"""
if node_def.op != 'TensorSummary':
raise ValueError("Can't get_summary_description on %s" % node_def.op)
description_str = _compat.as_str_any(node_def.attr['description'].s)
summary_description = SummaryDescription()
_json_format.Parse(description_str, summary_description)
return summary_description
|
tensorflow-master
|
tensorflow/python/summary/summary.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard Plugin asset abstract class.
TensorBoard plugins may need to provide arbitrary assets, such as
configuration information for specific outputs, or vocabulary files, or sprite
images, etc.
This module contains methods that allow plugin assets to be specified at graph
construction time. Plugin authors define a PluginAsset which is treated as a
singleton on a per-graph basis. The PluginAsset has an assets method which
returns a dictionary of asset contents. The tf.compat.v1.summary.FileWriter
(or any other Summary writer) will serialize these assets in such a way that
TensorBoard can retrieve them.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import ops
_PLUGIN_ASSET_PREFIX = "__tensorboard_plugin_asset__"
def get_plugin_asset(plugin_asset_cls, graph=None):
"""Acquire singleton PluginAsset instance from a graph.
PluginAssets are always singletons, and are stored in tf Graph collections.
This way, they can be defined anywhere the graph is being constructed, and
if the same plugin is configured at many different points, the user can always
modify the same instance.
Args:
plugin_asset_cls: The PluginAsset class
graph: (optional) The graph to retrieve the instance from. If not specified,
the default graph is used.
Returns:
An instance of the plugin_asset_class
Raises:
ValueError: If we have a plugin name collision, or if we unexpectedly find
the wrong number of items in a collection.
"""
if graph is None:
graph = ops.get_default_graph()
if not plugin_asset_cls.plugin_name:
raise ValueError("Class %s has no plugin_name" % plugin_asset_cls.__name__)
name = _PLUGIN_ASSET_PREFIX + plugin_asset_cls.plugin_name
container = graph.get_collection(name)
if container:
if len(container) != 1:
raise ValueError("Collection for %s had %d items, expected 1" %
(name, len(container)))
instance = container[0]
if not isinstance(instance, plugin_asset_cls):
raise ValueError("Plugin name collision between classes %s and %s" %
(plugin_asset_cls.__name__, instance.__class__.__name__))
else:
instance = plugin_asset_cls()
graph.add_to_collection(name, instance)
graph.add_to_collection(_PLUGIN_ASSET_PREFIX, plugin_asset_cls.plugin_name)
return instance
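# Illustrative usage (a sketch, not part of the original file):
#   class MyAsset(PluginAsset):
#     plugin_name = "my_plugin"
#     def assets(self):
#       return {"config.json": "{}"}
#   asset = get_plugin_asset(MyAsset)
# Repeated calls with the same class and graph return the same instance.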
def get_all_plugin_assets(graph=None):
"""Retrieve all PluginAssets stored in the graph collection.
Args:
graph: Optionally, the graph to get assets from. If unspecified, the default
graph is used.
Returns:
A list with all PluginAsset instances in the graph.
Raises:
ValueError: if we unexpectedly find a collection with the wrong number of
PluginAssets.
"""
if graph is None:
graph = ops.get_default_graph()
out = []
for name in graph.get_collection(_PLUGIN_ASSET_PREFIX):
collection = graph.get_collection(_PLUGIN_ASSET_PREFIX + name)
if len(collection) != 1:
raise ValueError("Collection for %s had %d items, expected 1" %
(name, len(collection)))
out.append(collection[0])
return out
@six.add_metaclass(abc.ABCMeta)
class PluginAsset(object):
"""This abstract base class allows TensorBoard to serialize assets to disk.
Plugin authors are expected to extend the PluginAsset class, so that it:
- has a unique plugin_name
- provides an assets method that returns an {asset_name: asset_contents}
dictionary. For now, asset_contents are strings, although we may add
StringIO support later.
  Lifecycle of a PluginAsset instance:
- It is constructed when get_plugin_asset is called on the class for
the first time.
  - It is configured by code that follows the calls to get_plugin_asset.
- When the containing graph is serialized by the
tf.compat.v1.summary.FileWriter, the writer calls assets and the
PluginAsset instance provides its contents to be written to disk.
"""
plugin_name = None
@abc.abstractmethod
def assets(self):
"""Provide all of the assets contained by the PluginAsset instance.
The assets method should return a dictionary structured as
{asset_name: asset_contents}. asset_contents is a string.
This method will be called by the tf.compat.v1.summary.FileWriter when it
is time to write the assets out to disk.
"""
raise NotImplementedError()
|
tensorflow-master
|
tensorflow/python/summary/plugin_asset.py
|
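A minimal sketch of the PluginAsset lifecycle documented above: subclass PluginAsset with a unique plugin_name, implement assets(), and fetch the per-graph singleton through get_plugin_asset. The VocabAsset class, its plugin_name, and its contents are illustrative assumptions, not part of the module.

from tensorflow.python.framework import ops
from tensorflow.python.summary import plugin_asset

class VocabAsset(plugin_asset.PluginAsset):
  """Hypothetical plugin asset holding a small vocabulary."""
  plugin_name = "vocab_example"

  def __init__(self):
    self.words = []

  def assets(self):
    # Contents must be strings, per the PluginAsset contract above.
    return {"vocab.txt": "\n".join(self.words)}

with ops.Graph().as_default() as g:
  # The first call constructs the singleton; later calls return the same one.
  asset = plugin_asset.get_plugin_asset(VocabAsset)
  asset.words.extend(["alpha", "beta"])
  assert asset is plugin_asset.get_plugin_asset(VocabAsset, graph=g)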
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the API surface of the V1 tf.summary ops.
These tests don't check the actual serialized proto summary value for the
more complex summaries (e.g. audio, image). Those tests live separately in
tensorflow/python/kernel_tests/summary_v1_*.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary as summary_lib
class SummaryTest(test.TestCase):
@test_util.run_deprecated_v1
def testScalarSummary(self):
with self.cached_session() as s:
i = constant_op.constant(3)
with ops.name_scope('outer'):
im = summary_lib.scalar('inner', i)
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 1)
self.assertEqual(values[0].tag, 'outer/inner')
self.assertEqual(values[0].simple_value, 3.0)
@test_util.run_deprecated_v1
def testScalarSummaryWithFamily(self):
with self.cached_session() as s:
i = constant_op.constant(7)
with ops.name_scope('outer'):
im1 = summary_lib.scalar('inner', i, family='family')
self.assertEquals(im1.op.name, 'outer/family/inner')
im2 = summary_lib.scalar('inner', i, family='family')
self.assertEquals(im2.op.name, 'outer/family/inner_1')
sm1, sm2 = s.run([im1, im2])
summary = summary_pb2.Summary()
summary.ParseFromString(sm1)
values = summary.value
self.assertEqual(len(values), 1)
self.assertEqual(values[0].tag, 'family/outer/family/inner')
self.assertEqual(values[0].simple_value, 7.0)
summary.ParseFromString(sm2)
values = summary.value
self.assertEqual(len(values), 1)
self.assertEqual(values[0].tag, 'family/outer/family/inner_1')
self.assertEqual(values[0].simple_value, 7.0)
@test_util.run_deprecated_v1
def testSummarizingVariable(self):
with self.cached_session() as s:
c = constant_op.constant(42.0)
v = variables.Variable(c)
ss = summary_lib.scalar('summary', v)
init = variables.global_variables_initializer()
s.run(init)
summ_str = s.run(ss)
summary = summary_pb2.Summary()
summary.ParseFromString(summ_str)
self.assertEqual(len(summary.value), 1)
value = summary.value[0]
self.assertEqual(value.tag, 'summary')
self.assertEqual(value.simple_value, 42.0)
@test_util.run_deprecated_v1
def testImageSummary(self):
with self.cached_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
im = summary_lib.image('inner', i, max_outputs=3)
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('outer/inner/image/{}'.format(i) for i in xrange(3))
self.assertEqual(tags, expected)
@test_util.run_deprecated_v1
def testImageSummaryWithFamily(self):
with self.cached_session() as s:
i = array_ops.ones((5, 2, 3, 1))
with ops.name_scope('outer'):
im = summary_lib.image('inner', i, max_outputs=3, family='family')
self.assertEquals(im.op.name, 'outer/family/inner')
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('family/outer/family/inner/image/{}'.format(i)
for i in xrange(3))
self.assertEqual(tags, expected)
@test_util.run_deprecated_v1
def testHistogramSummary(self):
with self.cached_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
summ_op = summary_lib.histogram('inner', i)
summary_str = s.run(summ_op)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
self.assertEqual(len(summary.value), 1)
self.assertEqual(summary.value[0].tag, 'outer/inner')
@test_util.run_deprecated_v1
def testHistogramSummaryWithFamily(self):
with self.cached_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
summ_op = summary_lib.histogram('inner', i, family='family')
self.assertEquals(summ_op.op.name, 'outer/family/inner')
summary_str = s.run(summ_op)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
self.assertEqual(len(summary.value), 1)
self.assertEqual(summary.value[0].tag, 'family/outer/family/inner')
def testHistogramSummaryTypes(self):
for dtype in (dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.int32,
dtypes.float32, dtypes.float64):
const = constant_op.constant(10, dtype=dtype)
summary_lib.histogram('h', const)
@test_util.run_deprecated_v1
def testAudioSummary(self):
with self.cached_session() as s:
i = array_ops.ones((5, 3, 4))
with ops.name_scope('outer'):
aud = summary_lib.audio('inner', i, 0.2, max_outputs=3)
summary_str = s.run(aud)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('outer/inner/audio/{}'.format(i) for i in xrange(3))
self.assertEqual(tags, expected)
@test_util.run_deprecated_v1
def testAudioSummaryWithFamily(self):
with self.cached_session() as s:
i = array_ops.ones((5, 3, 4))
with ops.name_scope('outer'):
aud = summary_lib.audio('inner', i, 0.2, max_outputs=3, family='family')
self.assertEquals(aud.op.name, 'outer/family/inner')
summary_str = s.run(aud)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('family/outer/family/inner/audio/{}'.format(i)
for i in xrange(3))
self.assertEqual(tags, expected)
@test_util.run_deprecated_v1
def testTextSummary(self):
with self.cached_session():
with self.assertRaises(ValueError):
num = array_ops.constant(1)
summary_lib.text('foo', num)
# The API accepts vectors.
arr = array_ops.constant(['one', 'two', 'three'])
summ = summary_lib.text('foo', arr)
self.assertEqual(summ.op.type, 'TensorSummaryV2')
      # The API accepts scalars.
summ = summary_lib.text('foo', array_ops.constant('one'))
self.assertEqual(summ.op.type, 'TensorSummaryV2')
@test_util.run_deprecated_v1
def testSummaryNameConversion(self):
c = constant_op.constant(3)
s = summary_lib.scalar('name with spaces', c)
self.assertEqual(s.op.name, 'name_with_spaces')
s2 = summary_lib.scalar('name with many $#illegal^: characters!', c)
self.assertEqual(s2.op.name, 'name_with_many___illegal___characters_')
s3 = summary_lib.scalar('/name/with/leading/slash', c)
self.assertEqual(s3.op.name, 'name/with/leading/slash')
@test_util.run_deprecated_v1
def testSummaryWithFamilyMetaGraphExport(self):
with ops.name_scope('outer'):
i = constant_op.constant(11)
summ = summary_lib.scalar('inner', i)
self.assertEquals(summ.op.name, 'outer/inner')
summ_f = summary_lib.scalar('inner', i, family='family')
self.assertEquals(summ_f.op.name, 'outer/family/inner')
metagraph_def, _ = meta_graph.export_scoped_meta_graph(export_scope='outer')
with ops.Graph().as_default() as g:
meta_graph.import_scoped_meta_graph(metagraph_def, graph=g,
import_scope='new_outer')
# The summaries should exist, but with outer scope renamed.
new_summ = g.get_tensor_by_name('new_outer/inner:0')
new_summ_f = g.get_tensor_by_name('new_outer/family/inner:0')
# However, the tags are unaffected.
with self.cached_session() as s:
new_summ_str, new_summ_f_str = s.run([new_summ, new_summ_f])
new_summ_pb = summary_pb2.Summary()
new_summ_pb.ParseFromString(new_summ_str)
self.assertEquals('outer/inner', new_summ_pb.value[0].tag)
new_summ_f_pb = summary_pb2.Summary()
new_summ_f_pb.ParseFromString(new_summ_f_str)
self.assertEquals('family/outer/family/inner',
new_summ_f_pb.value[0].tag)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/summary/summary_test.py
|
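The tests above repeatedly evaluate a summary op and parse the serialized Summary proto it returns. A short hedged sketch of that round trip outside the test harness; the 'loss' tag and constant value are placeholders.

from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.summary import summary as summary_lib

with ops.Graph().as_default():
  loss = constant_op.constant(0.25)
  summ_op = summary_lib.scalar('loss', loss)
  with session.Session() as sess:
    summ = summary_pb2.Summary()
    summ.ParseFromString(sess.run(summ_op))
    # With no enclosing name scope and no family, the tag is just 'loss'.
    assert summ.value[0].tag == 'loss'
    assert summ.value[0].simple_value == 0.25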
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides a method for reading events from an event file via an iterator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['train.summary_iterator'])
def summary_iterator(path):
# pylint: disable=line-too-long
"""An iterator for reading `Event` protocol buffers from an event file.
You can use this function to read events written to an event file. It returns
a Python iterator that yields `Event` protocol buffers.
Example: Print the contents of an events file.
```python
for e in tf.compat.v1.train.summary_iterator(path to events file):
print(e)
```
Example: Print selected summary values.
```python
# This example supposes that the events file contains summaries with a
# summary value tag 'loss'. These could have been added by calling
  # `add_summary()`, passing the output of a scalar summary op created
  # with: `tf.compat.v1.summary.scalar('loss', loss_tensor)`.
for e in tf.compat.v1.train.summary_iterator(path to events file):
for v in e.summary.value:
if v.tag == 'loss':
print(v.simple_value)
```
See the protocol buffer definitions of
[Event](https://www.tensorflow.org/code/tensorflow/core/util/event.proto)
and
[Summary](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
for more information about their attributes.
Args:
path: The path to an event file created by a `SummaryWriter`.
Yields:
`Event` protocol buffers.
"""
# pylint: enable=line-too-long
for r in tf_record.tf_record_iterator(path):
yield event_pb2.Event.FromString(r)
|
tensorflow-master
|
tensorflow/python/summary/summary_iterator.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training_coordinator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import shutil
import time
import warnings
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.util import compat
class FileWriterTestCase(test.TestCase):
def _FileWriter(self, *args, **kwargs):
return writer.FileWriter(*args, **kwargs)
def _TestDir(self, test_name):
test_dir = os.path.join(self.get_temp_dir(), test_name)
return test_dir
def _CleanTestDir(self, test_name):
test_dir = self._TestDir(test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
return test_dir
def _EventsReader(self, test_dir):
event_paths = glob.glob(os.path.join(test_dir, "event*"))
    # If the test runs multiple times in the same directory we can have
# more than one matching event file. We only want to read the last one.
self.assertTrue(event_paths)
return summary_iterator.summary_iterator(event_paths[-1])
def _assertRecent(self, t):
self.assertTrue(abs(t - time.time()) < 5)
def _assertEventsWithGraph(self, test_dir, g, has_shapes):
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=g.as_graph_def(add_shapes=has_shapes))
rr = self._EventsReader(test_dir)
# The first event should list the file_version.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals("brain.Event:2", ev.file_version)
# The next event should have the graph.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals(0, ev.step)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(g.as_graph_def(add_shapes=has_shapes), ev_graph)
# The next event should have the metagraph.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals(0, ev.step)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
@test_util.run_deprecated_v1
def testAddingSummaryGraphAndRunMetadata(self):
test_dir = self._CleanTestDir("basics")
sw = self._FileWriter(test_dir)
sw.add_session_log(event_pb2.SessionLog(status=SessionLog.START), 1)
sw.add_summary(
summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag="mee", simple_value=10.0)]),
10)
sw.add_summary(
summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag="boo", simple_value=20.0)]),
20)
with ops.Graph().as_default() as g:
constant_op.constant([0], name="zero")
sw.add_graph(g, global_step=30)
run_metadata = config_pb2.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = "test"
sw.add_run_metadata(run_metadata, "test run", global_step=40)
sw.close()
rr = self._EventsReader(test_dir)
# The first event should list the file_version.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals("brain.Event:2", ev.file_version)
# The next event should be the START message.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals(1, ev.step)
self.assertEquals(SessionLog.START, ev.session_log.status)
# The next event should have the value 'mee=10.0'.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals(10, ev.step)
self.assertProtoEquals("""
value { tag: 'mee' simple_value: 10.0 }
""", ev.summary)
# The next event should have the value 'boo=20.0'.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals(20, ev.step)
self.assertProtoEquals("""
value { tag: 'boo' simple_value: 20.0 }
""", ev.summary)
# The next event should have the graph_def.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals(30, ev.step)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(g.as_graph_def(add_shapes=True), ev_graph)
# The next event should have metadata for the run.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals(40, ev.step)
self.assertEquals("test run", ev.tagged_run_metadata.tag)
parsed_run_metadata = config_pb2.RunMetadata()
parsed_run_metadata.ParseFromString(ev.tagged_run_metadata.run_metadata)
self.assertProtoEquals(run_metadata, parsed_run_metadata)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
@test_util.run_deprecated_v1
def testGraphAsNamed(self):
test_dir = self._CleanTestDir("basics_named_graph")
with ops.Graph().as_default() as g:
constant_op.constant([12], name="douze")
sw = self._FileWriter(test_dir, graph=g)
sw.close()
self._assertEventsWithGraph(test_dir, g, True)
@test_util.run_deprecated_v1
def testGraphAsPositional(self):
test_dir = self._CleanTestDir("basics_positional_graph")
with ops.Graph().as_default() as g:
constant_op.constant([12], name="douze")
sw = self._FileWriter(test_dir, g)
sw.close()
self._assertEventsWithGraph(test_dir, g, True)
@test_util.run_deprecated_v1
def testGraphDefAsNamed(self):
test_dir = self._CleanTestDir("basics_named_graph_def")
with ops.Graph().as_default() as g:
constant_op.constant([12], name="douze")
gd = g.as_graph_def()
sw = self._FileWriter(test_dir, graph_def=gd)
sw.close()
self._assertEventsWithGraph(test_dir, g, False)
@test_util.run_deprecated_v1
def testGraphDefAsPositional(self):
test_dir = self._CleanTestDir("basics_positional_graph_def")
with ops.Graph().as_default() as g:
constant_op.constant([12], name="douze")
gd = g.as_graph_def()
sw = self._FileWriter(test_dir, gd)
sw.close()
self._assertEventsWithGraph(test_dir, g, False)
@test_util.run_deprecated_v1
def testGraphAndGraphDef(self):
with self.assertRaises(ValueError):
test_dir = self._CleanTestDir("basics_graph_and_graph_def")
with ops.Graph().as_default() as g:
constant_op.constant([12], name="douze")
gd = g.as_graph_def()
sw = self._FileWriter(test_dir, graph=g, graph_def=gd)
sw.close()
@test_util.run_deprecated_v1
def testNeitherGraphNorGraphDef(self):
with self.assertRaises(TypeError):
test_dir = self._CleanTestDir("basics_string_instead_of_graph")
sw = self._FileWriter(test_dir, "string instead of graph object")
sw.close()
@test_util.run_deprecated_v1
def testCloseAndReopen(self):
test_dir = self._CleanTestDir("close_and_reopen")
sw = self._FileWriter(test_dir)
sw.add_session_log(event_pb2.SessionLog(status=SessionLog.START), 1)
sw.close()
# Sleep at least one second to make sure we get a new event file name.
time.sleep(1.2)
sw.reopen()
sw.add_session_log(event_pb2.SessionLog(status=SessionLog.START), 2)
sw.close()
# We should now have 2 events files.
event_paths = sorted(glob.glob(os.path.join(test_dir, "event*")))
self.assertEquals(2, len(event_paths))
# Check the first file contents.
rr = summary_iterator.summary_iterator(event_paths[0])
# The first event should list the file_version.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals("brain.Event:2", ev.file_version)
# The next event should be the START message.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals(1, ev.step)
self.assertEquals(SessionLog.START, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
# Check the second file contents.
rr = summary_iterator.summary_iterator(event_paths[1])
# The first event should list the file_version.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals("brain.Event:2", ev.file_version)
# The next event should be the START message.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals(2, ev.step)
self.assertEquals(SessionLog.START, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
@test_util.run_deprecated_v1
def testNonBlockingClose(self):
test_dir = self._CleanTestDir("non_blocking_close")
sw = self._FileWriter(test_dir)
# Sleep 1.2 seconds to make sure event queue is empty.
time.sleep(1.2)
time_before_close = time.time()
sw.close()
self._assertRecent(time_before_close)
@test_util.run_deprecated_v1
def testUseAfterClose(self):
test_dir = self._CleanTestDir("use_after_close")
sw = self._FileWriter(test_dir)
sw.close()
with warnings.catch_warnings(record=True) as triggered:
warnings.simplefilter("always")
self.assertFalse(triggered)
sw.add_summary(summary_pb2.Summary())
sw.add_session_log(event_pb2.SessionLog())
sw.add_graph(ops.Graph())
self.assertEqual(len(triggered), 3)
for w in triggered:
self.assertEqual(w.category, UserWarning)
@test_util.run_deprecated_v1
def testWithStatement(self):
test_dir = self._CleanTestDir("with_statement")
with self._FileWriter(test_dir) as sw:
sw.add_session_log(event_pb2.SessionLog(status=SessionLog.START), 1)
event_paths = sorted(glob.glob(os.path.join(test_dir, "event*")))
self.assertEquals(1, len(event_paths))
# Checks that values returned from session Run() calls are added correctly to
# summaries. These are numpy types so we need to check they fit in the
# protocol buffers correctly.
@test_util.run_deprecated_v1
def testAddingSummariesFromSessionRunCalls(self):
test_dir = self._CleanTestDir("global_step")
sw = self._FileWriter(test_dir)
with self.cached_session():
i = constant_op.constant(1, dtype=dtypes.int32, shape=[])
l = constant_op.constant(2, dtype=dtypes.int64, shape=[])
# Test the summary can be passed serialized.
summ = summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag="i", simple_value=1.0)])
sw.add_summary(summ.SerializeToString(), self.evaluate(i))
sw.add_summary(
summary_pb2.Summary(
value=[summary_pb2.Summary.Value(tag="l", simple_value=2.0)]),
self.evaluate(l))
sw.close()
rr = self._EventsReader(test_dir)
# File_version.
ev = next(rr)
self.assertTrue(ev)
self._assertRecent(ev.wall_time)
self.assertEquals("brain.Event:2", ev.file_version)
# Summary passed serialized.
ev = next(rr)
self.assertTrue(ev)
self._assertRecent(ev.wall_time)
self.assertEquals(1, ev.step)
self.assertProtoEquals("""
value { tag: 'i' simple_value: 1.0 }
""", ev.summary)
# Summary passed as SummaryObject.
ev = next(rr)
self.assertTrue(ev)
self._assertRecent(ev.wall_time)
self.assertEquals(2, ev.step)
self.assertProtoEquals("""
value { tag: 'l' simple_value: 2.0 }
""", ev.summary)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
@test_util.run_deprecated_v1
def testPluginMetadataStrippedFromSubsequentEvents(self):
test_dir = self._CleanTestDir("basics")
sw = self._FileWriter(test_dir)
sw.add_session_log(event_pb2.SessionLog(status=SessionLog.START), 1)
    # We add 2 summaries with the same tag. They both have metadata. The writer
# should strip the metadata from the second one.
value = summary_pb2.Summary.Value(tag="foo", simple_value=10.0)
value.metadata.plugin_data.plugin_name = "bar"
value.metadata.plugin_data.content = compat.as_bytes("... content ...")
sw.add_summary(summary_pb2.Summary(value=[value]), 10)
value = summary_pb2.Summary.Value(tag="foo", simple_value=10.0)
value.metadata.plugin_data.plugin_name = "bar"
value.metadata.plugin_data.content = compat.as_bytes("... content ...")
sw.add_summary(summary_pb2.Summary(value=[value]), 10)
sw.close()
rr = self._EventsReader(test_dir)
# The first event should list the file_version.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals("brain.Event:2", ev.file_version)
# The next event should be the START message.
ev = next(rr)
self._assertRecent(ev.wall_time)
self.assertEquals(1, ev.step)
self.assertEquals(SessionLog.START, ev.session_log.status)
# This is the first event with tag foo. It should contain SummaryMetadata.
ev = next(rr)
self.assertProtoEquals("""
value {
tag: "foo"
simple_value: 10.0
metadata {
plugin_data {
plugin_name: "bar"
content: "... content ..."
}
}
}
""", ev.summary)
# This is the second event with tag foo. It should lack SummaryMetadata
# because the file writer should have stripped it.
ev = next(rr)
self.assertProtoEquals("""
value {
tag: "foo"
simple_value: 10.0
}
""", ev.summary)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
@test_util.run_deprecated_v1
def testFileWriterWithSuffix(self):
test_dir = self._CleanTestDir("test_suffix")
sw = self._FileWriter(test_dir, filename_suffix="_test_suffix")
for _ in range(10):
sw.add_summary(
summary_pb2.Summary(value=[
summary_pb2.Summary.Value(tag="float_ten", simple_value=10.0)
]),
10)
sw.close()
sw.reopen()
sw.close()
event_filenames = glob.glob(os.path.join(test_dir, "event*"))
for filename in event_filenames:
self.assertTrue(filename.endswith("_test_suffix"))
def testPluginAssetSerialized(self):
class ExamplePluginAsset(plugin_asset.PluginAsset):
plugin_name = "example"
def assets(self):
return {"foo.txt": "foo!", "bar.txt": "bar!"}
with ops.Graph().as_default() as g:
plugin_asset.get_plugin_asset(ExamplePluginAsset)
logdir = self.get_temp_dir()
fw = self._FileWriter(logdir)
fw.add_graph(g)
plugin_dir = os.path.join(logdir, writer._PLUGINS_DIR, "example")
with gfile.Open(os.path.join(plugin_dir, "foo.txt"), "r") as f:
content = f.read()
self.assertEqual(content, "foo!")
with gfile.Open(os.path.join(plugin_dir, "bar.txt"), "r") as f:
content = f.read()
self.assertEqual(content, "bar!")
class SessionBasedFileWriterTestCase(FileWriterTestCase):
"""Tests for FileWriter behavior when passed a Session argument."""
def _FileWriter(self, *args, **kwargs):
if "session" not in kwargs:
      # Pass in cached_session() as the session. It will be cached during this
      # test method invocation so that any other use of cached_session() with
      # no graph should result in re-using the same underlying Session.
with self.cached_session() as sess:
kwargs["session"] = sess
return writer.FileWriter(*args, **kwargs)
return writer.FileWriter(*args, **kwargs)
def _createTaggedSummary(self, tag):
summary = summary_pb2.Summary()
summary.value.add(tag=tag)
return summary
def testSharing_withOtherSessionBasedFileWriters(self):
logdir = self.get_temp_dir()
with session.Session() as sess:
# Initial file writer
writer1 = writer.FileWriter(session=sess, logdir=logdir)
writer1.add_summary(self._createTaggedSummary("one"), 1)
writer1.flush()
# File writer, should share file with writer1
writer2 = writer.FileWriter(session=sess, logdir=logdir)
writer2.add_summary(self._createTaggedSummary("two"), 2)
writer2.flush()
# File writer with different logdir (shouldn't be in this logdir at all)
writer3 = writer.FileWriter(session=sess, logdir=logdir + "-other")
writer3.add_summary(self._createTaggedSummary("three"), 3)
writer3.flush()
# File writer in a different session (should be in separate file)
time.sleep(1.1) # Ensure filename has a different timestamp
with session.Session() as other_sess:
writer4 = writer.FileWriter(session=other_sess, logdir=logdir)
writer4.add_summary(self._createTaggedSummary("four"), 4)
writer4.flush()
# One more file writer, should share file with writer1
writer5 = writer.FileWriter(session=sess, logdir=logdir)
writer5.add_summary(self._createTaggedSummary("five"), 5)
writer5.flush()
event_paths = iter(sorted(glob.glob(os.path.join(logdir, "event*"))))
# First file should have tags "one", "two", and "five"
events = summary_iterator.summary_iterator(next(event_paths))
self.assertEqual("brain.Event:2", next(events).file_version)
self.assertEqual("one", next(events).summary.value[0].tag)
self.assertEqual("two", next(events).summary.value[0].tag)
self.assertEqual("five", next(events).summary.value[0].tag)
self.assertRaises(StopIteration, lambda: next(events))
# Second file should have just "four"
events = summary_iterator.summary_iterator(next(event_paths))
self.assertEqual("brain.Event:2", next(events).file_version)
self.assertEqual("four", next(events).summary.value[0].tag)
self.assertRaises(StopIteration, lambda: next(events))
# No more files
self.assertRaises(StopIteration, lambda: next(event_paths))
# Just check that the other logdir file exists to be sure we wrote it
self.assertTrue(glob.glob(os.path.join(logdir + "-other", "event*")))
def testSharing_withExplicitSummaryFileWriters(self):
logdir = self.get_temp_dir()
with session.Session() as sess:
# Initial file writer via FileWriter(session=?)
writer1 = writer.FileWriter(session=sess, logdir=logdir)
writer1.add_summary(self._createTaggedSummary("one"), 1)
writer1.flush()
# Next one via create_file_writer(), should use same file
writer2 = summary_ops_v2.create_file_writer(logdir=logdir)
with summary_ops_v2.always_record_summaries(), writer2.as_default():
summary2 = summary_ops_v2.scalar("two", 2.0, step=2)
sess.run(writer2.init())
sess.run(summary2)
sess.run(writer2.flush())
# Next has different shared name, should be in separate file
time.sleep(1.1) # Ensure filename has a different timestamp
writer3 = summary_ops_v2.create_file_writer(logdir=logdir, name="other")
with summary_ops_v2.always_record_summaries(), writer3.as_default():
summary3 = summary_ops_v2.scalar("three", 3.0, step=3)
sess.run(writer3.init())
sess.run(summary3)
sess.run(writer3.flush())
# Next uses a second session, should be in separate file
time.sleep(1.1) # Ensure filename has a different timestamp
with session.Session() as other_sess:
writer4 = summary_ops_v2.create_file_writer(logdir=logdir)
with summary_ops_v2.always_record_summaries(), writer4.as_default():
summary4 = summary_ops_v2.scalar("four", 4.0, step=4)
other_sess.run(writer4.init())
other_sess.run(summary4)
other_sess.run(writer4.flush())
# Next via FileWriter(session=?) uses same second session, should be in
# same separate file. (This checks sharing in the other direction)
writer5 = writer.FileWriter(session=other_sess, logdir=logdir)
writer5.add_summary(self._createTaggedSummary("five"), 5)
writer5.flush()
# One more via create_file_writer(), should use same file
writer6 = summary_ops_v2.create_file_writer(logdir=logdir)
with summary_ops_v2.always_record_summaries(), writer6.as_default():
summary6 = summary_ops_v2.scalar("six", 6.0, step=6)
sess.run(writer6.init())
sess.run(summary6)
sess.run(writer6.flush())
event_paths = iter(sorted(glob.glob(os.path.join(logdir, "event*"))))
# First file should have tags "one", "two", and "six"
events = summary_iterator.summary_iterator(next(event_paths))
self.assertEqual("brain.Event:2", next(events).file_version)
self.assertEqual("one", next(events).summary.value[0].tag)
self.assertEqual("two", next(events).summary.value[0].tag)
self.assertEqual("six", next(events).summary.value[0].tag)
self.assertRaises(StopIteration, lambda: next(events))
# Second file should have just "three"
events = summary_iterator.summary_iterator(next(event_paths))
self.assertEqual("brain.Event:2", next(events).file_version)
self.assertEqual("three", next(events).summary.value[0].tag)
self.assertRaises(StopIteration, lambda: next(events))
# Third file should have "four" and "five"
events = summary_iterator.summary_iterator(next(event_paths))
self.assertEqual("brain.Event:2", next(events).file_version)
self.assertEqual("four", next(events).summary.value[0].tag)
self.assertEqual("five", next(events).summary.value[0].tag)
self.assertRaises(StopIteration, lambda: next(events))
# No more files
self.assertRaises(StopIteration, lambda: next(event_paths))
class FileWriterCacheTest(test.TestCase):
"""FileWriterCache tests."""
def _test_dir(self, test_name):
"""Create an empty dir to use for tests.
Args:
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(self.get_temp_dir(), test_name)
if os.path.isdir(test_dir):
for f in glob.glob("%s/*" % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
def test_cache(self):
with ops.Graph().as_default():
dir1 = self._test_dir("test_cache_1")
dir2 = self._test_dir("test_cache_2")
sw1 = writer_cache.FileWriterCache.get(dir1)
sw2 = writer_cache.FileWriterCache.get(dir2)
sw3 = writer_cache.FileWriterCache.get(dir1)
self.assertEqual(sw1, sw3)
self.assertFalse(sw1 == sw2)
sw1.close()
sw2.close()
events1 = glob.glob(os.path.join(dir1, "event*"))
self.assertTrue(events1)
events2 = glob.glob(os.path.join(dir2, "event*"))
self.assertTrue(events2)
events3 = glob.glob(os.path.join("nowriter", "event*"))
self.assertFalse(events3)
def test_clear(self):
with ops.Graph().as_default():
dir1 = self._test_dir("test_clear")
sw1 = writer_cache.FileWriterCache.get(dir1)
writer_cache.FileWriterCache.clear()
sw2 = writer_cache.FileWriterCache.get(dir1)
self.assertFalse(sw1 == sw2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/summary/writer/writer_test.py
|
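The sharing behavior exercised by SessionBasedFileWriterTestCase can be summarized in a few lines: two FileWriters handed the same session and logdir append to one events file. A hedged sketch; the logdir path is a placeholder.

from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.summary.writer import writer

logdir = '/tmp/filewriter_sharing'  # placeholder directory
with ops.Graph().as_default(), session.Session() as sess:
  w1 = writer.FileWriter(session=sess, logdir=logdir)
  w2 = writer.FileWriter(session=sess, logdir=logdir)  # reuses w1's file
  summ = summary_pb2.Summary()
  summ.value.add(tag='loss', simple_value=0.5)
  w1.add_summary(summ, global_step=1)
  w1.flush()
  w2.flush()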
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A cache for FileWriters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import ops
from tensorflow.python.summary.writer.writer import FileWriter
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['summary.FileWriterCache'])
class FileWriterCache(object):
"""Cache for file writers.
This class caches file writers, one per directory.
"""
# Cache, keyed by directory.
_cache = {}
  # Lock protecting _cache.
_lock = threading.RLock()
@staticmethod
def clear():
"""Clear cached summary writers. Currently only used for unit tests."""
with FileWriterCache._lock:
# Make sure all the writers are closed now (otherwise open file handles
# may hang around, blocking deletions on Windows).
for item in FileWriterCache._cache.values():
item.close()
FileWriterCache._cache = {}
@staticmethod
def get(logdir):
"""Returns the FileWriter for the specified directory.
Args:
logdir: str, name of the directory.
Returns:
A `FileWriter`.
"""
with FileWriterCache._lock:
if logdir not in FileWriterCache._cache:
FileWriterCache._cache[logdir] = FileWriter(
logdir, graph=ops.get_default_graph())
return FileWriterCache._cache[logdir]
|
tensorflow-master
|
tensorflow/python/summary/writer/writer_cache.py
|
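A brief usage sketch of FileWriterCache, mirroring test_cache above; the directory path is a placeholder.

from tensorflow.python.framework import ops
from tensorflow.python.summary.writer import writer_cache

with ops.Graph().as_default():
  sw_a = writer_cache.FileWriterCache.get('/tmp/logs_a')  # placeholder dir
  sw_b = writer_cache.FileWriterCache.get('/tmp/logs_a')
  assert sw_a is sw_b  # one cached writer per directory
  writer_cache.FileWriterCache.clear()  # closes and drops all cached writers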
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Writes events to disk in a logdir."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import gfile
class EventFileWriterV2(object):
"""Writes `Event` protocol buffers to an event file via the graph.
The `EventFileWriterV2` class is backed by the summary file writer in the v2
summary API (currently in tf.contrib.summary), so it uses a shared summary
writer resource and graph ops to write events.
As with the original EventFileWriter, this class will asynchronously write
Event protocol buffers to the backing file. The Event file is encoded using
the tfrecord format, which is similar to RecordIO.
"""
def __init__(self, session, logdir, max_queue=10, flush_secs=120,
filename_suffix=''):
"""Creates an `EventFileWriterV2` and an event file to write to.
On construction, this calls `tf.contrib.summary.create_file_writer` within
the graph from `session.graph` to look up a shared summary writer resource
for `logdir` if one exists, and create one if not. Creating the summary
writer resource in turn creates a new event file in `logdir` to be filled
with `Event` protocol buffers passed to `add_event`. Graph ops to control
this writer resource are added to `session.graph` during this init call;
stateful methods on this class will call `session.run()` on these ops.
Note that because the underlying resource is shared, it is possible that
other parts of the code using the same session may interact independently
with the resource, e.g. by flushing or even closing it. It is the caller's
responsibility to avoid any undesirable sharing in this regard.
The remaining arguments to the constructor (`flush_secs`, `max_queue`, and
`filename_suffix`) control the construction of the shared writer resource
if one is created. If an existing resource is reused, these arguments have
no effect. See `tf.contrib.summary.create_file_writer` for details.
Args:
session: A `tf.compat.v1.Session`. Session that will hold shared writer
resource. The writer ops will be added to session.graph during this
init call.
logdir: A string. Directory where event file will be written.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
filename_suffix: A string. Every event file's name is suffixed with
`filename_suffix`.
"""
self._session = session
self._logdir = logdir
self._closed = False
if not gfile.IsDirectory(self._logdir):
gfile.MakeDirs(self._logdir)
with self._session.graph.as_default():
with ops.name_scope('filewriter'):
file_writer = summary_ops_v2.create_file_writer(
logdir=self._logdir,
max_queue=max_queue,
flush_millis=flush_secs * 1000,
filename_suffix=filename_suffix)
with summary_ops_v2.always_record_summaries(), file_writer.as_default():
self._event_placeholder = array_ops.placeholder_with_default(
constant_op.constant('unused', dtypes.string),
shape=[])
self._add_event_op = summary_ops_v2.import_event(
self._event_placeholder)
self._init_op = file_writer.init()
self._flush_op = file_writer.flush()
self._close_op = file_writer.close()
self._session.run(self._init_op)
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self._logdir
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
if self._closed:
self._closed = False
self._session.run(self._init_op)
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
if not self._closed:
event_pb = event.SerializeToString()
self._session.run(
self._add_event_op, feed_dict={self._event_placeholder: event_pb})
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self._session.run(self._flush_op)
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
if not self._closed:
self.flush()
self._session.run(self._close_op)
self._closed = True
|
tensorflow-master
|
tensorflow/python/summary/writer/event_file_writer_v2.py
|
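A hedged sketch of driving EventFileWriterV2 directly: construction wires the writer ops into the session's graph, and add_event/flush/close run those ops. The logdir is a placeholder.

from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2

with ops.Graph().as_default(), session.Session() as sess:
  ew = EventFileWriterV2(sess, '/tmp/efw_v2_demo')  # placeholder logdir
  event = event_pb2.Event(
      session_log=event_pb2.SessionLog(status=event_pb2.SessionLog.START),
      step=1)
  ew.add_event(event)  # feeds the serialized event to the import_event op
  ew.flush()
  ew.close()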
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Writes events to disk in a logdir."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import threading
import time
import six
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
class EventFileWriter(object):
"""Writes `Event` protocol buffers to an event file.
The `EventFileWriter` class creates an event file in the specified directory,
and asynchronously writes Event protocol buffers to the file. The Event file
is encoded using the tfrecord format, which is similar to RecordIO.
"""
def __init__(self, logdir, max_queue=10, flush_secs=120,
filename_suffix=None):
"""Creates a `EventFileWriter` and an event file to write to.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers, which are written to
disk via the add_event method.
The other arguments to the constructor control the asynchronous writes to
the event file:
* `flush_secs`: How often, in seconds, to flush the added summaries
and events to disk.
* `max_queue`: Maximum number of summaries or events pending to be
      written to disk before one of the 'add' calls blocks.
Args:
logdir: A string. Directory where event file will be written.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
filename_suffix: A string. Every event file's name is suffixed with
`filename_suffix`.
"""
self._logdir = str(logdir)
if not gfile.IsDirectory(self._logdir):
gfile.MakeDirs(self._logdir)
self._event_queue = six.moves.queue.Queue(max_queue)
self._ev_writer = pywrap_tensorflow.EventsWriter(
compat.as_bytes(os.path.join(self._logdir, "events")))
self._flush_secs = flush_secs
self._sentinel_event = self._get_sentinel_event()
if filename_suffix:
self._ev_writer.InitWithSuffix(compat.as_bytes(filename_suffix))
self._closed = False
self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
self._flush_secs, self._sentinel_event)
self._worker.start()
def _get_sentinel_event(self):
"""Generate a sentinel event for terminating worker."""
return event_pb2.Event()
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self._logdir
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
if self._closed:
self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
self._flush_secs, self._sentinel_event)
self._worker.start()
self._closed = False
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
if not self._closed:
self._event_queue.put(event)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self._event_queue.join()
self._ev_writer.Flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self.add_event(self._sentinel_event)
self.flush()
self._worker.join()
self._ev_writer.Close()
self._closed = True
class _EventLoggerThread(threading.Thread):
"""Thread that logs events."""
def __init__(self, queue, ev_writer, flush_secs, sentinel_event):
"""Creates an _EventLoggerThread.
Args:
queue: A Queue from which to dequeue events.
ev_writer: An event writer. Used to log brain events for
the visualizer.
flush_secs: How often, in seconds, to flush the
pending file to disk.
sentinel_event: A sentinel element in queue that tells this thread to
terminate.
"""
threading.Thread.__init__(self)
self.daemon = True
self._queue = queue
self._ev_writer = ev_writer
self._flush_secs = flush_secs
# The first event will be flushed immediately.
self._next_event_flush_time = 0
self._sentinel_event = sentinel_event
def run(self):
while True:
event = self._queue.get()
if event is self._sentinel_event:
self._queue.task_done()
break
try:
self._ev_writer.WriteEvent(event)
# Flush the event writer every so often.
now = time.time()
if now > self._next_event_flush_time:
self._ev_writer.Flush()
          # Do it again after another `flush_secs` seconds.
self._next_event_flush_time = now + self._flush_secs
finally:
self._queue.task_done()
|
tensorflow-master
|
tensorflow/python/summary/writer/event_file_writer.py
|
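For contrast with the V2 writer above, a sketch of the queue-based EventFileWriter: no session is involved, and the background worker thread does the actual writing. The logdir is a placeholder.

from tensorflow.core.util import event_pb2
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter

ew = EventFileWriter('/tmp/efw_demo', max_queue=10, flush_secs=120)
event = event_pb2.Event(
    session_log=event_pb2.SessionLog(status=event_pb2.SessionLog.START),
    step=1)
ew.add_event(event)  # enqueued; the worker thread writes it asynchronously
ew.flush()           # blocks until the queue drains and the writer flushes
ew.close()           # enqueues the sentinel, joins the worker, closes the file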
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import warnings
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2
from tensorflow.python.util.tf_export import tf_export
_PLUGINS_DIR = "plugins"
class SummaryToEventTransformer(object):
"""Abstractly implements the SummaryWriter API.
  This class implements a number of endpoints (add_summary,
  add_session_log, etc.). Each endpoint generates an event protobuf, which
  is passed to the contained event_writer.
"""
def __init__(self, event_writer, graph=None, graph_def=None):
"""Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event and get_logdir.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
self.event_writer = event_writer
# For storing used tags for session.run() outputs.
self._session_run_tags = {}
if graph is not None or graph_def is not None:
# Calling it with both graph and graph_def for backward compatibility.
self.add_graph(graph=graph, graph_def=graph_def)
# Also export the meta_graph_def in this case.
# graph may itself be a graph_def due to positional arguments
maybe_graph_as_def = (graph.as_graph_def(add_shapes=True)
if isinstance(graph, ops.Graph) else graph)
self.add_meta_graph(
meta_graph.create_meta_graph_def(graph_def=graph_def or
maybe_graph_as_def))
# This set contains tags of Summary Values that have been encountered
# already. The motivation here is that the SummaryWriter only keeps the
# metadata property (which is a SummaryMetadata proto) of the first Summary
# Value encountered for each tag. The SummaryWriter strips away the
# SummaryMetadata for all subsequent Summary Values with tags seen
# previously. This saves space.
self._seen_summary_tags = set()
def add_summary(self, summary, global_step=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
You can pass the result of evaluating any summary op, using
`tf.Session.run` or
`tf.Tensor.eval`, to this
function. Alternatively, you can pass a `tf.compat.v1.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
Args:
summary: A `Summary` protocol buffer, optionally serialized as a string.
global_step: Number. Optional global step value to record with the
summary.
"""
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
# We strip metadata from values with tags that we have seen before in order
# to save space - we just store the metadata on the first value with a
# specific tag.
for value in summary.value:
if not value.metadata:
continue
if value.tag in self._seen_summary_tags:
# This tag has been encountered before. Strip the metadata.
value.ClearField("metadata")
continue
      # This is the first value we have seen with this tag, and it carries
      # metadata. Remember the tag so that metadata is stripped from future
      # values with the same tag string.
self._seen_summary_tags.add(value.tag)
event = event_pb2.Event(summary=summary)
self._add_event(event, global_step)
def add_session_log(self, session_log, global_step=None):
"""Adds a `SessionLog` protocol buffer to the event file.
    This method wraps the provided `SessionLog` in an `Event` protocol buffer
and adds it to the event file.
Args:
session_log: A `SessionLog` protocol buffer.
global_step: Number. Optional global step value to record with the
summary.
"""
event = event_pb2.Event(session_log=session_log)
self._add_event(event, global_step)
def _add_graph_def(self, graph_def, global_step=None):
graph_bytes = graph_def.SerializeToString()
event = event_pb2.Event(graph_def=graph_bytes)
self._add_event(event, global_step)
def add_graph(self, graph, global_step=None, graph_def=None):
"""Adds a `Graph` to the event file.
The graph described by the protocol buffer will be displayed by
TensorBoard. Most users pass a graph in the constructor instead.
Args:
graph: A `Graph` object, such as `sess.graph`.
global_step: Number. Optional global step counter to record with the
graph.
graph_def: DEPRECATED. Use the `graph` parameter instead.
Raises:
ValueError: If both graph and graph_def are passed to the method.
"""
if graph is not None and graph_def is not None:
raise ValueError("Please pass only graph, or graph_def (deprecated), "
"but not both.")
if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
# The user passed a `Graph`.
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if not isinstance(graph, ops.Graph):
logging.warning("When passing a `Graph` object, please use the `graph`"
" named argument instead of `graph_def`.")
graph = graph_def
# Serialize the graph with additional info.
true_graph_def = graph.as_graph_def(add_shapes=True)
self._write_plugin_assets(graph)
elif (isinstance(graph, graph_pb2.GraphDef) or
isinstance(graph_def, graph_pb2.GraphDef)):
# The user passed a `GraphDef`.
logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
" Pass a `Graph` object instead, such as `sess.graph`.")
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if isinstance(graph, graph_pb2.GraphDef):
true_graph_def = graph
else:
true_graph_def = graph_def
else:
# The user passed neither `Graph`, nor `GraphDef`.
raise TypeError("The passed graph must be an instance of `Graph` "
"or the deprecated `GraphDef`")
# Finally, add the graph_def to the summary writer.
self._add_graph_def(true_graph_def, global_step)
def _write_plugin_assets(self, graph):
plugin_assets = plugin_asset.get_all_plugin_assets(graph)
logdir = self.event_writer.get_logdir()
for asset_container in plugin_assets:
plugin_name = asset_container.plugin_name
plugin_dir = os.path.join(logdir, _PLUGINS_DIR, plugin_name)
gfile.MakeDirs(plugin_dir)
assets = asset_container.assets()
for (asset_name, content) in assets.items():
asset_path = os.path.join(plugin_dir, asset_name)
with gfile.Open(asset_path, "w") as f:
f.write(content)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Adds a `MetaGraphDef` to the event file.
The `MetaGraphDef` allows running the given graph via
`saver.import_meta_graph()`.
Args:
meta_graph_def: A `MetaGraphDef` object, often as returned by
`saver.export_meta_graph()`.
global_step: Number. Optional global step counter to record with the
graph.
Raises:
      TypeError: If `meta_graph_def` is not an instance of `MetaGraphDef`.
"""
if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s" %
type(meta_graph_def))
meta_graph_bytes = meta_graph_def.SerializeToString()
event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
self._add_event(event, global_step)
def add_run_metadata(self, run_metadata, tag, global_step=None):
"""Adds a metadata information for a single session.run() call.
Args:
run_metadata: A `RunMetadata` protobuf object.
tag: The tag name for this metadata.
global_step: Number. Optional global step counter to record with the
StepStats.
Raises:
ValueError: If the provided tag was already used for this type of event.
"""
if tag in self._session_run_tags:
raise ValueError("The provided tag was already used for this event type")
self._session_run_tags[tag] = True
tagged_metadata = event_pb2.TaggedRunMetadata()
tagged_metadata.tag = tag
# Store the `RunMetadata` object as bytes in order to have postponed
# (lazy) deserialization when used later.
tagged_metadata.run_metadata = run_metadata.SerializeToString()
event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
self._add_event(event, global_step)
def _add_event(self, event, step):
event.wall_time = time.time()
if step is not None:
event.step = int(step)
self.event_writer.add_event(event)
@tf_export(v1=["summary.FileWriter"])
class FileWriter(SummaryToEventTransformer):
"""Writes `Summary` protocol buffers to event files.
The `FileWriter` class provides a mechanism to create an event file in a
given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
When constructed with a `tf.compat.v1.Session` parameter, a `FileWriter`
instead forms a compatibility layer over new graph-based summaries
(`tf.contrib.summary`) to facilitate the use of new summary writing with
pre-existing code that expects a `FileWriter` instance.
"""
def __init__(self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None,
filename_suffix=None,
session=None):
"""Creates a `FileWriter`, optionally shared within the given session.
Typically, constructing a file writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
    TensorBoard will read the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```
The `session` argument to the constructor makes the returned `FileWriter` a
compatibility layer over new graph-based summaries (`tf.contrib.summary`).
Crucially, this means the underlying writer resource and events file will
be shared with any other `FileWriter` using the same `session` and `logdir`,
    and with any `tf.contrib.summary.SummaryWriter` in this session using the
    same shared resource name (which by default is scoped to the logdir). If
no such resource exists, one will be created using the remaining arguments
to this constructor, but if one already exists those arguments are ignored.
In either case, ops will be added to `session.graph` to control the
underlying file writer resource. See `tf.contrib.summary` for more details.
Args:
logdir: A string. Directory where event file will be written.
graph: A `Graph` object, such as `sess.graph`.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
graph_def: DEPRECATED: Use the `graph` argument instead.
      filename_suffix: A string. Every event file's name is suffixed with
        `filename_suffix`.
session: A `tf.compat.v1.Session` object. See details above.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
`FileWriter` is not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError(
"tf.summary.FileWriter is not compatible with eager execution. "
"Use tf.contrib.summary instead.")
if session is not None:
event_writer = EventFileWriterV2(
session, logdir, max_queue, flush_secs, filename_suffix)
else:
event_writer = EventFileWriter(logdir, max_queue, flush_secs,
filename_suffix)
self._closed = False
super(FileWriter, self).__init__(event_writer, graph, graph_def)
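  # Hedged sketch of the session-based compatibility mode described above
  # (the logdir is a placeholder; requires TF 1.x with tf.contrib available):
  #
  #   sess = tf.compat.v1.Session()
  #   writer = tf.compat.v1.summary.FileWriter("/tmp/logdir", session=sess)
  #   # Any other FileWriter constructed with the same session and logdir now
  #   # shares the same underlying events-file resource.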
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self.event_writer.get_logdir()
def _warn_if_event_writer_is_closed(self):
if self._closed:
warnings.warn("Attempting to use a closed FileWriter. "
"The operation will be a noop unless the FileWriter "
"is explicitly reopened.")
def _add_event(self, event, step):
self._warn_if_event_writer_is_closed()
super(FileWriter, self)._add_event(event, step)
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
self._warn_if_event_writer_is_closed()
self.event_writer.add_event(event)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
# Flushing a closed EventFileWriterV2 raises an exception. It is,
# however, a noop for EventFileWriter.
self._warn_if_event_writer_is_closed()
self.event_writer.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self.event_writer.close()
self._closed = True
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
self.event_writer.reopen()
self._closed = False
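# Illustrative lifecycle sketch for FileWriter (not part of the original
# module; `summary_proto` and the logdir are placeholders):
#
#   with tf.compat.v1.summary.FileWriter("/tmp/logdir") as writer:
#     writer.add_summary(summary_proto, global_step=1)
#   # __exit__ calls close(); reopen() starts a new events file in the same
#   # directory:
#   writer.reopen()
#   writer.close()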
|
tensorflow-master
|
tensorflow/python/summary/writer/writer.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public Python API of TensorFlow Debugger (tfdbg).
See the [TFDBG](https://www.tensorflow.org/guide/debugger) guide.
@@add_debug_tensor_watch
@@watch_graph
@@watch_graph_with_blacklists
@@DebugTensorDatum
@@DebugDumpDir
@@load_tensor_from_event
@@load_tensor_from_event_file
@@has_inf_or_nan
@@DumpingDebugHook
@@DumpingDebugWrapperSession
@@GrpcDebugHook
@@GrpcDebugWrapperSession
@@LocalCLIDebugHook
@@LocalCLIDebugWrapperSession
@@TensorBoardDebugHook
@@TensorBoardDebugWrapperSession
@@WatchOptions
@@reconstruct_non_debug_graph_def
@@GradientsDebugger
@@clear_gradient_debuggers
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.debug.lib.debug_data import DebugDumpDir
from tensorflow.python.debug.lib.debug_data import DebugTensorDatum
from tensorflow.python.debug.lib.debug_data import has_inf_or_nan
from tensorflow.python.debug.lib.debug_data import load_tensor_from_event
from tensorflow.python.debug.lib.debug_data import load_tensor_from_event_file
from tensorflow.python.debug.lib.debug_gradients import GradientsDebugger
from tensorflow.python.debug.lib.debug_graphs import reconstruct_non_debug_graph_def
from tensorflow.python.debug.lib.debug_utils import add_debug_tensor_watch
from tensorflow.python.debug.lib.debug_utils import watch_graph
from tensorflow.python.debug.lib.debug_utils import watch_graph_with_blacklists
from tensorflow.python.debug.wrappers.dumping_wrapper import DumpingDebugWrapperSession
from tensorflow.python.debug.wrappers.framework import WatchOptions
from tensorflow.python.debug.wrappers.grpc_wrapper import GrpcDebugWrapperSession
from tensorflow.python.debug.wrappers.grpc_wrapper import TensorBoardDebugWrapperSession
from tensorflow.python.debug.wrappers.hooks import DumpingDebugHook
from tensorflow.python.debug.wrappers.hooks import GrpcDebugHook
from tensorflow.python.debug.wrappers.hooks import LocalCLIDebugHook
from tensorflow.python.debug.wrappers.hooks import TensorBoardDebugHook
from tensorflow.python.debug.wrappers.local_cli_wrapper import LocalCLIDebugWrapperSession
from tensorflow.python.util import all_util as _all_util
_all_util.remove_undocumented(__name__)
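# Hedged usage sketch for the tfdbg API exported above (`fetches` is a
# placeholder; assumes a TF 1.x graph and session):
#
#   import tensorflow as tf
#   from tensorflow.python import debug as tf_debug
#   sess = tf_debug.LocalCLIDebugWrapperSession(tf.compat.v1.Session())
#   sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
#   sess.run(fetches)  # drops into the CLI at run-start and run-end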
|
tensorflow-master
|
tensorflow/python/debug/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for local command-line-interface debug wrapper session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
class LocalCLIDebuggerWrapperSessionForTest(
local_cli_wrapper.LocalCLIDebugWrapperSession):
"""Subclasses the wrapper class for testing.
Overrides its CLI-related methods for headless testing environments.
Inserts observer variables for assertions.
"""
def __init__(self,
command_sequence,
sess,
dump_root=None):
"""Constructor of the for-test subclass.
Args:
command_sequence: (list of list of str) A list of command arguments,
including the command prefix, each element of the list is such as:
["run", "-n"],
["print_feed", "input:0"].
sess: See the doc string of LocalCLIDebugWrapperSession.__init__.
dump_root: See the doc string of LocalCLIDebugWrapperSession.__init__.
"""
local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
self, sess, dump_root=dump_root, log_usage=False)
self._command_sequence = command_sequence
self._command_pointer = 0
# Observer variables.
self.observers = {
"debug_dumps": [],
"tf_errors": [],
"run_start_cli_run_numbers": [],
"run_end_cli_run_numbers": [],
"print_feed_responses": [],
"profiler_py_graphs": [],
"profiler_run_metadata": [],
}
def _prep_cli_for_run_start(self):
pass
def _prep_debug_cli_for_run_end(self,
debug_dump,
tf_error,
passed_filter,
passed_filter_exclude_op_names):
self.observers["debug_dumps"].append(debug_dump)
self.observers["tf_errors"].append(tf_error)
def _prep_profile_cli_for_run_end(self, py_graph, run_metadata):
self.observers["profiler_py_graphs"].append(py_graph)
self.observers["profiler_run_metadata"].append(run_metadata)
def _launch_cli(self):
if self._is_run_start:
self.observers["run_start_cli_run_numbers"].append(self._run_call_count)
else:
self.observers["run_end_cli_run_numbers"].append(self._run_call_count)
readline_cli = ui_factory.get_ui("readline")
self._register_this_run_info(readline_cli)
while True:
command = self._command_sequence[self._command_pointer]
self._command_pointer += 1
try:
if command[0] == "run":
self._run_handler(command[1:])
elif command[0] == "print_feed":
self.observers["print_feed_responses"].append(
self._print_feed_handler(command[1:]))
else:
raise ValueError("Unrecognized command prefix: %s" % command[0])
except debugger_cli_common.CommandLineExit as e:
return e.exit_token
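# Hedged example of driving the for-test subclass above (`fetch` is a
# placeholder): each CLI launch consumes the next entry of the command
# sequence, so
#
#   wrapped = LocalCLIDebuggerWrapperSessionForTest(
#       [["run", "-n"], ["run"]], session.Session())
#   wrapped.run(fetch)
#
# pops ["run", "-n"] at the run-start launch and ["run"] at the next launch.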
@test_util.run_v1_only("b/120545219")
class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mktemp()
self.v = variables.VariableV1(10.0, name="v")
self.w = variables.VariableV1(21.0, name="w")
self.delta = constant_op.constant(1.0, name="delta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.w_int = control_flow_ops.with_dependencies(
[self.inc_v],
math_ops.cast(self.w, dtypes.int32, name="w_int_inner"),
name="w_int_outer")
self.ph = array_ops.placeholder(dtypes.float32, name="ph")
self.xph = array_ops.transpose(self.ph, name="xph")
self.m = constant_op.constant(
[[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
self.y = math_ops.matmul(self.m, self.xph, name="y")
self.sparse_ph = array_ops.sparse_placeholder(
dtypes.float32, shape=([5, 5]), name="sparse_placeholder")
self.sparse_add = sparse_ops.sparse_add(self.sparse_ph, self.sparse_ph)
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
config_proto = config_pb2.ConfigProto(graph_options=graph_options)
self.sess = session.Session(config=config_proto)
# Initialize variable.
self.sess.run(variables.global_variables_initializer())
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
def testConstructWrapper(self):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), log_usage=False)
def testConstructWrapperWithExistingEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
self.assertTrue(os.path.isdir(self._tmp_dir))
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingNonEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
dir_path = os.path.join(self._tmp_dir, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "dump_root path points to a non-empty directory"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingFileDumpRoot(self):
os.mkdir(self._tmp_dir)
file_path = os.path.join(self._tmp_dir, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(os.path.isfile(file_path))
with self.assertRaisesRegexp(ValueError, "dump_root path points to a file"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=file_path, log_usage=False)
def testRunsUnderDebugMode(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Assert correct run call numbers for which the CLI has been launched at
# run-start and run-end.
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1, 2], wrapped_sess.observers["run_end_cli_run_numbers"])
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the TensorFlow runtime errors are picked up and in this case,
# they should be both None.
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunsWithEmptyStringDumpRootWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root="")
# run under debug mode.
wrapped_sess.run(self.inc_v)
self.assertAllClose(11.0, self.sess.run(self.v))
def testRunInfoOutputAtRunEndIsCorrect(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
tfdbg_logo = cli_shared.get_tfdbg_logo()
# The run_info output in the first run() call should contain the tfdbg logo.
self.assertEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
# The run_info output in the second run() call should NOT contain the logo.
self.assertNotEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
def testRunsUnderNonDebugMode(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-n"], ["run", "-n"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
def testRunningWithSparsePlaceholderFeedWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
sparse_feed = ([[0, 1], [0, 2]], [10.0, 20.0])
sparse_result = wrapped_sess.run(
self.sparse_add, feed_dict={self.sparse_ph: sparse_feed})
self.assertAllEqual([[0, 1], [0, 2]], sparse_result.indices)
self.assertAllClose([20.0, 40.0], sparse_result.values)
def testRunsUnderNonDebugThenDebugMode(self):
# Do two NON_DEBUG_RUNs, followed by DEBUG_RUNs.
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-n"], ["run"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
# Here, the CLI should have been launched only under the third run,
# because the first and second runs are NON_DEBUG.
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesWithinLimit(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-t", "3"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesOverLimit(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-t", "3"]], self.sess, dump_root=self._tmp_dir)
# run twice, which is less than the number of times specified by the
# command.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(12.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(0, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([], wrapped_sess.observers["tf_errors"])
  def testRunMixingDebugModeAndMultipleTimes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-t", "2"], ["run"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run four times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(14.0, self.sess.run(self.v))
self.assertEqual([1, 2],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3, 4], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testDebuggingMakeCallableTensorRunnerWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
v = variables.VariableV1(42)
tensor_runner = wrapped_sess.make_callable(v)
self.sess.run(v.initializer)
self.assertAllClose(42, tensor_runner())
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
def testDebuggingMakeCallableTensorRunnerWithCustomRunOptionsWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
a = constant_op.constant(42)
tensor_runner = wrapped_sess.make_callable(a)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertAllClose(
42, tensor_runner(options=run_options, run_metadata=run_metadata))
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testDebuggingMakeCallableOperationRunnerWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
v = variables.VariableV1(10.0)
inc_v = state_ops.assign_add(v, 1.0)
op_runner = wrapped_sess.make_callable(inc_v.op)
self.sess.run(v.initializer)
op_runner()
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual(11.0, self.sess.run(v))
def testDebuggingMakeCallableRunnerWithFeedListWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
ph1 = array_ops.placeholder(dtypes.float32)
ph2 = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph1, ph2)
tensor_runner = wrapped_sess.make_callable(a, feed_list=[ph1, ph2])
self.assertAllClose(42.0, tensor_runner(41.0, 1.0))
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
def testDebuggingMakeCallableFromOptionsWithZeroFeedWorks(self):
variable_1 = variables.VariableV1(
10.5, dtype=dtypes.float32, name="variable_1")
a = math_ops.add(variable_1, variable_1, "callable_a")
math_ops.add(a, a, "callable_b")
self.sess.run(variable_1.initializer)
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]] * 3, self.sess, dump_root=self._tmp_dir)
callable_options = config_pb2.CallableOptions()
callable_options.fetch.append("callable_b")
sess_callable = wrapped_sess._make_callable_from_options(callable_options)
for _ in range(2):
callable_output = sess_callable()
self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(2, len(debug_dumps))
for debug_dump in debug_dumps:
node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
self.assertItemsEqual(
["callable_a", "callable_b", "variable_1", "variable_1/read"],
node_names)
def testDebuggingMakeCallableFromOptionsWithOneFeedWorks(self):
ph1 = array_ops.placeholder(dtypes.float32, name="callable_ph1")
a = math_ops.add(ph1, ph1, "callable_a")
math_ops.add(a, a, "callable_b")
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]] * 3, self.sess, dump_root=self._tmp_dir)
callable_options = config_pb2.CallableOptions()
callable_options.feed.append("callable_ph1")
callable_options.fetch.append("callable_b")
sess_callable = wrapped_sess._make_callable_from_options(callable_options)
ph1_value = np.array([10.5, -10.5], dtype=np.float32)
for _ in range(2):
callable_output = sess_callable(ph1_value)
self.assertAllClose(
np.array([42.0, -42.0], dtype=np.float32), callable_output[0])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(2, len(debug_dumps))
for debug_dump in debug_dumps:
node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
self.assertItemsEqual(["callable_a", "callable_b"], node_names)
def testDebuggingMakeCallableFromOptionsWithTwoFeedsWorks(self):
ph1 = array_ops.placeholder(dtypes.float32, name="callable_ph1")
ph2 = array_ops.placeholder(dtypes.float32, name="callable_ph2")
a = math_ops.add(ph1, ph2, "callable_a")
math_ops.add(a, a, "callable_b")
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]] * 3, self.sess, dump_root=self._tmp_dir)
callable_options = config_pb2.CallableOptions()
callable_options.feed.append("callable_ph1")
callable_options.feed.append("callable_ph2")
callable_options.fetch.append("callable_b")
sess_callable = wrapped_sess._make_callable_from_options(callable_options)
ph1_value = np.array(5.0, dtype=np.float32)
ph2_value = np.array(16.0, dtype=np.float32)
for _ in range(2):
callable_output = sess_callable(ph1_value, ph2_value)
self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(2, len(debug_dumps))
for debug_dump in debug_dumps:
node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
self.assertItemsEqual(["callable_a", "callable_b"], node_names)
def testDebugMakeCallableFromOptionsWithCustomOptionsAndMetadataWorks(self):
variable_1 = variables.VariableV1(
10.5, dtype=dtypes.float32, name="variable_1")
a = math_ops.add(variable_1, variable_1, "callable_a")
math_ops.add(a, a, "callable_b")
self.sess.run(variable_1.initializer)
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
callable_options = config_pb2.CallableOptions()
callable_options.fetch.append("callable_b")
callable_options.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
sess_callable = wrapped_sess._make_callable_from_options(callable_options)
run_metadata = config_pb2.RunMetadata()
# Call the callable with a custom run_metadata.
callable_output = sess_callable(run_metadata=run_metadata)
# Verify that step_stats is populated in the custom run_metadata.
self.assertTrue(run_metadata.step_stats)
self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(1, len(debug_dumps))
debug_dump = debug_dumps[0]
node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
self.assertItemsEqual(
["callable_a", "callable_b", "variable_1", "variable_1/read"],
node_names)
def testRuntimeErrorShouldBeCaught(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
    # Do a run that should lead to a TensorFlow runtime error.
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0], [1.0], [2.0]]})
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the runtime error is caught by the wrapped session properly.
self.assertEqual(1, len(wrapped_sess.observers["tf_errors"]))
tf_error = wrapped_sess.observers["tf_errors"][0]
self.assertEqual("y", tf_error.op.name)
def testRuntimeErrorBeforeGraphExecutionIsRaised(self):
# Use an impossible device name to cause an error before graph execution.
with ops.device("/device:GPU:1337"):
w = variables.VariableV1([1.0] * 10, name="w")
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]], self.sess, dump_root=self._tmp_dir)
with self.assertRaisesRegexp(errors.OpError, r".*[Dd]evice.*1337.*"):
wrapped_sess.run(w)
def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-f", "v_greater_than_twelve"],
["run", "-f", "v_greater_than_twelve"],
["run"]],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
# Verify that adding the same tensor filter more than once is tolerated
# (i.e., as if it were added only once).
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
    # The run-end CLI should NOT have been launched for runs #2 and #3,
    # because v does not exceed 12.0 until run #4.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
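  # A tensor filter is any callable (datum, tensor) -> bool. For example, a
  # hedged sketch of an inf/nan detector in the spirit of
  # debug_data.has_inf_or_nan (the library version handles more edge cases):
  #
  #   def my_has_inf_or_nan(datum, tensor):
  #     del datum  # Unused: only the tensor value matters here.
  #     return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))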
def testRunTillFilterPassesWithExcludeOpNames(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-f", "greater_than_twelve",
"--filter_exclude_node_names", "inc_v.*"],
["run"], ["run"]],
self.sess,
dump_root=self._tmp_dir)
def greater_than_twelve(datum, tensor):
del datum # Unused.
return tensor > 12.0
    # Register the tensor filter used by the -f flag.
wrapped_sess.add_tensor_filter("greater_than_twelve", greater_than_twelve)
    # run four times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(14.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
    # Due to the --filter_exclude_node_names flag, the run-end CLI should show
    # up not after run 3, but after run 4.
self.assertEqual([4], wrapped_sess.observers["run_end_cli_run_numbers"])
def testRunTillFilterPassesWorksInConjunctionWithOtherNodeNameFilter(self):
"""Test that --.*_filter flags work in conjunction with -f.
In other words, test that you can use a tensor filter on a subset of
the tensors.
"""
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-f", "v_greater_than_twelve", "--node_name_filter", "v$"],
["run", "-f", "v_greater_than_twelve", "--node_name_filter", "v$"],
["run"]],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
    # The run-end CLI should NOT have been launched for runs #2 and #3,
    # because v does not exceed 12.0 until run #4.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(2, len(debug_dumps))
self.assertEqual(1, len(debug_dumps[0].dumped_tensor_data))
self.assertEqual("v:0", debug_dumps[0].dumped_tensor_data[0].tensor_name)
self.assertEqual(1, len(debug_dumps[1].dumped_tensor_data))
self.assertEqual("v:0", debug_dumps[1].dumped_tensor_data[0].tensor_name)
def testRunsUnderDebugModeWithWatchFnFilteringNodeNames(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--node_name_filter", "inc.*"],
["run", "--node_name_filter", "delta"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("inc_v", dumps.dumped_tensor_data[0].node_name)
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(1, dumps.size)
self.assertEqual("delta", dumps.dumped_tensor_data[0].node_name)
def testRunsUnderDebugModeWithWatchFnFilteringOpTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--node_name_filter", "delta"],
["run", "--op_type_filter", "AssignAdd"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("delta", dumps.dumped_tensor_data[0].node_name)
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(1, dumps.size)
self.assertEqual("inc_v", dumps.dumped_tensor_data[0].node_name)
def testRunsUnderDebugModeWithWatchFnFilteringTensorDTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--op_type_filter", "Variable.*"],
["run", "--tensor_dtype_filter", "int32"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.w_int)
wrapped_sess.run(self.w_int)
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(2, dumps.size)
self.assertItemsEqual(
["v", "w"], [dumps.dumped_tensor_data[i].node_name for i in [0, 1]])
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(2, dumps.size)
self.assertEqual(
["w_int_inner", "w_int_outer"],
[dumps.dumped_tensor_data[i].node_name for i in [0, 1]])
def testRunsUnderDebugModeWithWatchFnFilteringOpTypesAndTensorDTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--op_type_filter", "Cast", "--tensor_dtype_filter", "int32"],
["run"]],
self.sess, dump_root=self._tmp_dir)
    # run under debug mode once.
wrapped_sess.run(self.w_int)
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("w_int_inner", dumps.dumped_tensor_data[0].node_name)
def testPrintFeedPrintsFeedValueForTensorFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "ph:0"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["Tensor \"ph:0 (feed)\":", "", "[[0.0, 1.0, 2.0]]"],
print_feed_responses[0].lines)
def testPrintFeedPrintsFeedValueForTensorNameFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "ph:0"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={"ph:0": [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["Tensor \"ph:0 (feed)\":", "", "[[0.0, 1.0, 2.0]]"],
print_feed_responses[0].lines)
def testPrintFeedPrintsErrorForInvalidFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "spam"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={"ph:0": [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["ERROR: The feed_dict of the current run does not contain the key "
"spam"], print_feed_responses[0].lines)
def testPrintFeedPrintsErrorWhenFeedDictIsNone(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "spam"], ["run"], ["run"]], self.sess)
wrapped_sess.run(self.w_int)
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["ERROR: The feed_dict of the current run is None or empty."],
print_feed_responses[0].lines)
def testRunUnderProfilerModeWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-p"], ["run"]], self.sess)
wrapped_sess.run(self.w_int)
self.assertEqual(1, len(wrapped_sess.observers["profiler_run_metadata"]))
self.assertTrue(
wrapped_sess.observers["profiler_run_metadata"][0].step_stats)
self.assertEqual(1, len(wrapped_sess.observers["profiler_py_graphs"]))
self.assertIsInstance(
wrapped_sess.observers["profiler_py_graphs"][0], ops.Graph)
def testCallingHookDelBeforeAnyRun(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess)
del wrapped_sess
def testCallingShouldStopMethodOnNonWrappedNonMonitoredSessionErrors(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess)
with self.assertRaisesRegexp(
ValueError,
r"The wrapped session .* does not have a method .*should_stop.*"):
wrapped_sess.should_stop()
def testLocalCLIDebugWrapperSessionWorksOnMonitoredSession(self):
monitored_sess = monitored_session.MonitoredSession()
wrapped_monitored_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], monitored_sess)
self.assertFalse(wrapped_monitored_sess.should_stop())
def testRunsWithEmptyFetchWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]], self.sess, dump_root="")
run_output = wrapped_sess.run([])
self.assertEqual([], run_output)
def testDebuggingKerasFitWithSkippedRunsWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"], ["run", "-t", "10"]], self.sess)
backend.set_session(wrapped_sess)
model = sequential.Sequential()
model.add(core.Dense(4, input_shape=[2], activation="relu"))
model.add(core.Dense(1))
model.compile(loss="mse", optimizer="sgd")
x = np.zeros([8, 2])
y = np.zeros([8, 1])
model.fit(x, y, epochs=2)
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
def testDebuggingKerasFitWithProfilingWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-p"]] * 10, self.sess)
backend.set_session(wrapped_sess)
model = sequential.Sequential()
model.add(core.Dense(4, input_shape=[2], activation="relu"))
model.add(core.Dense(1))
model.compile(loss="mse", optimizer="sgd")
x = np.zeros([8, 2])
y = np.zeros([8, 1])
model.fit(x, y, epochs=2)
self.assertEqual(0, len(wrapped_sess.observers["debug_dumps"]))
def testRunsWithEmptyNestedFetchWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]], self.sess, dump_root="")
run_output = wrapped_sess.run({"foo": {"baz": []}, "bar": ()})
self.assertEqual({"foo": {"baz": []}, "bar": ()}, run_output)
def testSessionRunHook(self):
a = array_ops.placeholder(dtypes.float32, [10])
b = a + 1
c = b * 2
class Hook(session_run_hook.SessionRunHook):
def before_run(self, _):
return session_run_hook.SessionRunArgs(fetches=c)
class Hook2(session_run_hook.SessionRunHook):
def before_run(self, _):
return session_run_hook.SessionRunArgs(fetches=b)
sess = session.Session()
sess = LocalCLIDebuggerWrapperSessionForTest([["run"], ["run"]], sess)
class SessionCreator(object):
def create_session(self):
return sess
final_sess = monitored_session.MonitoredSession(
session_creator=SessionCreator(), hooks=[Hook(), Hook2()])
final_sess.run(b, feed_dict={a: np.arange(10)})
debug_dumps = sess.observers["debug_dumps"]
self.assertEqual(1, len(debug_dumps))
debug_dump = debug_dumps[0]
node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
self.assertIn(b.op.name, node_names)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/wrappers/local_cli_wrapper_test.py
|