# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for the basic data structures and algorithms for profiling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import step_stats_pb2
from tensorflow.python.debug.lib import profiling
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class AggregateProfile(test_util.TensorFlowTestCase):
def setUp(self):
node_1 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
self.profile_datum_1 = profiling.ProfileDatum(
"cpu:0", node_1, "/foo/bar.py", 10, "func1", "Add")
node_2 = step_stats_pb2.NodeExecStats(
node_name="Mul/456",
op_start_rel_micros=13,
op_end_rel_micros=16,
all_end_rel_micros=17)
self.profile_datum_2 = profiling.ProfileDatum(
"cpu:0", node_2, "/foo/bar.py", 11, "func1", "Mul")
node_3 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=103,
op_end_rel_micros=105,
all_end_rel_micros=4)
self.profile_datum_3 = profiling.ProfileDatum(
"cpu:0", node_3, "/foo/bar.py", 12, "func1", "Add")
node_4 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=203,
op_end_rel_micros=205,
all_end_rel_micros=4)
self.profile_datum_4 = profiling.ProfileDatum(
"gpu:0", node_4, "/foo/bar.py", 13, "func1", "Add")
def testAggregateProfileConstructorWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
self.assertEqual(2, aggregate_data.total_op_time)
self.assertEqual(4, aggregate_data.total_exec_time)
self.assertEqual(1, aggregate_data.node_count)
self.assertEqual(1, aggregate_data.node_exec_count)
def testAddToAggregateProfileWithDifferentNodeWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
aggregate_data.add(self.profile_datum_2)
self.assertEqual(5, aggregate_data.total_op_time)
self.assertEqual(21, aggregate_data.total_exec_time)
self.assertEqual(2, aggregate_data.node_count)
self.assertEqual(2, aggregate_data.node_exec_count)
def testAddToAggregateProfileWithSameNodeWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
aggregate_data.add(self.profile_datum_2)
aggregate_data.add(self.profile_datum_3)
self.assertEqual(7, aggregate_data.total_op_time)
self.assertEqual(25, aggregate_data.total_exec_time)
self.assertEqual(2, aggregate_data.node_count)
self.assertEqual(3, aggregate_data.node_exec_count)
def testAddToAggregateProfileWithDifferentDeviceSameNodeWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
aggregate_data.add(self.profile_datum_4)
self.assertEqual(4, aggregate_data.total_op_time)
self.assertEqual(8, aggregate_data.total_exec_time)
self.assertEqual(2, aggregate_data.node_count)
self.assertEqual(2, aggregate_data.node_exec_count)
if __name__ == "__main__":
googletest.main()
# ==== end of file: tensorflow/python/debug/lib/profiling_test.py (repo: tensorflow-r1.15.5-nv23.03) ====
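The test above is the clearest documentation of the profiling API it exercises; below is a minimal sketch of the same calls, with the expected values taken directly from the test's assertions.

from tensorflow.core.framework import step_stats_pb2
from tensorflow.python.debug.lib import profiling

node = step_stats_pb2.NodeExecStats(
    node_name="Add/123", op_start_rel_micros=3, op_end_rel_micros=5,
    all_end_rel_micros=4)
datum = profiling.ProfileDatum(
    "cpu:0", node, "/foo/bar.py", 10, "func1", "Add")
aggregate = profiling.AggregateProfile(datum)
print(aggregate.total_op_time)    # 2 (= op_end_rel_micros - op_start_rel_micros)
print(aggregate.total_exec_time)  # 4 (= all_end_rel_micros)
print(aggregate.node_count)       # 1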
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates a Keil uVision project file from a template."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import re
def sanitize_xml(unsanitized):
"""Uses a whitelist to avoid generating bad XML."""
return re.sub(r'[^a-zA-Z0-9+_\-/\\.]', '', unsanitized)
def main(unused_args, flags):
"""Generates a Keil project file from a template source."""
with open(flags.input_template, 'r') as input_template_file:
template_file_text = input_template_file.read()
template_file_text = re.sub(r'%{EXECUTABLE}%', flags.executable,
template_file_text)
srcs_list = flags.srcs.split(' ')
hdrs_list = flags.hdrs.split(' ')
all_srcs_list = srcs_list + hdrs_list
all_srcs_list.sort()
replace_srcs = ''
for src in all_srcs_list:
if not src:
continue
ext = os.path.splitext(src)[1]
# These extension indexes are used by uVision to keep track of the type
# of files. I determined them by experimentation, since the file format
# isn't documented.
if ext == '.h':
ext_index = '5'
elif ext == '.c':
ext_index = '1'
elif ext == '.cc' or ext == '.cpp':
ext_index = '8'
else:
ext_index = '5'
basename = sanitize_xml(os.path.basename(src))
clean_src = sanitize_xml(src)
replace_srcs += ' <File>\n'
replace_srcs += ' <FileName>' + basename + '</FileName>\n'
replace_srcs += ' <FileType>' + ext_index + '</FileType>\n'
replace_srcs += ' <FilePath>' + clean_src + '</FilePath>\n'
replace_srcs += ' </File>\n'
template_file_text = re.sub(r'%{SRCS}%', replace_srcs, template_file_text)
include_paths = re.sub(' ', ';', flags.include_paths)
template_file_text = re.sub(r'%{INCLUDE_PATHS}%', include_paths,
template_file_text)
with open(flags.output_file, 'w') as output_file:
output_file.write(template_file_text)
def parse_args():
"""Converts the raw arguments into accessible flags."""
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--input_template',
type=str,
default='',
help='Path to template project file to build from.')
parser.add_argument(
'--output_file',
type=str,
default='',
help='Path to write the completed project file to.')
parser.add_argument(
'--executable',
type=str,
default='',
help='Name of the executable the project will build.')
parser.add_argument(
'--hdrs',
type=str,
default='',
      help='Space-separated list of C or C++ header files to include.')
parser.add_argument(
'--srcs',
type=str,
default='',
      help='Space-separated list of C or C++ source files to compile.')
parser.add_argument(
'--include_paths',
type=str,
default='',
help='Space-separated list of paths to look for header files on.')
flags, unparsed = parser.parse_known_args()
main(unparsed, flags)
if __name__ == '__main__':
parse_args()
# ==== end of file: tensorflow/lite/experimental/micro/tools/make/generate_keil_project.py (repo: tensorflow-r1.15.5-nv23.03) ====
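A minimal standalone sketch of the whitelist used by sanitize_xml above (the sample filename is invented): every character outside a-z, A-Z, 0-9, +, _, -, /, \ and . is dropped.

import re

def sanitize_xml(unsanitized):
  # Same whitelist regex as in generate_keil_project.py above.
  return re.sub(r'[^a-zA-Z0-9+_\-/\\.]', '', unsanitized)

print(sanitize_xml('src/main <&> "file".c'))  # src/mainfile.c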
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resolves non-system C/C++ includes to their full paths to help Arduino."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import re
import sys
def replace_includes(line, supplied_headers_list):
"""Updates any includes to reference the new Arduino library paths."""
include_match = re.match(r'(.*#include.*")(.*)(")', line)
if include_match:
path = include_match.group(2)
for supplied_header in supplied_headers_list:
if supplied_header.endswith(path):
path = supplied_header
break
line = include_match.group(1) + path + include_match.group(3)
return line
def replace_main(line):
"""Updates any occurences of a bare main definition to the Arduino equivalent."""
main_match = re.match(r'(.*int )(main)(\(.*)', line)
if main_match:
line = main_match.group(1) + 'tflite_micro_main' + main_match.group(3)
return line
def main(unused_args, flags):
"""Resolves third party headers to their full paths in source code."""
input_file_lines = sys.stdin.read().split('\n')
supplied_headers_list = flags.third_party_headers.split(' ')
output_lines = []
for line in input_file_lines:
line = replace_includes(line, supplied_headers_list)
line = replace_main(line)
output_lines.append(line)
output_text = '\n'.join(output_lines)
sys.stdout.write(output_text)
def parse_args():
"""Converts the raw arguments into accessible flags."""
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--third_party_headers',
type=str,
default='',
help='Space-separated list of headers to resolve.')
flags, unparsed = parser.parse_known_args()
main(unparsed, flags)
if __name__ == '__main__':
parse_args()
# ==== end of file: tensorflow/lite/experimental/micro/tools/make/transform_arduino_source.py (repo: tensorflow-r1.15.5-nv23.03) ====
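A standalone sketch of the two rewrites transform_arduino_source.py performs, using the same regexes as above; the header path in the example is invented.

import re

def replace_includes(line, headers):
  # Same regex as replace_includes() above.
  m = re.match(r'(.*#include.*")(.*)(")', line)
  if m:
    path = m.group(2)
    for h in headers:
      if h.endswith(path):
        path = h
        break
    line = m.group(1) + path + m.group(3)
  return line

def replace_main(line):
  # Same regex as replace_main() above.
  m = re.match(r'(.*int )(main)(\(.*)', line)
  if m:
    line = m.group(1) + 'tflite_micro_main' + m.group(3)
  return line

print(replace_includes('#include "micro/debug_log.h"',
                       ['some_lib/micro/debug_log.h']))
# -> #include "some_lib/micro/debug_log.h"
print(replace_main('int main(int argc, char** argv) {'))
# -> int tflite_micro_main(int argc, char** argv) {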
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for raw to bitmap converter utility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import numpy as np
from tensorflow.lite.experimental.micro.examples.micro_vision.utils.raw_to_bitmap import parse_file
from tensorflow.lite.experimental.micro.examples.micro_vision.utils.raw_to_bitmap import reshape_bitmaps
from tensorflow.python.platform import test
_RGB_RAW = u"""
+++ frame +++
0x0000 0x00 0x00 0x00 0x01 0x01 0x01 0x02 0x02 0x02 0x03 0x03 0x03 0x04 0x04 0x04 0x05
0x0010 0x05 0x05 0x06 0x06 0x06 0x07 0x07 0x07 0x08 0x08 0x08 0x09 0x09 0x09 0x0a 0x0a
0x0020 0x0a 0x0b 0x0b 0x0b 0x0c 0x0c 0x0c 0x0d 0x0d 0x0d 0x0e 0x0e 0x0e 0x0f 0x0f 0x0f
--- frame ---
"""
_RGB_FLAT = np.array([[
0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8,
8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 14, 14, 14,
15, 15, 15
]])
_RGB_RESHAPED = np.array(
[[[[12, 12, 12], [13, 13, 13], [14, 14, 14], [15, 15, 15]],
[[8, 8, 8], [9, 9, 9], [10, 10, 10], [11, 11, 11]],
[[4, 4, 4], [5, 5, 5], [6, 6, 6], [7, 7, 7]],
[[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3]]]])
_GRAYSCALE_RAW = u"""
+++ frame +++
0x0000 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f
--- frame ---
"""
_GRAYSCALE_FLAT = np.array(
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]])
_GRAYSCALE_RESHAPED = np.array([[[12, 13, 14, 15],
[8, 9, 10, 11],
[4, 5, 6, 7],
[0, 1, 2, 3]]])
_GRAYSCALE_RAW_MULTI = u"""
+++ frame +++
0x0000 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f
--- frame ---
+++ frame +++
0x0000 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f
--- frame ---
+++ frame +++
0x0000 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f
--- frame ---
+++ frame +++
0x0000 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f
--- frame ---
"""
_GRAYSCALE_FLAT_MULTI = [
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
np.array([16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]),
np.array([32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]),
np.array([48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])]
_GRAYSCALE_RESHAPED_MULTI = [
np.array([[12, 13, 14, 15],
[8, 9, 10, 11],
[4, 5, 6, 7],
[0, 1, 2, 3]]),
np.array([[28, 29, 30, 31],
[24, 25, 26, 27],
[20, 21, 22, 23],
[16, 17, 18, 19]]),
np.array([[44, 45, 46, 47],
[40, 41, 42, 43],
[36, 37, 38, 39],
[32, 33, 34, 35]]),
np.array([[60, 61, 62, 63],
[56, 57, 58, 59],
[52, 53, 54, 55],
[48, 49, 50, 51]])]
class RawToBitmapTest(test.TestCase):
def testParseRgb(self):
frame_list = parse_file(io.StringIO(_RGB_RAW), 4, 4, 3)
self.assertTrue(np.array_equal(_RGB_FLAT, frame_list))
def testParseGrayscale(self):
frame_list = parse_file(io.StringIO(_GRAYSCALE_RAW), 4, 4, 1)
self.assertTrue(np.array_equal(_GRAYSCALE_FLAT, frame_list))
def testReshapeRgb(self):
reshaped = reshape_bitmaps(_RGB_FLAT, 4, 4, 3)
self.assertTrue(np.array_equal(_RGB_RESHAPED, reshaped))
def testReshapeGrayscale(self):
reshaped = reshape_bitmaps(_GRAYSCALE_FLAT, 4, 4, 1)
self.assertTrue(np.array_equal(_GRAYSCALE_RESHAPED, reshaped))
def testMultipleGrayscale(self):
frame_list = parse_file(io.StringIO(_GRAYSCALE_RAW_MULTI), 4, 4, 1)
self.assertTrue(np.array_equal(_GRAYSCALE_FLAT_MULTI, frame_list))
reshaped = reshape_bitmaps(frame_list, 4, 4, 1)
self.assertTrue(np.array_equal(_GRAYSCALE_RESHAPED_MULTI, reshaped))
if __name__ == '__main__':
test.main()
# ==== end of file: tensorflow/lite/experimental/micro/examples/micro_vision/utils/raw_to_bitmap_test.py (repo: tensorflow-r1.15.5-nv23.03) ====
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convert raw bytes to a bitmap.
Converts a raw image dumped to a file into a bitmap. The file must contain
complete bitmap images in 324 x 244 resolution, formatted as follows:
+++ frame +++
<byte number> <16 one-byte values separated by spaces>
--- frame ---
For example, the first line might look like:
0x00000000 C5 C3 CE D1 D9 DA D6 E3 E2 EB E9 EB DB E4 F5 FF
The bitmaps are automatically saved to the same directory as the log file, and
are displayed by the script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import os.path
import re
import numpy as np
_DICT_RESOLUTIONS = {
'QVGA': (324, 244, 1),
'GRAY': (96, 96, 1),
'RGB': (96, 96, 3),
}
_VERSION = 0
_SUBVERSION = 1
def check_file_existence(x):
if not os.path.isfile(x):
# Argparse uses the ArgumentTypeError to give a rejection message like:
# error: argument input: x does not exist
raise argparse.ArgumentTypeError('{0} does not exist'.format(x))
return x
def show_and_save_bitmaps(input_file, bitmap_list, channels):
"""Display and save a list of bitmaps.
Args:
input_file: input file name
bitmap_list: list of numpy arrays to represent bitmap images
channels: color channel count
"""
try:
from PIL import Image # pylint: disable=g-import-not-at-top
except ImportError:
raise NotImplementedError('Image display and save not implemented.')
for idx, bitmap in enumerate(bitmap_list):
path = os.path.dirname(os.path.abspath(input_file))
basename = os.path.split(os.path.splitext(input_file)[0])[-1]
outputfile = os.path.join(path, basename + '_' + str(idx) + '.bmp')
if channels == 3:
img = Image.fromarray(bitmap, 'RGB')
else:
img = Image.fromarray(bitmap, 'L')
img.save(outputfile)
img.show()
def reshape_bitmaps(frame_list, width, height, channels):
"""Reshape flat integer arrays.
Args:
frame_list: list of 1-D arrays to represent raw image data
width: image width in pixels
height: image height in pixels
channels: color channel count
Returns:
list of numpy arrays to represent bitmap images
"""
bitmap_list = []
for frame in frame_list:
shape = (height, width, channels) if channels > 1 else (height, width)
bitmap = np.reshape(frame, shape)
bitmap = np.flip(bitmap, 0)
bitmap_list.append(bitmap)
return bitmap_list
def parse_file(inputfile, width, height, channels):
"""Convert log file to array of pixels.
Args:
inputfile: log file to parse
width: image width in pixels
height: image height in pixels
channels: color channel count
Returns:
    list of 1-D arrays representing raw image data.
"""
data = None
bytes_written = 0
frame_start = False
frame_stop = False
frame_list = list()
# collect all pixel data into an int array
for line in inputfile:
if line == '+++ frame +++\n':
frame_start = True
data = np.zeros(height * width * channels, dtype=np.uint8)
bytes_written = 0
continue
elif line == '--- frame ---\n':
frame_stop = True
if frame_start and not frame_stop:
linelist = re.findall(r"[\w']+", line)
if len(linelist) != 17:
# drop this frame
frame_start = False
continue
for item in linelist[1:]:
data[bytes_written] = int(item, base=16)
bytes_written += 1
elif frame_start and frame_stop:
if bytes_written == height * width * channels:
frame_list.append(data)
frame_start = False
frame_stop = False
return frame_list
def main():
parser = argparse.ArgumentParser(
description='This program converts raw data from HM01B0 to a bmp file.')
parser.add_argument(
'-i',
'--input',
dest='inputfile',
required=True,
help='input file',
metavar='FILE',
type=check_file_existence)
parser.add_argument(
'-r',
'--resolution',
dest='resolution',
required=False,
help='Resolution',
choices=['QVGA', 'RGB', 'GRAY'],
default='QVGA',
)
parser.add_argument(
'-v',
'--version',
help='Program version',
action='version',
version='%(prog)s {ver}'.format(ver='v%d.%d' % (_VERSION, _SUBVERSION)))
args = parser.parse_args()
  # argparse restricts --resolution to the keys of _DICT_RESOLUTIONS, so the
  # lookup cannot fail.
  (width, height, channels) = _DICT_RESOLUTIONS[args.resolution]
frame_list = parse_file(open(args.inputfile), width, height, channels)
bitmap_list = reshape_bitmaps(frame_list, width, height, channels)
show_and_save_bitmaps(args.inputfile, bitmap_list, channels)
if __name__ == '__main__':
main()
# ==== end of file: tensorflow/lite/experimental/micro/examples/micro_vision/utils/raw_to_bitmap.py (repo: tensorflow-r1.15.5-nv23.03) ====
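The vertical flip in reshape_bitmaps above means the last line of a dumped frame becomes the top row of the bitmap; this short numpy sketch reproduces the expectation encoded in _GRAYSCALE_RESHAPED in the test file earlier.

import numpy as np

flat = np.arange(16, dtype=np.uint8)           # one 4x4 grayscale frame, flat
bitmap = np.flip(np.reshape(flat, (4, 4)), 0)  # same ops as reshape_bitmaps
print(bitmap[0])  # [12 13 14 15] -- top row after the vertical flip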
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugging script for checking calculation values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
import matplotlib.pyplot as plt
import numpy as np
# import soundfile as sf
def new_data_to_array(fn, datatype='int16'):
"""Converts file information to an in-memory array."""
vals = []
with open(fn) as f:
for n, line in enumerate(f):
if n != 0:
vals.extend([int(v, 16) for v in line.split()])
b = ''.join(map(chr, vals))
if datatype == 'int8':
typestr = 'b'
arraylen = int(len(b))
elif datatype == 'int16':
typestr = 'h'
arraylen = int(len(b) // 2)
elif datatype == 'int32':
typestr = 'i'
arraylen = int(len(b) // 4)
  elif datatype == 'uint8':
typestr = 'B'
arraylen = int(len(b))
elif datatype == 'uint16':
typestr = 'H'
arraylen = int(len(b) // 2)
  elif datatype == 'uint32':
    typestr = 'I'
    arraylen = int(len(b) // 4)
  else:
    raise ValueError('Unsupported datatype: %s' % datatype)
y = np.array(struct.unpack('<' + typestr * arraylen, b))
return y
# x is the fixed-point input in Qm.n format
def to_float(x, n):
return x.astype(float) * 2**(-n)
micro_windowed_input = new_data_to_array(
'micro_windowed_input.txt', datatype='int32')
cmsis_windowed_input = new_data_to_array(
'cmsis_windowed_input.txt', datatype='int16')
micro_dft = new_data_to_array('micro_dft.txt', datatype='int32')
cmsis_dft = new_data_to_array('cmsis_dft.txt', datatype='int16')
py_dft = np.fft.rfft(to_float(cmsis_windowed_input, 15), n=512)
py_result = np.empty((2 * py_dft.size), dtype=float)
py_result[0::2] = np.real(py_dft)
py_result[1::2] = np.imag(py_dft)
micro_power = new_data_to_array('micro_power.txt', datatype='int32')
cmsis_power = new_data_to_array('cmsis_power.txt', datatype='int16')
py_power = np.square(np.abs(py_dft))
micro_power_avg = new_data_to_array('micro_power_avg.txt', datatype='uint8')
cmsis_power_avg = new_data_to_array('cmsis_power_avg.txt', datatype='uint8')
plt.figure(1)
plt.subplot(311)
plt.plot(micro_windowed_input, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_windowed_input, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_windowed_input, 30), label='Micro to float')
plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS to float')
plt.legend()
plt.figure(2)
plt.subplot(311)
plt.plot(micro_dft, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_dft, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_dft, 22), label='Micro to float')
# The CMSIS result has 6 fractional bits (not 7) due to a documentation error
# (see README.md).
plt.plot(to_float(cmsis_dft, 6), label='CMSIS to float')
plt.plot(py_result, label='Python result')
plt.legend()
plt.figure(3)
plt.subplot(311)
plt.plot(micro_power, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_power[0:256], label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_power, 22), label='Micro to float')
plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS to float')
plt.plot(py_power, label='Python result')
plt.legend()
plt.figure(4)
plt.plot(micro_power_avg, label='Micro fixed')
plt.plot(cmsis_power_avg, label='CMSIS fixed')
plt.legend()
plt.show()
# t = np.arange(16000.*0.03)/16000.
# # Factor of 10 because micro preprocessing overflows otherwise
# sin1k = 0.1*np.sin(2*np.pi*1000*t)
#
# plt.figure(1)
# plt.subplot(511)
# plt.plot(sin1k)
# plt.title('Input sine')
#
# plt.subplot(512)
# plt.plot(to_float(micro_windowed_input, 30), label='Micro-Lite')
# plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS')
# plt.title('Windowed sine')
# plt.legend(loc='center right')
#
# plt.subplot(513)
# plt.plot(to_float(micro_dft, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_dft, 6), label='CMSIS')
# plt.title('FFT')
# plt.legend(loc='center')
#
# plt.subplot(514)
# plt.plot(to_float(micro_power, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS')
# plt.title('|FFT|^2')
# plt.legend(loc='center right')
#
# plt.subplot(515)
# plt.plot(micro_power_avg, label='Micro-Lite')
# plt.plot(cmsis_power_avg, label='CMSIS')
# plt.title('Averaged |FFT|^2')
# plt.legend(loc='center right')
#
# plt.tight_layout(pad=0, w_pad=0.2, h_pad=0.2)
#
# plt.show()
#
# ==== end of file: tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/compare_1k.py (repo: tensorflow-r1.15.5-nv23.03) ====
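The Qm.n conversion used throughout compare_1k.py above is just a scale by 2**(-n); a self-contained sketch:

import numpy as np

def to_float(x, n):
  # Interpret x as fixed point with n fractional bits (Qm.n).
  return x.astype(float) * 2**(-n)

q15 = np.array([16384, -32768], dtype=np.int16)  # Q0.15 samples
print(to_float(q15, 15))  # [ 0.5 -1. ]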
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts values pulled from the microcontroller into audio files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
# import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
def new_data_to_array(fn):
vals = []
with open(fn) as f:
for n, line in enumerate(f):
if n != 0:
vals.extend([int(v, 16) for v in line.split()])
b = ''.join(map(chr, vals))
y = struct.unpack('<' + 'h' * int(len(b) / 2), b)
return y
data = 'captured_data.txt'
values = np.array(new_data_to_array(data)).astype(float)
# plt.plot(values, 'o-')
# plt.show(block=False)
wav = values / np.max(np.abs(values))
sf.write('captured_data.wav', wav, 16000)
# ==== end of file: tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/captured_data_to_wav.py (repo: tensorflow-r1.15.5-nv23.03) ====
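The unpack pattern in new_data_to_array above reads little-endian int16 samples. Here is a Python 3 sketch of the same struct call on a raw byte buffer (the script itself builds its buffer from chr() values, a Python 2 idiom):

import struct

raw = struct.pack('<hh', 1000, -1000)  # two little-endian int16 samples
samples = struct.unpack('<' + 'h' * (len(raw) // 2), raw)
print(samples)  # (1000, -1000)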
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Outputs tables used for fast calculations at runtime."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import soundfile as sf
import numpy as np
def to_cc(x, varname, directory='', scale_factor=1):
"""Writes table values to a C++ source file."""
x = (x / np.max(np.abs(x))) * 32768 * scale_factor
x[x > 32767] = 32767
x[x < -32768] = -32768
x = x.astype(int)
x = [str(v) if i % 10 != 0 else '\n ' + str(v) for i, v in enumerate(x)]
cmsis_path = 'tensorflow/lite/experimental/micro/examples/micro_speech/CMSIS'
xstr = '#include "{}/{}.h"\n\n'.format(cmsis_path, varname)
xstr += 'const int g_{}_size = {};\n'.format(varname, len(x))
xstr += 'const int16_t g_{}[{}] = {{{}}};\n'.format(varname, len(x),
', '.join(x))
with open(directory + varname + '.cc', 'w') as f:
f.write(xstr)
def to_h(_, varname, directory=''):
"""Writes a header file for the table values."""
tf_prepend = 'TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_'
xstr = '#ifndef {}{}_H_\n'.format(tf_prepend, varname.upper())
xstr += '#define {}{}_H_\n\n'.format(tf_prepend, varname.upper())
xstr += '#include <cstdint>\n\n'
xstr += 'extern const int g_{}_size;\n'.format(varname)
xstr += 'extern const int16_t g_{}[];\n\n'.format(varname)
xstr += '#endif // {}{}_H_'.format(tf_prepend, varname.upper())
with open(directory + varname + '.h', 'w') as f:
f.write(xstr)
# x = sf.read('yes_f2e59fea_nohash_1.wav')[0]
# to_cc(x, 'yes_waveform')
# to_h(x, 'yes_waveform')
#
# x = sf.read('no_f9643d42_nohash_4.wav')[0]
# to_cc(x, 'no_waveform')
# to_h(x, 'no_waveform')
# 30ms of data @ 16 kHz = 480 samples
hann = np.hanning(int(16000 * 0.03)) # Window 30ms of data
to_cc(hann, 'hanning', directory='./')
to_h(hann, 'hanning', directory='./')
t = np.arange(16000. * 0.03) / 16000.
# Scale by a factor of 0.1 (via scale_factor below) because the micro
# preprocessing overflows otherwise.
sin1k = np.sin(2 * np.pi * 1000 * t)
to_cc(sin1k, 'sin_1k', directory='./', scale_factor=0.1)
to_h(sin1k, 'sin_1k', directory='./')
# ==== end of file: tensorflow/lite/experimental/micro/examples/micro_speech/CMSIS/create_constants.py (repo: tensorflow-r1.15.5-nv23.03) ====
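For reference, replaying the format strings of to_h above with varname='hanning' produces a header of this shape (a sketch of the string construction, not output captured from a run):

varname = 'hanning'
guard = ('TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_'
         + varname.upper() + '_H_')
header = '#ifndef {0}\n#define {0}\n\n'.format(guard)
header += '#include <cstdint>\n\n'
header += 'extern const int g_{0}_size;\n'.format(varname)
header += 'extern const int16_t g_{0}[];\n\n'.format(varname)
header += '#endif // {0}'.format(guard)
print(header)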
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import flags
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.rnn import bidirectional_dynamic_rnn
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
def __init__(self, *args, **kwargs):
super(BidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
    # MNIST is meant to be classified in 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Rnn Units.
self.num_units = 16
def setUp(self):
super(BidirectionalSequenceRnnTest, self).setUp()
# Import MNIST dataset
data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
self.mnist = input_data.read_data_sets(data_dir, one_hot=True)
def buildRnnLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn1"),
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn2")
])
def buildModel(self,
fw_rnn_layer,
bw_rnn_layer,
is_dynamic_rnn,
is_inference,
use_sequence_length=False):
"""Build Mnist recognition model.
Args:
fw_rnn_layer: The forward rnn layer either a single rnn cell or a multi
rnn cell.
bw_rnn_layer: The backward rnn layer either a single rnn cell or a multi
rnn cell.
      is_dynamic_rnn: Use dynamic_rnn or not.
      is_inference: Whether the model is built for inference; if so, the
        batch size is set to 1.
      use_sequence_length: Whether to use sequence length or not. Defaults to
        False.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units * 2, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
batch_size = self.batch_size
if is_inference:
batch_size = 1
# input image placeholder
x = tf.placeholder(
"float", [batch_size, self.time_steps, self.n_input],
name="INPUT_IMAGE")
sequence_length = None
if use_sequence_length:
sequence_length = [self.time_steps] * batch_size
if is_dynamic_rnn:
rnn_inputs = tf.transpose(x, [1, 0, 2])
outputs, _ = bidirectional_dynamic_rnn(
fw_rnn_layer,
bw_rnn_layer,
rnn_inputs,
sequence_length,
dtype="float32",
time_major=True)
fw_outputs, bw_outputs = outputs
output = tf.concat([fw_outputs, bw_outputs], 2)
output = tf.unstack(output, axis=0)
output = output[-1]
else:
rnn_inputs = tf.unstack(x, self.time_steps, 1)
      # Sequence length is not supported for the static RNN since we don't
      # have a wrapper for it. During training we can still pass
      # sequence_length, but for inference we change it to None.
if is_inference:
sequence_length = None
outputs, _, _ = tf.nn.static_bidirectional_rnn(
fw_rnn_layer,
bw_rnn_layer,
rnn_inputs,
dtype="float32",
sequence_length=sequence_length)
output = outputs[-1]
# Compute logits by multiplying output of shape [batch_size,num_units*2]
# by the softmax layer's out_weight of shape [num_units*2,n_classes]
# plus out_bias
prediction = tf.matmul(output, out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self,
fw_rnn_layer,
bw_rnn_layer,
sess,
saver,
is_dynamic_rnn,
use_sequence_length=False):
"""Saves and restores the model to mimic the most common use case.
Args:
fw_rnn_layer: The forward rnn layer either a single rnn cell or a multi
rnn cell.
bw_rnn_layer: The backward rnn layer either a single rnn cell or a multi
rnn cell.
sess: Old session.
saver: Saver created by tf.compat.v1.train.Saver()
is_dynamic_rnn: Use dynamic_rnn or not.
use_sequence_length: Whether to use sequence length or not. Default to
False.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
- new session of the restored model.
"""
model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(
fw_rnn_layer, bw_rnn_layer, is_dynamic_rnn, True, use_sequence_length)
new_sess = tf.compat.v1.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
"""Get tflite inference result.
    This method converts the TensorFlow session into a TFLite model, then
    runs TFLite inference on the given inputs and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), self.buildRnnLayer(), False, is_inference=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), self.buildRnnLayer(), sess, saver, False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
def testStaticRnnMultiRnnCellWithSequenceLength(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
False,
is_inference=False,
use_sequence_length=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
sess,
saver,
False,
use_sequence_length=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), self.buildRnnLayer(), True, is_inference=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
sess,
saver,
is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCellWithSequenceLength(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
True,
is_inference=False,
use_sequence_length=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
sess,
saver,
is_dynamic_rnn=True,
use_sequence_length=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
# ==== end of file: tensorflow/lite/experimental/examples/lstm/bidirectional_sequence_rnn_test.py (repo: tensorflow-r1.15.5-nv23.03) ====
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TfLite BasicRnnCell wrapper.
TODO(renjieliu): Find a better home for this one.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.lite.python.op_hint import OpHint
from tensorflow.python.keras import activations
from tensorflow.python.keras import initializers
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["lite.experimental.nn.TfLiteRNNCell"])
class TfLiteRNNCell(rnn_cell_impl.LayerRNNCell):
"""The most basic RNN cell.
  This is used only for TfLite; it provides hints, and it also puts the
  variables in the format desired by the tflite ops.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs):
"""Initializes the parameters for an RNN cell.
Args:
num_units: int, The number of units in the RNN cell.
      activation: Nonlinearity to use. Default: `tanh`. It can also be a
        string from among the Keras activation function names.
reuse: (optional) Python boolean describing whether to reuse variables in
an existing scope. Raises an error if not `True` and the existing scope
already has the given variables.
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require reuse=True in such cases.
dtype: Default dtype of the layer (default of `None` means use the type of
the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
Raises:
ValueError: If the existing scope already has the given variables.
"""
super(TfLiteRNNCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
# Inputs must be Rank-2.
self.input_spec = base_layer.InputSpec(ndim=2)
self._tflite_wrapper = OpHint("UnidirectionalSequenceRnn")
self._num_units = num_units
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
"""Builds the RNN cell.
Args:
inputs_shape: Rnn input tensor shape.
Raises:
ValueError: If last dimension of the input shape is not known.
"""
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" %
(inputs_shape,))
input_depth = inputs_shape[-1]
def add_variable_wrapped(name, shape, initializer, index):
var = self.add_weight(name, shape=shape, initializer=initializer)
return self._tflite_wrapper.add_input(
var, name=name, index_override=index)
self._input_weights = add_variable_wrapped(
"input_weights", [self._num_units, input_depth], None, 1)
self._recurrent_weights = add_variable_wrapped(
"recurrent_weights", [self._num_units, self._num_units], None, 2)
self._bias = add_variable_wrapped(
"bias",
shape=[self._num_units],
initializer=init_ops.zeros_initializer(dtype=self.dtype),
index=3)
self.built = True
def call(self, inputs, state):
"""Most basic RNN: output = new_state = act(W * input + U * state + B)."""
inputs = self._tflite_wrapper.add_input(
inputs, tag="input", name="input", aggregate="stack", index_override=0)
state = self._tflite_wrapper.add_input(
state,
tag="hidden_state",
name="hidden_state",
aggregate="first",
index_override=4)
weights = array_ops.transpose(
array_ops.concat([self._input_weights, self._recurrent_weights], 1))
gate_inputs = math_ops.matmul(array_ops.concat([inputs, state], 1), weights)
gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
output = self._activation(gate_inputs)
output = self._tflite_wrapper.add_output(
output,
tag="output",
name="output",
index_override=1,
aggregate="stack")
return output, output
def get_config(self):
config = {
"num_units": self._num_units,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(TfLiteRNNCell, self).get_config()
return dict(itertools.chain(base_config.items(), config.items()))
@tf_export(v1=["lite.experimental.nn.TFLiteLSTMCell"])
class TFLiteLSTMCell(rnn_cell_impl.LayerRNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
  This is used only for TfLite; it provides hints, and it also puts the
  variables in the format desired by the tflite ops (transposed and
  separated).
The default non-peephole implementation is based on:
https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf
Felix Gers, Jurgen Schmidhuber, and Fred Cummins.
"Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
`tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
better performance on CPU.
"""
def __init__(self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_proj=None,
proj_clip=None,
num_unit_shards=None,
num_proj_shards=None,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None,
name=None,
dtype=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a
variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a
variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1 in
order to reduce the scale of forgetting at the beginning of the
training. Must set it manually to `0.0` when restoring from CudnnLSTM
trained checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of the
`c_state` and `m_state`. If False, they are concatenated along the
column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables in
an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require reuse=True in such cases.
dtype: Default dtype of the layer (default of `None` means use the type of
the first input). Required when `build` is called before `call`. When
restoring from CudnnLSTM-trained checkpoints, use
`CudnnCompatibleLSTMCell` instead.
"""
super(TFLiteLSTMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)
# TODO(raziel): decide if we want to just support tuples (yes please!).
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warn(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.", self)
# Inputs must be 2-dimensional.
# TODO(raziel): layers stuff -- chop if un-layerizing Op.
self.input_spec = base_layer.InputSpec(ndim=2)
self._tflite_wrapper = OpHint("UnidirectionalSequenceLstm")
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation or math_ops.tanh
self._output_size = num_proj if num_proj else num_units
self._state_size = (
rnn_cell_impl.LSTMStateTuple(num_units, self._output_size)
if state_is_tuple else num_units + self._output_size)
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def build(self, inputs_shape):
"""Build TfLite LSTM cell graph.
Args:
      inputs_shape: The inputs_shape must be known and of the form
        [batch_size, input_size].
Raises:
ValueError: if the inputs_shape is invalid.
"""
if len(inputs_shape) != 2:
raise ValueError(
"inputs_shape must be 2-dimensional, saw shape: %s" % inputs_shape)
input_depth = (
inputs_shape[1]
if isinstance(inputs_shape[1], int) else inputs_shape[1].value)
if input_depth is None:
raise ValueError("Invalid inputs_shape, saw shape: %s" % inputs_shape)
maybe_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_unit_shards)
if self._num_unit_shards is not None else None)
input_weight_shape = [self._num_units, input_depth]
cell_weight_shape = [self._num_units, self._output_size]
bias_shape = [self._num_units]
def add_variable_wrapped(name, shape, initializer, index, partitioner):
var = self.add_weight(
name, shape=shape, initializer=initializer, partitioner=partitioner)
return self._tflite_wrapper.add_input(
var, name=name, index_override=index)
weight_initializer = self._initializer
if self.dtype is None:
bias_initializer = init_ops.zeros_initializer
else:
bias_initializer = init_ops.zeros_initializer(dtype=self.dtype)
forget_bias_initializer = init_ops.constant_initializer(self._forget_bias)
self.input_to_input_w = add_variable_wrapped(
"input_to_input_w", input_weight_shape, weight_initializer, 1,
maybe_partitioner)
self.input_to_forget_w = add_variable_wrapped(
"input_to_forget_w", input_weight_shape, weight_initializer, 2,
maybe_partitioner)
self.input_to_cell_w = add_variable_wrapped(
"input_to_cell_w", input_weight_shape, weight_initializer, 3,
maybe_partitioner)
self.input_to_output_w = add_variable_wrapped(
"input_to_output_w", input_weight_shape, weight_initializer, 4,
maybe_partitioner)
self.cell_to_input_w = add_variable_wrapped(
"cell_to_input_w", cell_weight_shape, weight_initializer, 5,
maybe_partitioner)
self.cell_to_forget_w = add_variable_wrapped(
"cell_to_forget_w", cell_weight_shape, weight_initializer, 6,
maybe_partitioner)
self.cell_to_cell_w = add_variable_wrapped(
"cell_to_cell_w", cell_weight_shape, weight_initializer, 7,
maybe_partitioner)
self.cell_to_output_w = add_variable_wrapped(
"cell_to_output_w", cell_weight_shape, weight_initializer, 8,
maybe_partitioner)
self.input_bias = add_variable_wrapped(
"input_bias", bias_shape, bias_initializer, 12, maybe_partitioner)
self.forget_bias = add_variable_wrapped("forget_bias", bias_shape,
forget_bias_initializer, 13,
maybe_partitioner)
self.cell_bias = add_variable_wrapped(
"cell_bias", bias_shape, bias_initializer, 14, maybe_partitioner)
self.output_bias = add_variable_wrapped(
"output_bias", bias_shape, bias_initializer, 15, maybe_partitioner)
# index 9, 10, 11.
# f stands for forget, i stands for input and o stands for output.
if self._use_peepholes:
self._w_f_diag = add_variable_wrapped("w_f_diag", [self._num_units],
self._initializer, 10,
maybe_partitioner)
self._w_i_diag = add_variable_wrapped("w_i_diag", [self._num_units],
self._initializer, 9,
maybe_partitioner)
self._w_o_diag = add_variable_wrapped("w_o_diag", [self._num_units],
self._initializer, 11,
maybe_partitioner)
# index 16 for proj kernel.
if self._num_proj is not None:
maybe_proj_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_proj_shards)
if self._num_proj_shards is not None else None)
self._proj_kernel = add_variable_wrapped(
"projection/kernel", [self._num_proj, self._num_units],
self._initializer,
16,
partitioner=maybe_proj_partitioner)
self.built = True
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, `[batch, num_units]`.
state: if `state_is_tuple` is False, this must be a state Tensor, `2-D,
[batch, state_size]`. If `state_is_tuple` is True, this must be a tuple
of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch, output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
inputs = self._tflite_wrapper.add_input(
inputs, tag="input", name="input", aggregate="stack", index_override=0)
    # Make sure inputs and bias_initializer have the same type.
assert inputs.dtype == self.input_to_input_w.dtype
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = math_ops.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
    # Note: For TfLite, the cell state is at index 19 while the activation
    # state is at index 18.
c_prev = self._tflite_wrapper.add_input(
c_prev,
tag="c_prev",
name="c_prev",
aggregate="first",
index_override=19)
m_prev = self._tflite_wrapper.add_input(
m_prev,
tag="m_prev",
name="m_prev",
aggregate="first",
index_override=18)
input_size = inputs.shape.with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.shape[-1]")
inputs_and_m_prev = array_ops.concat([inputs, m_prev], axis=1)
    # i stands for the input gate.
    # f stands for the forget gate activation.
    # o stands for the output gate.
    # j is the cell candidate (the new input to the cell state).
    # c is the new cell state.
    # m is the output.
i = nn_ops.bias_add(
math_ops.matmul(
inputs_and_m_prev,
array_ops.concat([self.input_to_input_w, self.cell_to_input_w],
axis=1),
transpose_b=True), self.input_bias)
f = nn_ops.bias_add(
math_ops.matmul(
inputs_and_m_prev,
array_ops.concat([self.input_to_forget_w, self.cell_to_forget_w],
axis=1),
transpose_b=True), self.forget_bias)
o = nn_ops.bias_add(
math_ops.matmul(
inputs_and_m_prev,
array_ops.concat([self.input_to_output_w, self.cell_to_output_w],
axis=1),
transpose_b=True), self.output_bias)
j = nn_ops.bias_add(
math_ops.matmul(
inputs_and_m_prev,
array_ops.concat([self.input_to_cell_w, self.cell_to_cell_w],
axis=1),
transpose_b=True), self.cell_bias)
# Diagonal connections
if self._use_peepholes:
c = (
sigmoid(f + self._w_f_diag * c_prev) * c_prev +
sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f) * c_prev + sigmoid(i) * self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
transposed_proj_kernel = array_ops.transpose(self._proj_kernel)
m = math_ops.matmul(m, transposed_proj_kernel)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
c = self._tflite_wrapper.add_output(
c, tag="c", name="c", aggregate="last", index_override=1)
m = self._tflite_wrapper.add_output(
m, tag="m", name="m", index_override=2, aggregate="stack")
new_state = (
rnn_cell_impl.LSTMStateTuple(c, m)
if self._state_is_tuple else array_ops.concat([c, m], 1))
return m, new_state
def get_config(self):
config = {
"num_units": self._num_units,
"use_peepholes": self._use_peepholes,
"cell_clip": self._cell_clip,
"initializer": initializers.serialize(self._initializer),
"num_proj": self._num_proj,
"proj_clip": self._proj_clip,
"num_unit_shards": self._num_unit_shards,
"num_proj_shards": self._num_proj_shards,
"forget_bias": self._forget_bias,
"state_is_tuple": self._state_is_tuple,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(TFLiteLSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
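# A minimal usage sketch, assuming the TFLiteLSTMCell class defined above:
# it shows construction plus a get_config() round-trip. `_demo_lstm_cell_config`
# is a hypothetical helper and the keyword values are illustrative only.
def _demo_lstm_cell_config():
  cell = TFLiteLSTMCell(num_units=16, use_peepholes=True, forget_bias=0.0)
  config = cell.get_config()  # Base Layer config merged with the LSTM keys.
  assert config["num_units"] == 16
  assert config["use_peepholes"]
  return config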
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/experimental/examples/lstm/rnn_cell.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.rnn import bidirectional_dynamic_rnn
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class BidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
def setUp(self):
tf.reset_default_graph()
# Import MNIST dataset
self.mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
    # MNIST is meant to be classified into 10 classes (0-9).
    self.n_classes = 10
    # Batch size
    self.batch_size = 16
    # LSTM units.
    self.num_units = 16
def buildLstmLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, use_peepholes=True, forget_bias=0, name="rnn1"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, num_proj=8, forget_bias=0, name="rnn2"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units // 2,
use_peepholes=True,
num_proj=8,
forget_bias=0,
name="rnn3"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, forget_bias=0, name="rnn4")
])
def buildModel(self, fw_lstm_layer, bw_lstm_layer, is_dynamic_rnn):
"""Build Mnist recognition model.
Args:
fw_lstm_layer: The forward lstm layer either a single lstm cell or a multi
lstm cell.
bw_lstm_layer: The backward lstm layer either a single lstm cell or a
multi lstm cell.
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units * 2, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
if is_dynamic_rnn:
lstm_inputs = tf.transpose(x, [1, 0, 2])
outputs, _ = bidirectional_dynamic_rnn(
fw_lstm_layer,
bw_lstm_layer,
lstm_inputs,
dtype="float32",
time_major=True)
fw_outputs, bw_outputs = outputs
output = tf.concat([fw_outputs, bw_outputs], 2)
output = tf.unstack(output, axis=0)
output = output[-1]
else:
lstm_input = tf.unstack(x, self.time_steps, 1)
outputs, _, _ = tf.nn.static_bidirectional_rnn(
fw_lstm_layer, bw_lstm_layer, lstm_input, dtype="float32")
output = outputs[-1]
# Compute logits by multiplying output of shape [batch_size,num_units*2]
# by the softmax layer's out_weight of shape [num_units*2,n_classes]
# plus out_bias
prediction = tf.matmul(output, out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, fw_lstm_layer, bw_lstm_layer, sess, saver,
is_dynamic_rnn):
"""Saves and restores the model to mimic the most common use case.
Args:
      fw_lstm_layer: The forward lstm layer, either a single lstm cell or a
        multi lstm cell.
      bw_lstm_layer: The backward lstm layer, either a single lstm cell or a
        multi lstm cell.
sess: Old session.
      saver: Saver created by tf.compat.v1.train.Saver().
      is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
      - New session of the restored model.
"""
model_dir = tempfile.mkdtemp()
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(fw_lstm_layer, bw_lstm_layer,
is_dynamic_rnn)
new_sess = tf.compat.v1.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
"""Get tflite inference result.
    This method converts the TensorFlow session to a TFLite model, then runs
    TFLite inference on the given inputs and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
    try:
      interpreter.allocate_tensors()
    except ValueError:
      self.fail("Failed to allocate tensors.")
input_index = (interpreter.get_input_details()[0]["index"])
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = (interpreter.get_output_details()[0]["index"])
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(self.buildLstmLayer(),
self.buildLstmLayer(), False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(), self.buildLstmLayer(), sess, saver, False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(self.buildLstmLayer(),
self.buildLstmLayer(), True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(),
self.buildLstmLayer(),
sess,
saver,
is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/experimental/examples/lstm/bidirectional_sequence_lstm_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import flags
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class UnidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
def __init__(self, *args, **kwargs):
super(UnidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
    # MNIST is meant to be classified into 10 classes (0-9).
    self.n_classes = 10
    # Batch size
    self.batch_size = 16
    # RNN units.
    self.num_units = 16
def setUp(self):
super(UnidirectionalSequenceRnnTest, self).setUp()
# Import MNIST dataset
data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
self.mnist = input_data.read_data_sets(data_dir, one_hot=True)
def buildRnnLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn1"),
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn2")
])
def buildModel(self, rnn_layer, is_dynamic_rnn):
"""Build Mnist recognition model.
Args:
rnn_layer: The rnn layer either a single rnn cell or a multi rnn cell.
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
# x is shaped [batch_size,time_steps,num_inputs]
if is_dynamic_rnn:
rnn_input = tf.transpose(x, perm=[1, 0, 2])
outputs, _ = tf.lite.experimental.nn.dynamic_rnn(
rnn_layer, rnn_input, dtype="float32")
outputs = tf.unstack(outputs, axis=0)
else:
rnn_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(rnn_layer, rnn_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
sess.run(tf.global_variables_initializer())
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, rnn_layer, sess, saver, is_dynamic_rnn):
"""Saves and restores the model to mimic the most common use case.
Args:
      rnn_layer: The rnn layer, either a single rnn cell or a multi rnn cell.
sess: Old session.
      saver: Saver created by tf.compat.v1.train.Saver().
      is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
      - New session of the restored model.
"""
model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(rnn_layer, is_dynamic_rnn)
new_sess = tf.compat.v1.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
"""Get tflite inference result.
    This method converts the TensorFlow session to a TFLite model, then runs
    TFLite inference on the given inputs and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), is_dynamic_rnn=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), sess, saver, is_dynamic_rnn=False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), is_dynamic_rnn=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), sess, saver, is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/experimental/examples/lstm/unidirectional_sequence_rnn_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
def setUp(self):
tf.reset_default_graph()
# Import MNIST dataset
self.mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
    # MNIST is meant to be classified into 10 classes (0-9).
    self.n_classes = 10
    # Batch size
    self.batch_size = 16
    # LSTM units.
    self.num_units = 16
def buildLstmLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, use_peepholes=True, forget_bias=1.0, name="rnn1"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, num_proj=8, forget_bias=1.0, name="rnn2"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units // 2,
use_peepholes=True,
num_proj=8,
forget_bias=0,
name="rnn3"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, forget_bias=1.0, name="rnn4")
])
def buildModel(self, lstm_layer, is_dynamic_rnn):
"""Build Mnist recognition model.
Args:
lstm_layer: The lstm layer either a single lstm cell or a multi lstm cell.
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
# x is shaped [batch_size,time_steps,num_inputs]
if is_dynamic_rnn:
lstm_input = tf.transpose(x, perm=[1, 0, 2])
outputs, _ = tf.lite.experimental.nn.dynamic_rnn(
lstm_layer, lstm_input, dtype="float32")
outputs = tf.unstack(outputs, axis=0)
else:
lstm_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, lstm_layer, sess, saver, is_dynamic_rnn):
"""Saves and restores the model to mimic the most common use case.
Args:
      lstm_layer: The lstm layer, either a single lstm cell or a multi lstm
        cell.
sess: Old session.
saver: Saver created by tf.compat.v1.train.Saver()
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
      - New session of the restored model.
"""
model_dir = tempfile.mkdtemp()
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(lstm_layer, is_dynamic_rnn)
new_sess = tf.compat.v1.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
"""Get tflite inference result.
    This method converts the TensorFlow session to a TFLite model, then runs
    TFLite inference on the given inputs and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
    try:
      interpreter.allocate_tensors()
    except ValueError:
      self.fail("Failed to allocate tensors.")
input_index = (interpreter.get_input_details()[0]["index"])
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = (interpreter.get_output_details()[0]["index"])
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildLstmLayer(), is_dynamic_rnn=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(), sess, saver, is_dynamic_rnn=False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildLstmLayer(), is_dynamic_rnn=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(), sess, saver, is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/experimental/examples/lstm/unidirectional_sequence_lstm_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TfLite LSTMCell wrapper.
TODO(renjieliu): Find a better home for this one.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.python.op_hint import OpHint
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.rnn import _best_effort_input_batch_size
from tensorflow.python.ops.rnn import _dynamic_rnn_loop
from tensorflow.python.ops.rnn import _should_cache
from tensorflow.python.ops.rnn import _transpose_batch_time
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["lite.experimental.nn.dynamic_rnn"])
def dynamic_rnn(cell,
inputs,
sequence_length=None,
initial_state=None,
dtype=None,
parallel_iterations=None,
swap_memory=False,
time_major=True,
scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
Performs fully dynamic unrolling of `inputs`.
Example:
```python
# create a BasicRNNCell
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
# 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
# defining initial state
initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
# 'state' is a tensor of shape [batch_size, cell_state_size]
outputs, state = tf.compat.v1.nn.dynamic_rnn(rnn_cell, input_data,
initial_state=initial_state,
dtype=tf.float32)
```
```python
# create 2 LSTMCells
rnn_layers = [tf.compat.v1.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(rnn_layers)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.nn.rnn_cell.LSTMStateTuple for each cell
outputs, state = tf.compat.v1.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=data,
dtype=tf.float32)
```
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such elements.
If `time_major == True`, this must be a `Tensor` of shape: `[max_time,
batch_size, ...]`, or a nested tuple of such elements. This may also be
a (possibly nested) tuple of Tensors satisfying this property. The
first two dimensions must match across all the inputs, but otherwise the
ranks and other shape components may differ. In this case, input to
`cell` at each time-step will replicate the structure of these tuples,
except for the time dimension (from which the time is taken). The input
to `cell` at each time step will be a `Tensor` or (possibly nested)
tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. Used
to copy-through state and zero-out outputs when past a batch element's
sequence length. So it's more for performance than correctness.
initial_state: (optional) An initial state for the RNN. If `cell.state_size`
is an integer, this must be a `Tensor` of appropriate type and shape
`[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this
should be a tuple of tensors having shapes `[batch_size, s] for s in
cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency and
can be run in parallel, will be. This parameter trades off time for
space. Values >> 1 use more memory but take less time, while smaller
values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs which
would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors. If true,
these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false,
these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using
`time_major = True` is a bit more efficient because it avoids transposes
at the beginning and end of the RNN calculation. However, most TensorFlow
data is batch-major, so by default this function accepts input and emits
output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes. If cells are `LSTMCells`
`state` will be a tuple containing a `LSTMStateTuple` for each cell.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
RuntimeError: If not using control flow v2.
"""
  # Currently, only the time_major == True case is supported.
assert time_major
# TODO(b/123051275): We need to check if the cells are TfLiteLSTMCells or
# TfLiteRNNCells.
rnn_cell_impl.assert_like_rnncell("cell", cell)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
raise RuntimeError("OpHint dynamic rnn only supports control flow v2.")
parent_first_child_input = [{
"parent_ophint_input_index": 0,
"first_child_ophint_input_index": 0
}]
parent_last_child_output = [{
"parent_output_index": 0,
# For LstmCell, the index is 2.
# For RnnCell, the index is 1.
# So we use -1 meaning it's the last one.
"child_output_index": -1
}]
internal_children_input_output = [{
"child_input_index": 0,
# For LstmCell, the index is 2.
# For RnnCell, the index is 1.
# So we use -1 meaning it's the last one.
"child_output_index": -1
}]
inputs_outputs_mappings = {
"parent_first_child_input": parent_first_child_input,
"parent_last_child_output": parent_last_child_output,
"internal_children_input_output": internal_children_input_output
}
tflite_wrapper = OpHint(
"TfLiteDynamicRnn",
level=2,
children_inputs_mappings=inputs_outputs_mappings)
with vs.variable_scope(scope or "rnn") as varscope:
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
inputs = tflite_wrapper.add_input(inputs, name="input", index_override=0)
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (batch, time, depth) => (time, batch, depth)
flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.cast(sequence_length, dtypes.int32)
if sequence_length.shape.rank not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.shape)
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length,
name="sequence_length")
batch_size = _best_effort_input_batch_size(flat_input)
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If there is no initial_state, you must give a dtype.")
if getattr(cell, "get_initial_state", None) is not None:
state = cell.get_initial_state(
inputs=None, batch_size=batch_size, dtype=dtype)
else:
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)), [
"Expected shape for Tensor %s is " % x.name, packed_shape,
" but saw shape: ", x_shape
])
if not context.executing_eagerly() and sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
outputs, final_state = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (time, batch, depth) => (batch, time, depth)
outputs = nest.map_structure(_transpose_batch_time, outputs)
outputs = tflite_wrapper.add_output(outputs, name="outputs")
return outputs, final_state
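# A minimal usage sketch for dynamic_rnn above, assuming control flow v2 is
# enabled (see the RuntimeError above) and the TFLiteLSTMCell from rnn_cell.py
# in this directory. `_demo_dynamic_rnn` and its shapes are hypothetical.
def _demo_dynamic_rnn():
  from tensorflow.lite.experimental.examples.lstm.rnn_cell import (
      TFLiteLSTMCell)
  # Time-major input: [max_time, batch_size, depth].
  inputs = array_ops.placeholder(dtypes.float32, [28, None, 28])
  cell = TFLiteLSTMCell(16, name="demo_lstm")
  outputs, final_state = dynamic_rnn(cell, inputs, dtype="float32")
  return outputs, final_state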
def bidirectional_dynamic_rnn(cell_fw,
cell_bw,
inputs,
sequence_length=None,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
parallel_iterations=None,
swap_memory=False,
time_major=False,
scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Takes input and builds independent forward and backward RNNs. The input_size
of forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such elements.
If time_major == True, this must be a tensor of shape: `[max_time,
batch_size, ...]`, or a nested tuple of such elements.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences in the batch. If
not provided, all batch entries are assumed to be full sequences; and time
reversal is applied from time `0` to `max_time` for each sequence.
initial_state_fw: (optional) An initial state for the forward RNN. This must
be a tensor of appropriate type and shape `[batch_size,
cell_fw.state_size]`. If `cell_fw.state_size` is a tuple, this should be a
tuple of tensors having shapes `[batch_size, s] for s in
cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using the
corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency and
can be run in parallel, will be. This parameter trades off time for
space. Values >> 1 use more memory but take less time, while smaller
values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs which
would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors. If true,
these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false,
these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using
`time_major = True` is a bit more efficient because it avoids transposes
at the beginning and end of the RNN calculation. However, most TensorFlow
data is batch-major, so by default this function accepts input and emits
output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
rnn_cell_impl.assert_like_rnncell("cell_fw", cell_fw)
rnn_cell_impl.assert_like_rnncell("cell_bw", cell_bw)
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw,
inputs=inputs,
sequence_length=sequence_length,
initial_state=initial_state_fw,
dtype=dtype,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
time_major=time_major,
scope=fw_scope)
# Backward direction
if not time_major:
time_axis = 1
batch_axis = 0
else:
time_axis = 0
batch_axis = 1
def _reverse(input_, seq_lengths, seq_axis, batch_axis):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_,
seq_lengths=seq_lengths,
seq_axis=seq_axis,
batch_axis=batch_axis)
else:
return array_ops.reverse(input_, axis=[seq_axis])
with vs.variable_scope("bw") as bw_scope:
def _map_reverse(inp):
return _reverse(
inp,
seq_lengths=sequence_length,
seq_axis=time_axis,
batch_axis=batch_axis)
inputs_reverse = nest.map_structure(_map_reverse, inputs)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw,
inputs=inputs_reverse,
sequence_length=sequence_length,
initial_state=initial_state_bw,
dtype=dtype,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
time_major=time_major,
scope=bw_scope)
output_bw = _reverse(
tmp,
seq_lengths=sequence_length,
seq_axis=time_axis,
batch_axis=batch_axis)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
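# A companion sketch for bidirectional_dynamic_rnn above, mirroring the
# time-major usage in bidirectional_sequence_lstm_test.py; the helper name and
# shapes are hypothetical.
def _demo_bidirectional_dynamic_rnn():
  from tensorflow.lite.experimental.examples.lstm.rnn_cell import (
      TFLiteLSTMCell)
  inputs = array_ops.placeholder(dtypes.float32, [28, None, 28])
  cell_fw = TFLiteLSTMCell(16, name="demo_fw")
  cell_bw = TFLiteLSTMCell(16, name="demo_bw")
  (out_fw, out_bw), _ = bidirectional_dynamic_rnn(
      cell_fw, cell_bw, inputs, dtype="float32", time_major=True)
  # Concatenate forward and backward features along the depth axis.
  return array_ops.concat([out_fw, out_bw], 2)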
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/experimental/examples/lstm/rnn.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops util to handle ops for Lite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.lite.python import wrap_toco
from tensorflow.python.util.tf_export import tf_export
class SupportedOp(collections.namedtuple("SupportedOp", ["op"])):
"""Spec of supported ops.
Args:
op: string of op name.
"""
@tf_export(v1=["lite.experimental.get_potentially_supported_ops"])
def get_potentially_supported_ops():
"""Returns operations potentially supported by TensorFlow Lite.
  The list contains ops that are partially or fully supported; it is derived
  by simply scanning op names to check whether they can be handled without
  real conversion and specific parameters.
Given that some ops may be partially supported, the optimal way to determine
if a model's operations are supported is by converting using the TensorFlow
Lite converter.
Returns:
A list of SupportedOp.
"""
ops = wrap_toco.wrapped_get_potentially_supported_ops()
return [SupportedOp(o["op"]) for o in ops]
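# A minimal usage sketch for the API above; `_list_supported_op_names` is a
# hypothetical helper, and the membership check mirrors the unit test for this
# module.
def _list_supported_op_names():
  supported = get_potentially_supported_ops()
  names = [supported_op.op for supported_op in supported]
  assert "Add" in names  # Per ops_util_test.py in this directory.
  return names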
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/experimental/tensorboard/ops_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.experimental.tensorboard import ops_util
from tensorflow.python.platform import test
class OpsUtilTest(test.TestCase):
def testGetPotentiallySupportedOps(self):
ops = ops_util.get_potentially_supported_ops()
# See GetTensorFlowNodeConverterMap() in
# tensorflow/lite/toco/import_tensorflow.cc
self.assertIsInstance(ops, list)
    # Spot-check a subset of ops that should certainly be in the list.
self.assertIn(ops_util.SupportedOp("Add"), ops)
self.assertIn(ops_util.SupportedOp("Log"), ops)
self.assertIn(ops_util.SupportedOp("Sigmoid"), ops)
self.assertIn(ops_util.SupportedOp("Softmax"), ops)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/experimental/tensorboard/ops_util_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AudioMicrofrontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op
from tensorflow.python.framework import ops
SAMPLE_RATE = 1000
WINDOW_SIZE = 25
WINDOW_STEP = 10
NUM_CHANNELS = 2
UPPER_BAND_LIMIT = 450.0
LOWER_BAND_LIMIT = 8.0
SMOOTHING_BITS = 10
class AudioFeatureGenerationTest(tf.test.TestCase):
def setUp(self):
super(AudioFeatureGenerationTest, self).setUp()
ops.disable_eager_execution()
def testSimple(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True)
self.assertAllEqual(filterbanks.eval(),
[[479, 425], [436, 378], [410, 350], [391, 325]])
def testSimpleFloatScaled(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
out_scale=64,
out_type=tf.float32)
self.assertAllEqual(filterbanks.eval(),
[[7.484375, 6.640625], [6.8125, 5.90625],
[6.40625, 5.46875], [6.109375, 5.078125]])
def testStacking(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
right_context=1,
frame_stride=2)
self.assertAllEqual(filterbanks.eval(),
[[479, 425, 436, 378], [410, 350, 391, 325]])
def testStackingWithOverlap(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
left_context=1,
right_context=1)
self.assertAllEqual(
self.evaluate(filterbanks),
[[479, 425, 479, 425, 436, 378], [479, 425, 436, 378, 410, 350],
[436, 378, 410, 350, 391, 325], [410, 350, 391, 325, 391, 325]])
def testStackingDropFrame(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
left_context=1,
frame_stride=2)
self.assertAllEqual(filterbanks.eval(),
[[479, 425, 479, 425], [436, 378, 410, 350]])
def testZeroPadding(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 7 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
left_context=2,
frame_stride=3,
zero_padding=True)
self.assertAllEqual(
self.evaluate(filterbanks),
[[0, 0, 0, 0, 479, 425], [436, 378, 410, 350, 391, 325],
[374, 308, 362, 292, 352, 275]])
if __name__ == '__main__':
tf.test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/experimental/microfrontend/python/kernel_tests/audio_microfrontend_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AudioMicrofrontend Op creates filterbanks from audio data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.experimental.microfrontend.ops import gen_audio_microfrontend_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.util.tf_export import tf_export
_audio_microfrontend_op = load_library.load_op_library(
resource_loader.get_path_to_datafile("_audio_microfrontend_op.so"))
@tf_export("lite.experimental.microfrontend.python.ops.audio_microfrontend")
def audio_microfrontend(audio,
sample_rate=16000,
window_size=25,
window_step=10,
num_channels=32,
upper_band_limit=7500.0,
lower_band_limit=125.0,
smoothing_bits=10,
even_smoothing=0.025,
odd_smoothing=0.06,
min_signal_remaining=0.05,
enable_pcan=True,
pcan_strength=0.95,
pcan_offset=80.0,
gain_bits=21,
enable_log=True,
scale_shift=6,
left_context=0,
right_context=0,
frame_stride=1,
zero_padding=False,
out_scale=1,
out_type=dtypes.uint16):
"""Audio Microfrontend Op.
This Op converts a sequence of audio data into one or more
feature vectors containing filterbanks of the input. The
conversion process uses a lightweight library to perform:
  1. A sliding window function
2. Short-time FFTs
3. Filterbank calculations
4. Noise reduction
5. PCAN Auto Gain Control
6. Logarithmic scaling
Args:
audio: 1D Tensor, int16 audio data in temporal ordering.
sample_rate: Integer, the sample rate of the audio in Hz.
window_size: Integer, length of desired time frames in ms.
window_step: Integer, length of step size for the next frame in ms.
num_channels: Integer, the number of filterbank channels to use.
upper_band_limit: Float, the highest frequency included in the filterbanks.
lower_band_limit: Float, the lowest frequency included in the filterbanks.
smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.
even_smoothing: Float, smoothing coefficient for even-numbered channels.
odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
min_signal_remaining: Float, fraction of signal to preserve in smoothing.
enable_pcan: Bool, enable PCAN auto gain control.
pcan_strength: Float, gain normalization exponent.
pcan_offset: Float, positive value added in the normalization denominator.
gain_bits: Int, number of fractional bits in the gain.
enable_log: Bool, enable logarithmic scaling of filterbanks.
scale_shift: Integer, scale filterbanks by 2^(scale_shift).
left_context: Integer, number of preceding frames to attach to each frame.
    right_context: Integer, number of following frames to attach to each frame.
frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
zero_padding: Bool, if left/right context is out-of-bounds, attach frame of
zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
out_scale: Integer, divide all filterbanks by this number.
out_type: DType, type of the output Tensor, defaults to UINT16.
Returns:
filterbanks: 2D Tensor, each row is a time frame, each column is a channel.
Raises:
ValueError: If the audio tensor is not explicitly a vector.
"""
audio_shape = audio.shape
if audio_shape.ndims is None:
raise ValueError("Input to `AudioMicrofrontend` should have known rank.")
if len(audio_shape) > 1:
audio = array_ops.reshape(audio, [-1])
return gen_audio_microfrontend_op.audio_microfrontend(
audio, sample_rate, window_size, window_step, num_channels,
upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing,
odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength,
pcan_offset, gain_bits, enable_log, scale_shift, left_context,
right_context, frame_stride, zero_padding, out_scale, out_type)
ops.NotDifferentiable("AudioMicrofrontend")
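# A minimal graph-mode usage sketch, assuming the TF 1.x Session API used by
# the kernel tests for this op; `_demo_audio_microfrontend` and its constants
# are hypothetical.
def _demo_audio_microfrontend():
  from tensorflow.python.client import session
  from tensorflow.python.framework import constant_op
  # One second of a square wave at the default 16 kHz sample rate.
  audio = constant_op.constant([0, 32767, 0, -32768] * 4000, dtypes.int16)
  filterbanks = audio_microfrontend(audio)
  with session.Session() as sess:
    # Each row is a 10 ms frame, each column one of the 32 channels.
    return sess.run(filterbanks)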
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/experimental/microfrontend/python/ops/audio_microfrontend_op.py
|
#!/usr/bin/env python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This tool creates an html visualization of a TensorFlow Lite graph.
Example usage:
python visualize.py foo.tflite foo.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
from tensorflow.python.platform import resource_loader
# Schema to use for flatbuffers
_SCHEMA = "third_party/tensorflow/lite/schema/schema.fbs"
# TODO(angerson): Fix later when rules are simplified.
_SCHEMA = resource_loader.get_path_to_datafile("../schema/schema.fbs")
_BINARY = resource_loader.get_path_to_datafile("../../../flatbuffers/flatc")
# Account for different package positioning internal vs. external.
if not os.path.exists(_BINARY):
_BINARY = resource_loader.get_path_to_datafile(
"../../../../flatbuffers/flatc")
if not os.path.exists(_SCHEMA):
raise RuntimeError("Sorry, schema file cannot be found at %r" % _SCHEMA)
if not os.path.exists(_BINARY):
raise RuntimeError("Sorry, flatc is not available at %r" % _BINARY)
# A CSS description for making the visualizer
_CSS = """
<html>
<head>
<style>
body {font-family: sans-serif; background-color: #fa0;}
table {background-color: #eca;}
th {background-color: black; color: white;}
h1 {
  background-color: #ffaa00;
padding:5px;
color: black;
}
svg {
margin: 10px;
border: 2px;
border-style: solid;
border-color: black;
background: white;
}
div {
border-radius: 5px;
background-color: #fec;
padding:5px;
margin:5px;
}
.tooltip {color: blue;}
.tooltip .tooltipcontent {
visibility: hidden;
color: black;
background-color: yellow;
padding: 5px;
border-radius: 4px;
position: absolute;
z-index: 1;
}
.tooltip:hover .tooltipcontent {
visibility: visible;
}
.edges line {
stroke: #333;
}
text {
font-weight: bold;
}
.nodes text {
color: black;
pointer-events: none;
font-family: sans-serif;
font-size: 11px;
}
</style>
<script src="https://d3js.org/d3.v4.min.js"></script>
</head>
<body>
"""
_D3_HTML_TEMPLATE = """
<script>
// Build graph data
var graph = %s;
var svg = d3.select("#subgraph%d")
var width = svg.attr("width");
var height = svg.attr("height");
// Make the graph zoomable and pannable.
svg = svg.call(d3.zoom().on("zoom", function() {
svg.attr("transform", d3.event.transform);
})).append("g");
var color = d3.scaleOrdinal(d3.schemeDark2);
var simulation = d3.forceSimulation()
.force("link", d3.forceLink().id(function(d) {return d.id;}))
.force("charge", d3.forceManyBody())
.force("center", d3.forceCenter(0.5 * width, 0.5 * height));
function buildGraph() {
var edge = svg.append("g").attr("class", "edges").selectAll("line")
.data(graph.edges).enter().append("path").attr("stroke","black").attr("fill","none")
// Make the node group
var node = svg.selectAll(".nodes")
.data(graph.nodes)
.enter().append("g")
.attr("x", function(d){return d.x})
.attr("y", function(d){return d.y})
.attr("transform", function(d) {
return "translate( " + d.x + ", " + d.y + ")"
})
.attr("class", "nodes")
.call(d3.drag()
.on("start", function(d) {
if(!d3.event.active) simulation.alphaTarget(1.0).restart();
d.fx = d.x;d.fy = d.y;
})
.on("drag", function(d) {
d.fx = d3.event.x; d.fy = d3.event.y;
})
.on("end", function(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = d.fy = null;
}));
// Within the group, draw a box for the node position and text
// on the side.
var node_width = 150;
var node_height = 30;
node.append("rect")
.attr("r", "5px")
.attr("width", node_width)
.attr("height", node_height)
.attr("rx", function(d) { return d.group == 1 ? 1 : 10; })
.attr("stroke", "#000000")
.attr("fill", function(d) { return d.group == 1 ? "#dddddd" : "#000000"; })
node.append("text")
.text(function(d) { return d.name; })
.attr("x", 5)
.attr("y", 20)
.attr("fill", function(d) { return d.group == 1 ? "#000000" : "#eeeeee"; })
// Setup force parameters and update position callback
var node = svg.selectAll(".nodes")
.data(graph.nodes);
// Bind the links
var name_to_g = {}
node.each(function(data, index, nodes) {
console.log(data.id)
name_to_g[data.id] = this;
});
function proc(w, t) {
return parseInt(w.getAttribute(t));
}
edge.attr("d", function(d) {
function lerp(t, a, b) {
return (1.0-t) * a + t * b;
}
var x1 = proc(name_to_g[d.source],"x") + node_width /2;
var y1 = proc(name_to_g[d.source],"y") + node_height;
var x2 = proc(name_to_g[d.target],"x") + node_width /2;
var y2 = proc(name_to_g[d.target],"y");
var s = "M " + x1 + " " + y1
+ " C " + x1 + " " + lerp(.5, y1, y2)
+ " " + x2 + " " + lerp(.5, y1, y2)
+ " " + x2 + " " + y2
return s;
});
}
buildGraph()
</script>
"""
class OpCodeMapper(object):
"""Maps an opcode index to an op name."""
def __init__(self, data):
self.code_to_name = {}
for idx, d in enumerate(data["operator_codes"]):
self.code_to_name[idx] = d["builtin_code"]
def __call__(self, x):
if x not in self.code_to_name:
s = "<UNKNOWN>"
else:
s = self.code_to_name[x]
return "%s (%d)" % (s, x)
class DataSizeMapper(object):
"""For buffers, report the number of bytes."""
def __call__(self, x):
if x is not None:
return "%d bytes" % len(x)
else:
return "--"
class TensorMapper(object):
"""Maps a list of tensor indices to a tooltip hoverable indicator of more."""
def __init__(self, subgraph_data):
self.data = subgraph_data
def __call__(self, x):
html = ""
html += "<span class='tooltip'><span class='tooltipcontent'>"
for i in x:
tensor = self.data["tensors"][i]
html += str(i) + " "
html += tensor["name"] + " "
html += str(tensor["type"]) + " "
html += (repr(tensor["shape"]) if "shape" in tensor else "[]") + "<br>"
html += "</span>"
html += repr(x)
html += "</span>"
return html
def GenerateGraph(subgraph_idx, g, opcode_mapper):
"""Produces the HTML required to have a d3 visualization of the dag."""
def TensorName(idx):
return "t%d" % idx
def OpName(idx):
return "o%d" % idx
edges = []
nodes = []
first = {}
second = {}
pixel_mult = 200 # TODO(aselle): multiplier for initial placement
width_mult = 170 # TODO(aselle): multiplier for initial placement
for op_index, op in enumerate(g["operators"]):
for tensor_input_position, tensor_index in enumerate(op["inputs"]):
if tensor_index not in first:
first[tensor_index] = (
(op_index - 0.5 + 1) * pixel_mult,
(tensor_input_position + 1) * width_mult)
edges.append({
"source": TensorName(tensor_index),
"target": OpName(op_index)
})
for tensor_output_position, tensor_index in enumerate(op["outputs"]):
if tensor_index not in second:
second[tensor_index] = (
(op_index + 0.5 + 1) * pixel_mult,
(tensor_output_position + 1) * width_mult)
edges.append({
"target": TensorName(tensor_index),
"source": OpName(op_index)
})
nodes.append({
"id": OpName(op_index),
"name": opcode_mapper(op["opcode_index"]),
"group": 2,
"x": pixel_mult,
"y": (op_index + 1) * pixel_mult
})
for tensor_index, tensor in enumerate(g["tensors"]):
initial_y = (
first[tensor_index] if tensor_index in first
else second[tensor_index] if tensor_index in second
else (0, 0))
nodes.append({
"id": TensorName(tensor_index),
"name": "%r (%d)" % (getattr(tensor, "shape", []), tensor_index),
"group": 1,
"x": initial_y[1],
"y": initial_y[0]
})
graph_str = json.dumps({"nodes": nodes, "edges": edges})
html = _D3_HTML_TEMPLATE % (graph_str, subgraph_idx)
return html
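# The JSON emitted above has the shape consumed by _D3_HTML_TEMPLATE; an
# illustrative (not real) instance looks like:
#
#   {"nodes": [{"id": "o0", "name": "CONV_2D (0)", "group": 2, ...},
#              {"id": "t0", "name": "[1, 28, 28, 1] (0)", "group": 1, ...}],
#    "edges": [{"source": "t0", "target": "o0"}]}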
def GenerateTableHtml(items, keys_to_print, display_index=True):
"""Given a list of object values and keys to print, make an HTML table.
Args:
items: Items to print an array of dicts.
keys_to_print: (key, display_fn). `key` is a key in the object. i.e.
items[0][key] should exist. display_fn is the mapping function on display.
i.e. the displayed html cell will have the string returned by
`mapping_fn(items[0][key])`.
display_index: add a column which is the index of each row in `items`.
Returns:
An html table.
"""
html = ""
# Print the list of items
html += "<table><tr>\n"
html += "<tr>\n"
if display_index:
html += "<th>index</th>"
for h, mapper in keys_to_print:
html += "<th>%s</th>" % h
html += "</tr>\n"
for idx, tensor in enumerate(items):
html += "<tr>\n"
if display_index:
html += "<td>%d</td>" % idx
for h, mapper in keys_to_print:
val = tensor[h] if h in tensor else None
val = val if mapper is None else mapper(val)
html += "<td>%s</td>\n" % val
html += "</tr>\n"
html += "</table>\n"
return html
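# A small usage sketch for GenerateTableHtml (the rows below are illustrative):
#
#   rows = [{"name": "weights", "data": b"\x00" * 16},
#           {"name": "bias", "data": None}]
#   table = GenerateTableHtml(rows, [("name", None),
#                                    ("data", DataSizeMapper())])
#   # -> an HTML table with an index column plus a "name" column
#   #    ("weights", "bias") and a "data" column ("16 bytes", "--").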
def CreateHtmlFile(tflite_input, html_output):
"""Given a tflite model in `tflite_input` file, produce html description."""
# Convert the model into a JSON flatbuffer using flatc (build if doesn't
# exist.
if not os.path.exists(tflite_input):
raise RuntimeError("Invalid filename %r" % tflite_input)
if tflite_input.endswith(".tflite") or tflite_input.endswith(".bin"):
# Run convert
cmd = (
_BINARY + " -t "
"--strict-json --defaults-json -o /tmp {schema} -- {input}".format(
input=tflite_input, schema=_SCHEMA))
print(cmd)
os.system(cmd)
real_output = ("/tmp/" + os.path.splitext(
os.path.split(tflite_input)[-1])[0] + ".json")
data = json.load(open(real_output))
elif tflite_input.endswith(".json"):
data = json.load(open(tflite_input))
else:
raise RuntimeError("Input file was not .tflite or .json")
html = ""
html += _CSS
html += "<h1>TensorFlow Lite Model</h2>"
data["filename"] = tflite_input # Avoid special case
toplevel_stuff = [("filename", None), ("version", None), ("description",
None)]
html += "<table>\n"
for key, mapping in toplevel_stuff:
if not mapping:
mapping = lambda x: x
html += "<tr><th>%s</th><td>%s</td></tr>\n" % (key, mapping(data.get(key)))
html += "</table>\n"
# Spec on what keys to display
buffer_keys_to_display = [("data", DataSizeMapper())]
operator_keys_to_display = [("builtin_code", None), ("custom_code", None),
("version", None)]
for subgraph_idx, g in enumerate(data["subgraphs"]):
# Subgraph local specs on what to display
html += "<div class='subgraph'>"
tensor_mapper = TensorMapper(g)
opcode_mapper = OpCodeMapper(data)
op_keys_to_display = [("inputs", tensor_mapper), ("outputs", tensor_mapper),
("builtin_options", None), ("opcode_index",
opcode_mapper)]
tensor_keys_to_display = [("name", None), ("type", None), ("shape", None),
("buffer", None), ("quantization", None)]
html += "<h2>Subgraph %d</h2>\n" % subgraph_idx
# Inputs and outputs.
html += "<h3>Inputs/Outputs</h3>\n"
html += GenerateTableHtml(
[{
"inputs": g["inputs"],
"outputs": g["outputs"]
}], [("inputs", tensor_mapper), ("outputs", tensor_mapper)],
display_index=False)
# Print the tensors.
html += "<h3>Tensors</h3>\n"
html += GenerateTableHtml(g["tensors"], tensor_keys_to_display)
# Print the ops.
html += "<h3>Ops</h3>\n"
html += GenerateTableHtml(g["operators"], op_keys_to_display)
# Visual graph.
html += "<svg id='subgraph%d' width='1600' height='900'></svg>\n" % (
subgraph_idx,)
html += GenerateGraph(subgraph_idx, g, opcode_mapper)
html += "</div>"
  # Buffers: only their data sizes are shown for now.
html += "<h2>Buffers</h2>\n"
html += GenerateTableHtml(data["buffers"], buffer_keys_to_display)
# Operator codes
html += "<h2>Operator Codes</h2>\n"
html += GenerateTableHtml(data["operator_codes"], operator_keys_to_display)
html += "</body></html>\n"
open(html_output, "w").write(html)
def main(argv):
try:
tflite_input = argv[1]
html_output = argv[2]
except IndexError:
print("Usage: %s <input tflite> <output html>" % (argv[0]))
else:
CreateHtmlFile(tflite_input, html_output)
if __name__ == "__main__":
main(sys.argv)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/tools/visualize.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite is for mobile and embedded devices.
TensorFlow Lite is the official solution for running machine learning models on
mobile and embedded devices. It enables on-device machine learning inference
with low latency and a small binary size on Android, iOS, and other operating
systems.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import subprocess
from distutils.command.build_ext import build_ext
import numpy
from setuptools import Extension
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_py import build_py
PACKAGE_NAME = 'tflite_runtime'
PACKAGE_VERSION = os.environ['TENSORFLOW_VERSION']
DOCLINES = __doc__.split('\n')
TENSORFLOW_DIR = os.environ['TENSORFLOW_SRC_ROOT']
# Setup cross compiling
TARGET = os.environ.get('TENSORFLOW_TARGET', None)
if TARGET == 'rpi':
os.environ['CXX'] = 'arm-rpi-linux-gnueabihf-g++'
os.environ['CC'] = 'arm-rpi-linux-gnueabihf-gcc'
elif TARGET == 'aarch64':
os.environ['CXX'] = 'aarch64-linux-gnu-g++'
os.environ['CC'] = 'aarch64-linux-gnu-gcc'
MAKE_CROSS_OPTIONS = ['TARGET=%s' % TARGET] if TARGET else []
RELATIVE_MAKE_DIR = os.path.join('tensorflow', 'lite', 'tools', 'make')
MAKE_DIR = os.path.join(TENSORFLOW_DIR, RELATIVE_MAKE_DIR)
DOWNLOADS_DIR = os.path.join(MAKE_DIR, 'downloads')
RELATIVE_MAKEFILE_PATH = os.path.join(RELATIVE_MAKE_DIR, 'Makefile')
DOWNLOAD_SCRIPT_PATH = os.path.join(MAKE_DIR, 'download_dependencies.sh')
# Check physical memory: on a reasonably sized machine (more than 4GB of RAM,
# i.e. not a small SOC), use all the CPUs for the build; otherwise use only 1.
def get_build_cpus():
physical_bytes = os.sysconf('SC_PAGESIZE') * os.sysconf('SC_PHYS_PAGES')
if physical_bytes < (1<<30) * 4:
return 1
else:
return multiprocessing.cpu_count()
def make_args(target='', quiet=True):
"""Construct make command line."""
args = (['make', 'SHELL=/bin/bash',
'BUILD_WITH_NNAPI=false', '-C', TENSORFLOW_DIR]
+ MAKE_CROSS_OPTIONS +
['-f', RELATIVE_MAKEFILE_PATH, '-j',
str(get_build_cpus())])
if quiet:
args.append('--quiet')
if target:
args.append(target)
return args
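# For illustration, with TENSORFLOW_TARGET unset and 8 build CPUs available,
# make_args('libdir') (as used for LIB_TFLITE_DIR below) produces:
#   ['make', 'SHELL=/bin/bash', 'BUILD_WITH_NNAPI=false', '-C',
#    TENSORFLOW_DIR, '-f', RELATIVE_MAKEFILE_PATH, '-j', '8', '--quiet',
#    'libdir']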
def make_output(target):
"""Invoke make on the target and return output."""
return subprocess.check_output(make_args(target)).decode('utf-8').strip()
def make():
"""Invoke make to build tflite C++ sources.
Build dependencies:
     apt-get install swig libjpeg-dev zlib1g-dev python3-dev python3-numpy
"""
subprocess.check_call(make_args(quiet=False))
def download_dependencies():
"""Download build dependencies if haven't done yet."""
if not os.path.isdir(DOWNLOADS_DIR) or not os.listdir(DOWNLOADS_DIR):
subprocess.check_call(DOWNLOAD_SCRIPT_PATH)
class CustomBuildExt(build_ext, object):
"""Customized build extension."""
def get_ext_filename(self, ext_name):
if TARGET:
ext_path = ext_name.split('.')
return os.path.join(*ext_path) + '.so'
return super(CustomBuildExt, self).get_ext_filename(ext_name)
def run(self):
download_dependencies()
make()
return super(CustomBuildExt, self).run()
class CustomBuildPy(build_py, object):
def run(self):
self.run_command('build_ext')
return super(CustomBuildPy, self).run()
LIB_TFLITE = 'tensorflow-lite'
LIB_TFLITE_DIR = make_output('libdir')
ext = Extension(
name='%s._interpreter_wrapper' % PACKAGE_NAME,
language='c++',
sources=['interpreter_wrapper/interpreter_wrapper.i',
'interpreter_wrapper/interpreter_wrapper.cc',
'interpreter_wrapper/numpy.cc',
'interpreter_wrapper/python_error_reporter.cc',
'interpreter_wrapper/python_utils.cc'],
swig_opts=['-c++',
'-I%s' % TENSORFLOW_DIR,
'-module', 'interpreter_wrapper',
'-outdir', PACKAGE_NAME],
extra_compile_args=['-std=c++11'],
include_dirs=[TENSORFLOW_DIR,
os.path.join(TENSORFLOW_DIR, 'tensorflow', 'lite', 'tools',
'pip_package'),
numpy.get_include(),
os.path.join(DOWNLOADS_DIR, 'flatbuffers', 'include'),
os.path.join(DOWNLOADS_DIR, 'absl')],
libraries=[LIB_TFLITE],
library_dirs=[LIB_TFLITE_DIR])
setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
url='https://www.tensorflow.org/lite/',
author='Google Inc.',
author_email='packages@tensorflow.org',
license='Apache 2.0',
include_package_data=True,
keywords='tflite tensorflow tensor machine learning',
packages=find_packages(exclude=[]),
ext_modules=[ext],
cmdclass={
'build_ext': CustomBuildExt,
'build_py': CustomBuildPy,
}
)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/tools/pip_package/setup.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool to convert ILSVRC devkit validation ground truth to synset labels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from os import path
import sys
import scipy.io
_SYNSET_ARRAYS_RELATIVE_PATH = 'data/meta.mat'
_VALIDATION_FILE_RELATIVE_PATH = 'data/ILSVRC2012_validation_ground_truth.txt'
def _synset_to_word(filepath):
"""Returns synset to word dictionary by reading sysnset arrays."""
mat = scipy.io.loadmat(filepath)
entries = mat['synsets']
# These fields are listed in devkit readme.txt
fields = [
'synset_id', 'WNID', 'words', 'gloss', 'num_children', 'children',
'wordnet_height', 'num_train_images'
]
synset_index = fields.index('synset_id')
words_index = fields.index('words')
synset_to_word = {}
for entry in entries:
entry = entry[0]
synset_id = int(entry[synset_index][0])
first_word = entry[words_index][0].split(',')[0]
synset_to_word[synset_id] = first_word
return synset_to_word
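# A sketch of the resulting mapping (entries are illustrative of the usual
# ILSVRC meta.mat layout, not guaranteed values):
#   {1: 'kit fox', 2: 'English setter', 3: 'Siberian husky', ...}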
def _validation_file_path(ilsvrc_dir):
return path.join(ilsvrc_dir, _VALIDATION_FILE_RELATIVE_PATH)
def _synset_array_path(ilsvrc_dir):
return path.join(ilsvrc_dir, _SYNSET_ARRAYS_RELATIVE_PATH)
def _generate_validation_labels(ilsvrc_dir, output_file):
synset_to_word = _synset_to_word(_synset_array_path(ilsvrc_dir))
with open(_validation_file_path(ilsvrc_dir), 'r') as synset_id_file, open(
output_file, 'w') as output:
for synset_id in synset_id_file:
synset_id = int(synset_id)
output.write('%s\n' % synset_to_word[synset_id])
def _check_arguments(args):
if not args.validation_labels_output:
raise ValueError('Invalid path to output file.')
ilsvrc_dir = args.ilsvrc_devkit_dir
if not ilsvrc_dir or not path.isdir(ilsvrc_dir):
raise ValueError('Invalid path to ilsvrc_dir')
if not path.exists(_validation_file_path(ilsvrc_dir)):
raise ValueError('Invalid path to ilsvrc_dir, cannot find validation file.')
if not path.exists(_synset_array_path(ilsvrc_dir)):
raise ValueError(
'Invalid path to ilsvrc_dir, cannot find synset arrays file.')
def main():
parser = argparse.ArgumentParser(
description='Converts ILSVRC devkit validation_ground_truth.txt to synset'
' labels file that can be used by the accuracy script.')
parser.add_argument(
'--validation_labels_output',
type=str,
help='Full path for outputting validation labels.')
parser.add_argument(
'--ilsvrc_devkit_dir',
type=str,
      help='Full path to ILSVRC 2012 devkit directory.')
args = parser.parse_args()
try:
_check_arguments(args)
except ValueError as e:
parser.print_usage()
file_name = path.basename(sys.argv[0])
sys.stderr.write('{0}: error: {1}\n'.format(file_name, str(e)))
sys.exit(1)
_generate_validation_labels(args.ilsvrc_devkit_dir,
args.validation_labels_output)
if __name__ == '__main__':
main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/tools/accuracy/ilsvrc/generate_validation_labels.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocesses COCO minival data for Object Detection evaluation using mean Average Precision.
The 2014 validation images & annotations can be downloaded from:
http://cocodataset.org/#download
The minival image ID whitelist, a subset of the 2014 validation set, can be
found here:
https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_minival_ids.txt.
This script takes in the original images folder, instances JSON file and
image ID whitelist and produces the following in the specified output folder:
A subfolder for whitelisted images (images/), and a file (ground_truth.pbtxt)
containing an instance of tflite::evaluation::ObjectDetectionGroundTruth.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import os
import shutil
import sys
from tensorflow.lite.tools.evaluation.proto import evaluation_stages_pb2
def _get_ground_truth_detections(instances_file,
whitelist_file,
num_images=None):
"""Processes the annotations JSON file and returns ground truth data corresponding to whitelisted image IDs.
Args:
instances_file: COCO instances JSON file, usually named as
instances_val20xx.json.
whitelist_file: File containing COCO minival image IDs to whitelist for
evaluation, one per line.
num_images: Number of whitelisted images to pre-process. First num_images
are chosen based on sorted list of filenames. If None, all whitelisted
files are preprocessed.
Returns:
A dict mapping image id (int) to a per-image dict that contains:
'filename', 'image' & 'height' mapped to filename & image dimensions
respectively
AND
'detections' to a list of detection dicts, with each mapping:
'category_id' to COCO category id (starting with 1) &
'bbox' to a list of dimension-normalized [top, left, bottom, right]
bounding-box values.
"""
# Read whitelist.
with open(whitelist_file, 'r') as whitelist:
image_id_whitelist = set([int(x) for x in whitelist.readlines()])
# Read JSON data into a dict.
with open(instances_file, 'r') as annotation_dump:
data_dict = ast.literal_eval(annotation_dump.readline())
image_data = {}
all_file_names = []
# Get image names and dimensions.
for image_dict in data_dict['images']:
if image_dict['id'] not in image_id_whitelist:
continue
image_data_dict = {}
image_data_dict['file_name'] = image_dict['file_name']
all_file_names.append(image_data_dict['file_name'])
image_data_dict['height'] = image_dict['height']
image_data_dict['width'] = image_dict['width']
image_data_dict['detections'] = []
image_data[image_dict['id']] = image_data_dict
if num_images:
all_file_names.sort()
all_file_names = all_file_names[:num_images]
all_file_names = set(all_file_names)
# Get detected object annotations per image.
for annotation_dict in data_dict['annotations']:
image_id = annotation_dict['image_id']
if image_id not in image_id_whitelist or image_id not in image_data:
continue
image_data_dict = image_data[image_id]
if image_data_dict['file_name'] not in all_file_names:
del image_data[image_id]
continue
bbox = annotation_dict['bbox']
# bbox format is [x, y, width, height]
# Refer: http://cocodataset.org/#format-data
top = bbox[1]
left = bbox[0]
bottom = top + bbox[3]
right = left + bbox[2]
if (top > image_data_dict['height'] or left > image_data_dict['width'] or
bottom > image_data_dict['height'] or right > image_data_dict['width']):
continue
object_d = {}
object_d['bbox'] = [
top / image_data_dict['height'], left / image_data_dict['width'],
bottom / image_data_dict['height'], right / image_data_dict['width']
]
object_d['category_id'] = annotation_dict['category_id']
image_data_dict['detections'].append(object_d)
return image_data
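# A sketch of one entry of the returned dict (all values illustrative; the
# bbox is [top, left, bottom, right] normalized by image height/width as
# computed above):
#
#   image_data[42] == {
#       'file_name': 'COCO_val2014_000000000042.jpg',
#       'height': 480,
#       'width': 640,
#       'detections': [{'category_id': 18,
#                       'bbox': [0.25, 0.10, 0.75, 0.90]}],
#   }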
def _dump_data(ground_truth_detections, images_folder_path, output_folder_path):
"""Dumps images & data from ground-truth objects into output_folder_path.
The following are created in output_folder_path:
images/: sub-folder for whitelisted validation images.
ground_truth.pbtxt: A text proto file containing all ground-truth
object-sets.
Args:
ground_truth_detections: A dict mapping image id to ground truth data.
Output of _get_ground_truth_detections.
    images_folder_path: Folder containing the validation images.
    output_folder_path: Folder to output files to.
"""
# Ensure output folders exist.
if not os.path.exists(output_folder_path):
os.makedirs(output_folder_path)
output_images_folder = os.path.join(output_folder_path, 'images')
if not os.path.exists(output_images_folder):
os.makedirs(output_images_folder)
output_proto_file = os.path.join(output_folder_path, 'ground_truth.pbtxt')
ground_truth_data = evaluation_stages_pb2.ObjectDetectionGroundTruth()
for image_dict in ground_truth_detections.values():
# Create an ObjectsSet proto for this file's ground truth.
detection_result = ground_truth_data.detection_results.add()
detection_result.image_name = image_dict['file_name']
for detection_dict in image_dict['detections']:
object_instance = detection_result.objects.add()
object_instance.bounding_box.normalized_top = detection_dict['bbox'][0]
object_instance.bounding_box.normalized_left = detection_dict['bbox'][1]
object_instance.bounding_box.normalized_bottom = detection_dict['bbox'][2]
object_instance.bounding_box.normalized_right = detection_dict['bbox'][3]
object_instance.class_id = detection_dict['category_id']
# Copy image.
shutil.copy2(
os.path.join(images_folder_path, image_dict['file_name']),
output_images_folder)
# Dump proto.
with open(output_proto_file, 'w') as proto_file:
proto_file.write(str(ground_truth_data))
def _parse_args():
"""Creates a parser that parse the command line arguments.
Returns:
A namespace parsed from command line arguments.
"""
parser = argparse.ArgumentParser(
description='preprocess_coco_minival: Preprocess COCO minival dataset')
parser.add_argument(
'--images_folder',
type=str,
help='Full path of the validation images folder.',
required=True)
parser.add_argument(
'--instances_file',
type=str,
help='Full path of the input JSON file, like instances_val20xx.json.',
required=True)
parser.add_argument(
'--whitelist_file',
type=str,
help='File with COCO image ids to preprocess, one on each line.',
required=True)
parser.add_argument(
'--num_images',
type=int,
help='Number of whitelisted images to preprocess into the output folder.',
required=False)
parser.add_argument(
'--output_folder',
type=str,
help='Full path to output images & text proto files into.',
required=True)
return parser.parse_known_args(args=sys.argv[1:])[0]
if __name__ == '__main__':
args = _parse_args()
ground_truths = _get_ground_truth_detections(args.instances_file,
args.whitelist_file,
args.num_images)
_dump_data(ground_truths, args.images_folder, args.output_folder)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/tools/evaluation/tasks/coco_object_detection/preprocess_coco_minival.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to convert SavedModel to frozen GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.python import util
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
def _log_tensor_details(tensor_info):
"""Log tensor details: name, shape, and type."""
for key in tensor_info:
val = tensor_info[key]
dtype = types_pb2.DataType.Name(val.dtype)
if val.tensor_shape.unknown_rank:
shape = "unknown_rank"
else:
dims = [str(dim.size) for dim in val.tensor_shape.dim]
shape = "({})".format(", ".join(dims))
logging.info("Tensor's key in saved_model's tensor_map: %s", key)
logging.info(" tensor name: %s, shape: %s, type: %s", val.name, shape,
dtype)
def get_meta_graph_def(saved_model_dir, tag_set):
"""Validate saved_model and extract MetaGraphDef.
Args:
saved_model_dir: saved_model path to convert.
tag_set: Set of tag(s) of the MetaGraphDef to load.
Returns:
The meta_graph_def used for tflite conversion.
Raises:
ValueError: No valid MetaGraphDef for given tag_set.
"""
with session.Session(graph=ops.Graph()) as sess:
return loader.load(sess, tag_set, saved_model_dir)
def get_signature_def(meta_graph, signature_key):
"""Get the signature def from meta_graph with given signature_key.
Args:
meta_graph: meta_graph_def.
signature_key: signature_def in the meta_graph_def.
Returns:
The signature_def used for tflite conversion.
Raises:
ValueError: Given signature_key is not valid for this meta_graph.
"""
signature_def_map = meta_graph.signature_def
signature_def_keys = set(signature_def_map.keys())
logging.info(
"The given SavedModel MetaGraphDef contains SignatureDefs with the "
"following keys: %s", signature_def_keys)
if signature_key not in signature_def_keys:
raise ValueError("No '{}' in the SavedModel\'s SignatureDefs. Possible "
"values are '{}'.".format(signature_key,
",".join(signature_def_keys)))
return signature_def_map[signature_key]
def get_inputs_outputs(signature_def):
"""Get inputs and outputs from SignatureDef.
Args:
signature_def: SignatureDef in the meta_graph_def for conversion.
Returns:
The inputs and outputs in the graph for conversion.
"""
inputs_tensor_info = signature_def.inputs
outputs_tensor_info = signature_def.outputs
logging.info("input tensors info: ")
_log_tensor_details(inputs_tensor_info)
logging.info("output tensors info: ")
_log_tensor_details(outputs_tensor_info)
def gather_names(tensor_info):
return [tensor_info[key].name for key in tensor_info]
inputs = gather_names(inputs_tensor_info)
outputs = gather_names(outputs_tensor_info)
return inputs, outputs
def _get_tensors(graph, signature_def_tensor_names=None,
user_tensor_names=None):
"""Gets the tensors associated with the tensor names.
  Either signature_def_tensor_names or user_tensor_names should be provided. If
  user_tensor_names are provided, the tensors associated with those names are
  returned. Otherwise, the tensors associated with the names in the
  SignatureDef are returned.
Args:
graph: GraphDef representing graph.
signature_def_tensor_names: Tensor names stored in either the inputs or
outputs of a SignatureDef. (default None)
user_tensor_names: Tensor names provided by the user. (default None)
Returns:
List of tensors.
Raises:
ValueError:
      signature_def_tensor_names and user_tensor_names are both undefined or
      empty, or user_tensor_names are not valid.
"""
tensors = []
if user_tensor_names:
# Sort the tensor names.
user_tensor_names = sorted(user_tensor_names)
tensors = util.get_tensors_from_tensor_names(graph, user_tensor_names)
elif signature_def_tensor_names:
tensors = [
graph.get_tensor_by_name(name)
for name in sorted(signature_def_tensor_names)
]
else:
    # Throw ValueError if signature_def_tensor_names and user_tensor_names are
    # both either undefined or empty.
raise ValueError(
"Specify either signature_def_tensor_names or user_tensor_names")
return tensors
def freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
output_arrays, tag_set, signature_key):
"""Converts a SavedModel to a frozen graph.
Args:
saved_model_dir: SavedModel directory to convert.
input_arrays: List of input tensors to freeze graph with. Uses input arrays
from SignatureDef when none are provided.
input_shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo": [1, 16, 16, 3]}).
      Automatically determined when input shapes is None (e.g., {"foo": None}).
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
Returns:
frozen_graph_def: Frozen GraphDef.
in_tensors: List of input tensors for the graph.
out_tensors: List of output tensors for the graph.
graph: `Graph` object.
Raises:
ValueError:
SavedModel doesn't contain a MetaGraphDef identified by tag_set.
signature_key is not in the MetaGraphDef.
assets/ directory is in the MetaGraphDef.
input_shapes does not match the length of input_arrays.
input_arrays or output_arrays are not valid.
"""
# Read SignatureDef.
meta_graph = get_meta_graph_def(saved_model_dir, tag_set)
signature_def = get_signature_def(meta_graph, signature_key)
inputs, outputs = get_inputs_outputs(signature_def)
# Check SavedModel for assets directory.
collection_def = meta_graph.collection_def
if constants.ASSETS_KEY in collection_def:
raise ValueError("SavedModels with assets/ directory are not supported.")
graph = ops.Graph()
with session.Session(graph=graph) as sess:
loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir)
# Gets input and output tensors.
# TODO(zhixianyan): Use TFLite supported Op list to filter outputs.
in_tensors = _get_tensors(graph, inputs, input_arrays)
out_tensors = _get_tensors(graph, outputs, output_arrays)
util.set_tensor_shapes(in_tensors, input_shapes)
frozen_graph_def = util.freeze_graph(sess, in_tensors, out_tensors)
return frozen_graph_def, in_tensors, out_tensors, sess.graph
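# A minimal usage sketch (the SavedModel directory is hypothetical; see the
# accompanying convert_saved_model_test.py for runnable variants):
#
#   from tensorflow.python.saved_model import signature_constants
#   from tensorflow.python.saved_model import tag_constants
#
#   frozen_graph_def, in_tensors, out_tensors, graph = freeze_saved_model(
#       saved_model_dir="/tmp/simple_savedmodel",
#       input_arrays=None,
#       input_shapes=None,
#       output_arrays=None,
#       tag_set=set([tag_constants.SERVING]),
#       signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)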
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/convert_saved_model.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wraps toco interface with python lazy loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
# TODO(b/137402359): Remove lazy loading wrapper
def wrapped_toco_convert(model_flags_str, toco_flags_str, input_data_str,
debug_info_str, enable_mlir_converter):
"""Wraps TocoConvert with lazy loader."""
return pywrap_tensorflow.TocoConvert(
model_flags_str,
toco_flags_str,
input_data_str,
False, # extended_return
debug_info_str,
enable_mlir_converter)
def wrapped_get_potentially_supported_ops():
"""Wraps TocoGetPotentiallySupportedOps with lazy loader."""
return pywrap_tensorflow.TocoGetPotentiallySupportedOps()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/wrap_toco.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFLite SavedModel conversion test cases.
- Tests converting simple SavedModel graph to TFLite FlatBuffer.
- Tests converting simple SavedModel graph to frozen graph.
- Tests converting MNIST SavedModel to TFLite FlatBuffer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.lite.python import convert_saved_model
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
class FreezeSavedModelTest(test_util.TensorFlowTestCase):
def _createSimpleSavedModel(self, shape):
"""Create a simple SavedModel on the fly."""
saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
with session.Session() as sess:
in_tensor = array_ops.placeholder(shape=shape, dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
inputs = {"x": in_tensor}
outputs = {"y": out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def _createSavedModelTwoInputArrays(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name="inputB")
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name="inputA")
out_tensor = in_tensor_1 + in_tensor_2
inputs = {"x": in_tensor_1, "y": in_tensor_2}
outputs = {"z": out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def _getArrayNames(self, tensors):
return [tensor.name for tensor in tensors]
def _getArrayShapes(self, tensors):
dims = []
for tensor in tensors:
dim_tensor = []
for dim in tensor.shape:
if isinstance(dim, tensor_shape.Dimension):
dim_tensor.append(dim.value)
else:
dim_tensor.append(dim)
dims.append(dim_tensor)
return dims
def _convertSavedModel(self,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
if tag_set is None:
tag_set = set([tag_constants.SERVING])
if signature_key is None:
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
graph_def, in_tensors, out_tensors, _ = (
convert_saved_model.freeze_saved_model(
saved_model_dir=saved_model_dir,
input_arrays=input_arrays,
input_shapes=input_shapes,
output_arrays=output_arrays,
tag_set=tag_set,
signature_key=signature_key))
return graph_def, in_tensors, out_tensors
def testSimpleSavedModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(saved_model_dir)
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
def testSimpleSavedModelWithNoneBatchSizeInShape(self):
"""Test a SavedModel with None in input tensor's shape."""
saved_model_dir = self._createSimpleSavedModel(shape=[None, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(saved_model_dir)
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[None, 16, 16, 3]])
def testSimpleSavedModelWithInvalidSignatureKey(self):
"""Test a SavedModel that fails due to an invalid signature_key."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
with self.assertRaises(ValueError) as error:
self._convertSavedModel(saved_model_dir, signature_key="invalid-key")
self.assertEqual(
"No 'invalid-key' in the SavedModel's SignatureDefs. "
"Possible values are 'serving_default'.", str(error.exception))
def testSimpleSavedModelWithInvalidOutputArray(self):
"""Test a SavedModel that fails due to invalid output arrays."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
with self.assertRaises(ValueError) as error:
self._convertSavedModel(saved_model_dir, output_arrays=["invalid-output"])
self.assertEqual("Invalid tensors 'invalid-output' were found.",
str(error.exception))
def testSimpleSavedModelWithWrongInputArrays(self):
"""Test a SavedModel that fails due to invalid input arrays."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
# Check invalid input_arrays.
with self.assertRaises(ValueError) as error:
self._convertSavedModel(saved_model_dir, input_arrays=["invalid-input"])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
# Check valid and invalid input_arrays.
with self.assertRaises(ValueError) as error:
self._convertSavedModel(
saved_model_dir, input_arrays=["Placeholder", "invalid-input"])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
def testSimpleSavedModelWithCorrectArrays(self):
"""Test a SavedModel with correct input_arrays and output_arrays."""
saved_model_dir = self._createSimpleSavedModel(shape=[None, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
input_arrays=["Placeholder"],
output_arrays=["add"])
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[None, 16, 16, 3]])
def testSimpleSavedModelWithCorrectInputArrays(self):
"""Test a SavedModel with correct input_arrays and input_shapes."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
input_arrays=["Placeholder"],
input_shapes={"Placeholder": [1, 16, 16, 3]})
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
def testTwoInputArrays(self):
"""Test a simple SavedModel."""
saved_model_dir = self._createSavedModelTwoInputArrays(shape=[1, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir, input_arrays=["inputB", "inputA"])
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0", "inputB:0"])
self.assertEqual(
self._getArrayShapes(in_tensors), [[1, 16, 16, 3], [1, 16, 16, 3]])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModelTwoInputArrays(shape=[1, 16, 16, 3])
# Check case where input shape is given.
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
input_arrays=["inputA"],
input_shapes={"inputA": [1, 16, 16, 3]})
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
# Check case where input shape is None.
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir, input_arrays=["inputA"])
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
def testMultipleMetaGraphDef(self):
"""Test saved model with multiple MetaGraphDefs."""
saved_model_dir = os.path.join(self.get_temp_dir(), "savedmodel_two_mgd")
builder = saved_model.builder.SavedModelBuilder(saved_model_dir)
with session.Session(graph=ops.Graph()) as sess:
# MetaGraphDef 1
in_tensor = array_ops.placeholder(shape=[1, 28, 28], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sig_input_tensor = saved_model.utils.build_tensor_info(in_tensor)
sig_input_tensor_signature = {"x": sig_input_tensor}
sig_output_tensor = saved_model.utils.build_tensor_info(out_tensor)
sig_output_tensor_signature = {"y": sig_output_tensor}
predict_signature_def = (
saved_model.signature_def_utils.build_signature_def(
sig_input_tensor_signature, sig_output_tensor_signature,
saved_model.signature_constants.PREDICT_METHOD_NAME))
signature_def_map = {
saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
predict_signature_def
}
builder.add_meta_graph_and_variables(
sess,
tags=[saved_model.tag_constants.SERVING, "additional_test_tag"],
signature_def_map=signature_def_map)
# MetaGraphDef 2
builder.add_meta_graph(tags=["tflite"])
builder.save(True)
# Convert to tflite
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
tag_set=set([saved_model.tag_constants.SERVING, "additional_test_tag"]))
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 28, 28]])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/convert_saved_model_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions used by multiple converter files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.core.protobuf import config_pb2 as _config_pb2
from tensorflow.core.protobuf import meta_graph_pb2 as _meta_graph_pb2
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs
from tensorflow.lite.python.op_hint import find_all_hinted_output_nodes
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.eager import function
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import error_interpolation as _error_interpolation
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.training.saver import export_meta_graph as _export_meta_graph
# Map of tf.dtypes to TFLite types_flag_pb2.
_MAP_TF_TO_TFLITE_TYPES = {
dtypes.float32: _types_pb2.FLOAT,
dtypes.float16: _types_pb2.FLOAT16,
dtypes.int32: _types_pb2.INT32,
dtypes.int64: _types_pb2.INT64,
dtypes.string: _types_pb2.STRING,
dtypes.uint8: _types_pb2.QUANTIZED_UINT8,
dtypes.int8: _types_pb2.INT8,
dtypes.complex64: _types_pb2.COMPLEX64,
dtypes.bool: _types_pb2.BOOL,
}
def convert_dtype_to_tflite_type(tf_dtype):
"""Converts tf.dtype to TFLite proto type.
Args:
tf_dtype: tf.dtype
Raises:
ValueError: Unsupported tf.dtype.
Returns:
    The corresponding TFLite proto type (a _types_pb2 enum value).
"""
result = _MAP_TF_TO_TFLITE_TYPES.get(tf_dtype)
if result is None:
raise ValueError("Unsupported tf.dtype {0}".format(tf_dtype))
return result
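# For example (values grounded in _MAP_TF_TO_TFLITE_TYPES above):
#   convert_dtype_to_tflite_type(dtypes.float32)  # -> _types_pb2.FLOAT
#   convert_dtype_to_tflite_type(dtypes.uint8)    # -> _types_pb2.QUANTIZED_UINT8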
def get_tensor_name(tensor):
"""Returns name of the input tensor.
Args:
tensor: tf.Tensor
Returns:
str
"""
parts = tensor.name.split(":")
if len(parts) > 2:
raise ValueError("Tensor name invalid. Expect 0 or 1 colon, got {0}".format(
len(parts) - 1))
  # To be consistent with the tensor naming scheme in tensorflow, we need to
  # drop the ':0' suffix for the first tensor.
if len(parts) > 1 and parts[1] != "0":
return tensor.name
return parts[0]
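# For example (tensor names are illustrative):
#   a tensor named "Placeholder:0" maps to "Placeholder", while a tensor
#   named "split:1" is returned unchanged as "split:1".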
def get_tensors_from_tensor_names(graph, tensor_names):
"""Gets the Tensors associated with the `tensor_names` in the provided graph.
Args:
graph: TensorFlow Graph.
tensor_names: List of strings that represent names of tensors in the graph.
Returns:
A list of Tensor objects in the same order the names are provided.
Raises:
ValueError:
tensor_names contains an invalid tensor name.
"""
# Get the list of all of the tensors.
tensor_name_to_tensor = {}
for op in graph.get_operations():
for tensor in op.values():
tensor_name_to_tensor[get_tensor_name(tensor)] = tensor
# Get the tensors associated with tensor_names.
tensors = []
invalid_tensors = []
for name in tensor_names:
tensor = tensor_name_to_tensor.get(name)
if tensor is None:
invalid_tensors.append(name)
else:
tensors.append(tensor)
# Throw ValueError if any user input names are not valid tensors.
if invalid_tensors:
raise ValueError("Invalid tensors '{}' were found.".format(
",".join(invalid_tensors)))
return tensors
def set_tensor_shapes(tensors, shapes):
"""Sets Tensor shape for each tensor if the shape is defined.
Args:
    tensors: List of TensorFlow ops.Tensor instances.
    shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo": [1, 16, 16, 3]}).
Raises:
ValueError:
`shapes` contains an invalid tensor.
`shapes` contains an invalid shape for a valid tensor.
"""
if shapes:
tensor_names_to_tensor = {
get_tensor_name(tensor): tensor for tensor in tensors
}
for name, shape in shapes.items():
if name not in tensor_names_to_tensor:
raise ValueError("Invalid tensor \'{}\' found in tensor shapes "
"map.".format(name))
if shape is not None:
tensor = tensor_names_to_tensor[name]
try:
tensor.set_shape(shape)
except ValueError as error:
message = ("The shape of tensor '{0}' cannot be changed from {1} to "
"{2}. {3}".format(name, tensor.shape, shape, str(error)))
raise ValueError(message)
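# A small sketch tying the two helpers above together (graph construction is
# illustrative and assumes the usual ops/array_ops imports):
#
#   with ops.Graph().as_default() as g:
#     _ = array_ops.placeholder(dtypes.float32, shape=None, name="input")
#     tensors = get_tensors_from_tensor_names(g, ["input"])
#     set_tensor_shapes(tensors, {"input": [1, 16, 16, 3]})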
def get_grappler_config(optimizers_list):
"""Creates a tf.compat.v1.ConfigProto for configuring Grappler.
Args:
optimizers_list: List of strings that represents the list of optimizers.
Returns:
tf.ConfigProto.
"""
config = _config_pb2.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
for optimizer in optimizers_list:
rewrite_options.optimizers.append(optimizer)
return config
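# For example, freeze_graph below requests only function inlining:
#   config = get_grappler_config(["function"])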
def run_graph_optimizations(graph_def,
input_arrays,
output_arrays,
config,
graph=None):
"""Apply standard TensorFlow optimizations to the graph_def.
Args:
graph_def: Frozen GraphDef to be optimized.
input_arrays: List of arrays that are considered inputs of the graph.
output_arrays: List of arrays that are considered outputs of the graph.
config: tf.ConfigProto.
graph: TensorFlow Graph. Required when Eager mode is enabled. (default None)
Returns:
A new, optimized GraphDef.
"""
meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)
# We need to add a collection called 'train_op' so that grappler
# knows what the outputs are.
fetch_collection = _meta_graph_pb2.CollectionDef()
for array in input_arrays + output_arrays:
fetch_collection.node_list.value.append(array.name)
meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
return tf_optimizer.OptimizeGraph(config, meta_graph)
def _convert_op_hints_if_present(sess, graph_def, output_tensors,
hinted_outputs_nodes):
if is_frozen_graph(sess):
raise ValueError("Try to convert op hints, needs unfrozen graph.")
output_arrays = [get_tensor_name(tensor) for tensor in output_tensors]
graph_def = tf_graph_util.convert_variables_to_constants(
sess, graph_def, output_arrays + hinted_outputs_nodes)
graph_def = convert_op_hints_to_stubs(graph_def=graph_def)
graph_def = tf_graph_util.remove_training_nodes(graph_def)
return graph_def
def freeze_graph(sess, input_tensors, output_tensors):
"""Returns a frozen GraphDef.
  Runs a Grappler pass to inline any functions in the graph, then freezes the
  graph if it still contains Variables; if the graph is already frozen, the
  existing GraphDef is returned.
  If OpHints are present, it will try to convert the OpHint graph.
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors.
output_tensors: List of output tensors (only .name is used from this).
Returns:
Frozen GraphDef.
"""
# Runs a Grappler pass in order to inline any functions in the graph.
  # Aside from inlining any simple function, Grappler will also try to lower
  # while loops into a switch-merge representation, which is undesired for
  # OpHints, so we simply remove those attributes to prevent Grappler from
  # doing so.
graph_def = _convert_to_constants.disable_lower_using_switch_merge(
sess.graph_def)
config = get_grappler_config(["function"])
graph_def = run_graph_optimizations(
graph_def, input_tensors, output_tensors, config, graph=sess.graph)
# If ophints are present, just convert them.
hinted_outputs_nodes = find_all_hinted_output_nodes(sess)
if hinted_outputs_nodes:
return _convert_op_hints_if_present(sess, graph_def, output_tensors,
hinted_outputs_nodes)
if not is_frozen_graph(sess):
output_arrays = [get_tensor_name(tensor) for tensor in output_tensors]
return tf_graph_util.convert_variables_to_constants(sess, graph_def,
output_arrays)
else:
return sess.graph_def
def is_frozen_graph(sess):
"""Determines if the graph is frozen.
Determines if a graph has previously been frozen by checking for any
operations of type Variable*. If variables are found, the graph is not frozen.
Args:
sess: TensorFlow Session.
Returns:
Bool.
"""
for op in sess.graph.get_operations():
if op.type.startswith("Variable") or op.type.endswith("VariableOp"):
return False
return True
def build_debug_info_func(original_graph):
"""Returns a method to retrieve the `GraphDebugInfo` from the original graph.
Args:
original_graph: The original `Graph` containing all the op stack traces.
Returns:
A function which retrieves the stack traces from the original graph and
converts them to a `GraphDebugInfo` for a given set of nodes.
"""
def f(original_nodes):
"""Function to create `GraphDebugInfo` for the given `original_nodes`."""
if not original_graph:
return None
# For the given nodes, gets all the op definitions in the original graph.
useful_ops = []
for func, name in original_nodes:
try:
if not func:
useful_ops.append((func, original_graph.get_operation_by_name(name)))
else:
sub_func = original_graph._get_function(func) # pylint: disable=protected-access
if isinstance(sub_func, function._EagerDefinedFunction): # pylint: disable=protected-access
useful_ops.append(
(func, sub_func.graph.get_operation_by_name(name)))
else:
sys.stderr.write(
"Use '@tf.function' or '@defun' to decorate the function.")
continue
except KeyError:
# New node created by graph optimizer. No stack trace from source code.
continue
# Convert all the op definitions to stack traces in terms of GraphDebugInfo.
return _error_interpolation.create_graph_debug_info_def(useful_ops)
return f
def get_debug_info(nodes_to_debug_info_func, converted_graph):
"""Returns the debug info for the original nodes in the `converted_graph`.
Args:
nodes_to_debug_info_func: The method to collect the op debug info for the
nodes.
    converted_graph: A `GraphDef` after optimization and transformation.
Returns:
`GraphDebugInfo` for all the original nodes in `converted_graph`.
"""
if not nodes_to_debug_info_func:
return None
# Collect all the debug info nodes from the converted_graph
original_nodes = set()
for node in converted_graph.node:
debug_nodes = node.experimental_debug_info.original_node_names
debug_funcs = node.experimental_debug_info.original_func_names
# If the `original_node_names` are empty, uses the node name directly.
if not debug_nodes:
original_nodes.add(("", node.name))
else:
for i in range(len(debug_nodes)):
original_nodes.add((debug_funcs[i], debug_nodes[i]))
# Convert the nodes to the debug info proto object.
return nodes_to_debug_info_func(original_nodes)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This is
basically a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.lite.OpHint("cool_activation")
input, = custom.add_inputs(input)
output = tf.sigmoid(input) * input
output, = custom.add_outputs(output)
return output
image = tf.compat.v1.placeholder(tf.float32, (1, 16, 16, 1))
output = tf.identity(tflite_cool_activation(image))
session = tf.compat.v1.Session()
graphdef_to_convert = tf.lite.convert_op_hints_to_stubs(session)
tflite_graph = tf.compat.v1.lite.toco_convert(
graphdef_to_convert, [image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input and output are identities that carry parameters recording which
argument they are, the name of the function they should turn into in
TF Lite, and a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these identity-wrapped subgraphs with stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
# TODO(aselle): Make this use generic graph transformations.
# TODO(aselle): _tensor_name_base should be called _tensor_name_to_op_name.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import copy as _copy
import json as _json
import uuid as _uuid
import six as _six
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.core.framework import node_def_pb2 as _node_def_pb2
from tensorflow.python.framework import ops as _ops
# TODO(aselle): publicize these apis if we continue to use these.
from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util import compat as _compat
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.tf_export import tf_export as _tf_export
@_tf_export(v1=["lite.OpHint"])
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
"""
# TODO(aselle): When TensorFlow functions functionality works for arbitrary
# constructs, this mechanism can be retired and changed to use python defun's.
# Attr constants that are used for representation in the GraphDef. These
# will be used on every Identity op that is involved in a total OpHint.
# Name of the OpHint function (cosmetic).
FUNCTION_NAME_ATTR = "_tflite_function_name"
# UUID of the function (each OpHint gets a new uuid).
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
# The input index of the input (or nothing if it is an output).
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
# The output index of the output (or nothing if it is an input).
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
# An index that orders aggregate arguments. Aggregate arguments are ones
# that are separate but will be fused horizontally. For example a static LSTM
# has a lstm cell for each time step. Each one has a separate opHint, but a
# fused SequentialLSTM will treat this as a single tensor.
FUNCTION_SORT_INDEX_ATTR = "_tflite_function_sort_index"
# The way in which multiple parts of the aggregate argument will be joined
# into a fused operand. Valid options are OpHint.AGGREGATE_FIRST,
# OpHint.AGGREGATE_LAST, OpHint.AGGREGATE_STACK.
FUNCTION_AGGREGATE_ATTR = "_tflite_function_aggregate"
# On fused OpHint stub, the order of inputs that the final LSTM call will
# have. What this means is that the TensorFlow order might be
# "foo", "bar", "stuff" and you might want the TF lite op order to be
# "stuff", "foo", "bar", -1 (where -1 is unused). So you would set this
# attribute to [2, 0, 1, -1].
TFLITE_INPUT_INDICES = "_tflite_input_indices"
# OpHint level.
FUNCTION_LEVEL_ATTR = "_tflite_ophint_level"
# Ophint internal mapping, this is for high level Ophint only.
# This basically contains three kinds of mapping:
# 1) How parental ophinted inputs map to the first child ophinted inputs;
# 2) How internal children nodes are connected;
# 3) How parental ophinted outputs map to the last child ophinted outputs.
CHILDREN_INPUTS_MAPPINGS = "_tflite_children_ophint_inputs_mapping"
# Types of aggregations
# stack: stacks all ophints with matching tags. i.e. for a static rnn.
# specifically, this is good for an input or output to a static rnn cell.
AGGREGATE_STACK = "stack"
# first: only takes the first output (one with lowest sort index)
# of matching tags. This is good for the input state to an RNN.
AGGREGATE_FIRST = "first"
# aggregation last takes only the last tag (one with highest sort index).
# This is good for an output value on the last stack item of a
# static rnn.
AGGREGATE_LAST = "last"
class OpHintArgumentTracker(object):
"""Conceptually tracks indices of arguments of "OpHint functions".
The inputs and outputs of these functions each use an instance
of the class so they can have independent numbering.
"""
def __init__(self,
function_name,
unique_function_id,
node_name_prefix,
attr_name,
level=1,
children_inputs_mappings=None):
"""Initialize ophint argument.
Args:
function_name: Name of the function that this tracks arguments for.
unique_function_id: UUID of function that this tracks arguments for.
node_name_prefix: How identities that are created are named.
attr_name: Name of attribute to use to store the index for this hint.
i.e. FUNCTION_INPUT_INDEX or FUNCTION_OUTPUT_INDEX
level: Hierarchical level of the Ophint node, a number.
children_inputs_mappings: Inputs/Outputs mapping for children hints.
"""
# The global index is the argument index of the op. This is in contrast
# to the sort index which is the sequence number of a particular instance
of a given global index. For example, you may have called `add`
# twice with the tag "foo". Then the global index will be 0 for both
# and the sort index will be 0 for the first added and 1 for the second.
self._function_name = function_name
self._unique_function_id = unique_function_id
self._next_global_index = 0 # The absolute global index
self._used_global_indices = set()
self._tag_to_global_index = {} # The argument index a given tag maps to
self._tag_to_next_sort_index = {} # The current index for each tag
self._node_name_prefix = node_name_prefix
self._attr_name = attr_name
self._level = level
self._children_inputs_mappings = children_inputs_mappings
def _get_new_global_index(self, index_override):
"""Return the next unused argument index in order or use an override.
Args:
index_override: An index to use instead of the next available or None
to use the next available.
Returns:
A valid global_index to use for the next hint argument.
Raises:
ValueError: If the index_override is already used by another hint.
"""
if index_override is None:
global_index = self._next_global_index
else:
if index_override in self._used_global_indices:
raise ValueError("Index %d was already used by another call to add")
global_index = index_override
# Make next_global_index valid
self._used_global_indices.add(global_index)
while self._next_global_index in self._used_global_indices:
self._next_global_index += 1
return global_index
def add(self, arg, tag=None, name=None, aggregate=None,
index_override=None):
"""Return a wrapped tensor of an input tensor as an argument.
Args:
arg: A TensorFlow tensor that should be considered an argument.
tag: String tag to identify arguments that should be packed.
name: Name of argument. This is included in the Identity hint op names.
aggregate: Strategy to aggregate.
Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,
and OpHint.AGGREGATE_STACK.
Note, aggregate is only valid if tag is specified.
index_override: Specify what input/output index this argument should be
in the final stub. e.g. add(arg0, index_override=1);
add(arg1, index_override=0) will make the final stub be
stub_func(inputs=[arg1, arg0], outputs=[]) rather than the default
call-order-based ordering.
Returns:
A tensor representing the wrapped argument.
Raises:
ValueError: When indices are not consistent.
"""
# Find the appropriate index
if tag is None:
if aggregate is not None:
raise ValueError("You must specify `tag` if using aggregate.")
global_index = self._get_new_global_index(index_override)
sort_index = None
else:
if aggregate is None:
raise ValueError("You must specify `aggregate` if using tag.")
if tag not in self._tag_to_global_index:
self._tag_to_global_index[tag] = (
self._get_new_global_index(index_override))
self._tag_to_next_sort_index[tag] = 0
elif (index_override and
index_override != self._tag_to_global_index[tag]):
raise ValueError(
"Tag %r was called with two indices %r and %r" %
(tag, index_override, self._tag_to_global_index[tag]))
global_index = self._tag_to_global_index[tag]
sort_index = self._tag_to_next_sort_index[tag]
self._tag_to_next_sort_index[tag] += 1
uuid = self._unique_function_id
name = "%s-%s-%s-%r-%r-%s" % (self._node_name_prefix, self._function_name,
uuid, global_index, sort_index, name)
identity_op = _array_ops.identity(arg, name=name)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(
s=_compat.as_bytes(self._function_name)))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(
s=_compat.as_bytes(self._unique_function_id)))
identity_op.op._set_attr(
self._attr_name, _attr_value_pb2.AttrValue(i=global_index))
identity_op.op._set_attr(OpHint.FUNCTION_LEVEL_ATTR,
_attr_value_pb2.AttrValue(i=self._level))
if self._children_inputs_mappings:
identity_op.op._set_attr(
OpHint.CHILDREN_INPUTS_MAPPINGS,
_attr_value_pb2.AttrValue(
s=_compat.as_bytes(_json.dumps(
self._children_inputs_mappings))))
if sort_index is not None:
identity_op.op._set_attr(
OpHint.FUNCTION_SORT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=sort_index))
if aggregate is not None:
identity_op.op._set_attr(
OpHint.FUNCTION_AGGREGATE_ATTR,
_attr_value_pb2.AttrValue(s=_compat.as_bytes(aggregate)))
# pylint: enable=protected-access
return identity_op
def __init__(self,
function_name,
level=1,
children_inputs_mappings=None,
**kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
level: OpHint level.
children_inputs_mappings: Children OpHint inputs/outputs mapping.
children_inputs_mappings should look like below:
"parent_first_child_input":
[{"parent_ophint_input_index": num,
"first_child_ophint_input_index": num}, ...]
"parent_last_child_output":
[{"parent_output_index": num, "child_output_index": num}, ...]
"internal_children_input_output":
[{"child_input_index": num, "child_output_index": num}, ...]
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._level = level
if self._level == 1:
assert children_inputs_mappings is None
else:
assert isinstance(children_inputs_mappings, dict)
self._children_inputs_mappings = children_inputs_mappings
if self._children_inputs_mappings is not None:
self._validate_children_inputs_mappings(self._children_inputs_mappings)
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._attrs_to_store_later = kwargs
self._stored_attrs = False
self._inputs = OpHint.OpHintArgumentTracker(
self._function_name, self._unique_function_id, "InputHint",
OpHint.FUNCTION_INPUT_INDEX_ATTR, level, self._children_inputs_mappings)
self._outputs = OpHint.OpHintArgumentTracker(
self._function_name, self._unique_function_id, "OutputHint",
OpHint.FUNCTION_OUTPUT_INDEX_ATTR, level,
self._children_inputs_mappings)
def _validate_children_inputs_mappings(self, children_inputs_mappings):
"""Validate children inputs mappings is in the right format.
Args:
children_inputs_mappings: the Children ophint inputs/outputs mapping.
"""
assert isinstance(children_inputs_mappings, dict)
assert "parent_first_child_input" in children_inputs_mappings
assert "parent_last_child_output" in children_inputs_mappings
assert "internal_children_input_output" in children_inputs_mappings
# validate parent_first_child_input.
def assert_dictlist_has_keys(dictlist, keys):
for dikt in dictlist:
assert isinstance(dikt, dict)
for key in keys:
assert key in dikt
assert_dictlist_has_keys(
children_inputs_mappings["parent_first_child_input"],
["parent_ophint_input_index", "first_child_ophint_input_index"])
assert_dictlist_has_keys(
children_inputs_mappings["parent_last_child_output"],
["parent_output_index", "child_output_index"])
assert_dictlist_has_keys(
children_inputs_mappings["internal_children_input_output"],
["child_input_index", "child_output_index"])
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_input(self, *args, **kwargs):
"""Add a wrapped input argument to the hint.
Args:
*args: The input tensor.
**kwargs:
"name" label
"tag" a tag to group multiple arguments that will be aggregated. I.e.
a string like 'cool_input'. Basically multiple inputs can be added
to the same hint for parallel operations that will eventually be
combined. An example would be static_rnn which creates multiple copies
of state or inputs.
"aggregate" aggregation strategy that is valid only for tag non None.
Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,
and OpHint.AGGREGATE_STACK.
"index_override" The global index to use. This corresponds to the
argument order in the final stub that will be generated.
Returns:
The wrapped input tensor.
"""
return self._inputs.add(*args, **kwargs)
def add_output(self, *args, **kwargs):
"""Add a wrapped output argument to the hint.
Args:
*args: The output tensor.
**kwargs:
"name" label
"tag" a tag to group multiple arguments that will be aggregated. I.e.
a string like 'cool_input'. Basically multiple inputs can be added
to the same hint for parallel operations that will eventually be
combined. An example would be static_rnn which creates multiple copies
of state or inputs.
"aggregate" aggregation strategy that is valid only for tag non None.
Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,
and OpHint.AGGREGATE_STACK.
"index_override" The global index to use. This corresponds to the
argument order in the final stub that will be generated.
Returns:
The wrapped output tensor.
"""
return self._outputs.add(*args, **kwargs)
def add_inputs(self, *args, **kwargs):
"""Add a sequence of inputs to the function invocation.
Args:
*args: List of inputs to be converted (should be tf.Tensor).
**kwargs: This allows 'names' which should be a list of names.
Returns:
Wrapped inputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
if "names" in kwargs:
return [
self._inputs.add(arg, name=name)
for arg, name in zip(args, kwargs["names"])
]
else:
return [self._inputs.add(arg) for arg in args]
def add_outputs(self, *args, **kwargs):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
**kwargs: This allows 'names' which should be a list of names.
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
if "names" in kwargs:
return [
self._outputs.add(arg, name=name)
for arg, name in zip(args, kwargs["names"])
]
else:
return [self._outputs.add(arg) for arg in args]
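# Illustrative sketch of the tag/aggregate API documented above (hedged: the
# surrounding graph context and the names `inputs`, `state`, and `my_cell`
# are assumptions, not part of this module). Each step of an unrolled RNN
# adds its per-step tensors under the same tag so toco can later fuse them
# into one stacked operand:
#
#   hint = OpHint("sample_rnn")
#   for t in range(3):
#     x_t = hint.add_input(inputs[t], tag="x",
#                          aggregate=OpHint.AGGREGATE_STACK)
#     state = my_cell(x_t, state)
#     state = hint.add_output(state, tag="state",
#                             aggregate=OpHint.AGGREGATE_LAST)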
class _LiteOperand(object):
"""Abstract operand for a tflite hint function._dynamic_rnn_loop.
This is a base class that handles representing arguments to an OpHint.
It also is able to serialize operands to the stubbed graph_def.
Child classes are responsible for being able to
store information about the hint identity operators. They are also responsible
for knowing how to serialize to output graphdefs.
Typically this will be implemented by holding one or more identity nodes
that were previously discovered as hints.
"""
def aggregate_and_return_name_for_input(self, out_graphdef):
"""This adds the node(s) to out_graphdef and returns the input node name.
Args:
out_graphdef: A graphdef that is ready to have this input added.
Returns:
The output that the stub should use as an input for this operand.
Raises:
RuntimeError: if the method is not implemented.
"""
del out_graphdef
raise RuntimeError("Unimplemented abstract method.")
def aggregate_and_return_name_for_output(self, fused_op_name, output_index,
out_graphdef):
"""Add node(s) to graph representing output operands and returns type.
Args:
fused_op_name: name of the fused op stub name.
output_index: Output index that we are currently processing from stub.
out_graphdef: The destination graphdef we are currently building up.
Returns:
The datatype of this identity.
Raises:
RuntimeError: if the method is not implemented.
"""
del fused_op_name, output_index, out_graphdef
raise RuntimeError("Unimplemented abstract method.")
class _LiteSingleOperand(_LiteOperand):
"""A simple operand that is non-aggregated (i.e. most hints)."""
def __init__(self, node):
_LiteOperand.__init__(self)
self.node = node
self.name = _tensor_name_base(node.name)
def flatten(self):
return [self.name]
def aggregate_and_return_name_for_input(self, out_graphdef):
return self.name
def aggregate_and_return_name_for_output(self, fused_op_name, index,
out_graphdef):
output_node = _copy.deepcopy(self.node)
del output_node.input[:]
output_node.input.append(_tensorflow_output_name(fused_op_name, index))
out_graphdef.node.extend([output_node])
return self.node.attr["type"].i
def __str__(self):
return str(self.name)
class _LiteAggregateOperand(_LiteOperand):
"""An operand for a tflite hint function that is aggregated from many.
For example, an LSTM is a grid of operators that are all related. Inputs
going into them may need to be fused, so they should all be tracked as
related arguments.
"""
def __init__(self, aggregation):
_LiteOperand.__init__(self)
self.aggregation = aggregation
self.names = {}
self.nodes = {}
self.flattened = None
def add(self, sort, node):
self.names[sort] = _tensor_name_base(node.name)
self.nodes[sort] = node
def flatten_nodes(self):
"""Return a list of all the node protos in aggregation sorted order."""
if not self.flattened:
self.flattened = [None] * len(self.nodes)
for idx, node in _six.iteritems(self.nodes):
self.flattened[idx] = node
for n in self.flattened:
if n is None:
raise RuntimeError("Aggregate was missing argument.")
if self.aggregation == OpHint.AGGREGATE_FIRST:
self.flattened = self.flattened[:1]
elif self.aggregation == OpHint.AGGREGATE_LAST:
self.flattened = self.flattened[-1:]
elif self.aggregation == OpHint.AGGREGATE_STACK:
pass
else:
raise ValueError(
"Invalid aggregation type %r specified" % self.aggregation)
return self.flattened
def flatten(self):
"""Return a list of all node names in aggregation sorted sorter."""
return [_tensor_name_base(x.name) for x in self.flatten_nodes()]
def aggregate_and_return_name_for_input(self, out_graphdef):
"""This adds the nodes to out_graphdef and returns an aggregated output.
In particular, if you have 4 inputs to a hint stub, this will be the
node that you can use as an output. I.e. if you have 4 timesteps from a
static rnn, then a fused UnidirectionalLSTM will expect 1 input with
all 4 time steps. So here we make a pack and return the output name of
that pack.
Args:
out_graphdef: A graphdef that is ready to have this input added.
Returns:
The name of a pack that aggregates this node.
"""
flattened = self.flatten_nodes()
if (self.aggregation == OpHint.AGGREGATE_FIRST) or (
self.aggregation == OpHint.AGGREGATE_LAST):
assert len(flattened) == 1
if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:
return _tensor_name_base(flattened[0].name)
else:
new_node = _node_def_pb2.NodeDef()
new_node.op = "Pack"
new_node.name = "OpHintStack-%s" % flattened[0].name
new_node.attr["N"].i = len(flattened)
new_node.attr["T"].type = flattened[0].attr["T"].type
for discrete in flattened:
new_node.input.append(_tensor_name_base(discrete.name))
out_graphdef.node.extend([new_node])
return new_node.name
def aggregate_and_return_name_for_output(self, fused_op_name, output_index,
out_graphdef):
"""This adds to `out_graphdef` all the unaggregated outputs.
I.e. we are outputting from a fused stub, but we need to make it compatible
with the unfused original graph so we insert an unpack. Ideally in a later
stage the unpack -> pack sequences will be removed.
Args:
fused_op_name: The name of the stub we are in the process of fusing.
output_index: The output index this object represents.
out_graphdef: The graphdef we are in the process of building.
Returns:
The type of the aggregated output (so we can finish building the stub
op).
"""
flattened = self.flatten_nodes()
if (self.aggregation == OpHint.AGGREGATE_FIRST) or (
self.aggregation == OpHint.AGGREGATE_LAST):
assert len(flattened) == 1
if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:
temp_op = _LiteSingleOperand(flattened[0])
return temp_op.aggregate_and_return_name_for_output(
fused_op_name, output_index, out_graphdef)
else:
stack_node = _node_def_pb2.NodeDef()
stack_node.op = "Unpack"
stack_node.name = "OpHintUnstack-%s" % flattened[0].name
stack_node.attr["num"].i = len(flattened)
output_type = flattened[0].attr["T"].type
stack_node.attr["T"].type = output_type
stack_node.input.append(_tensorflow_output_name(
fused_op_name, output_index))
out_graphdef.node.extend([stack_node])
for idx, discrete in enumerate(flattened):
output_node = _copy.deepcopy(discrete)
del output_node.input[:]
output_node.input.append(_tensorflow_output_name(stack_node.name, idx))
out_graphdef.node.extend([output_node])
return output_type
def __str__(self):
s = "\t\t\tAGGREGATE %s\n" % self.aggregation
for sort, val in _six.iteritems(self.names):
s += "\t\t\t%d: %s\n" % (sort, val)
return s
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
This is used to accumulate found hints in the graphdef into a single
conceptual unit.
Attributes:
inputs: inputs to the op (hash from index # to argument)
outputs: outputs to the op (hash from index # to argument)
function_name: the tflite custom op name to use
uuid: a unique call id for this particular call (i.e.
multiple function calls would have the same function_name but different
uuids).
params: A param name to key value for op constant data. I.e. for
axis on a reduction, strides on a convolution, etc.
level: Level of the OpHint.
children_inputs_mappings: If the Ophint has children, children inputs
mappings indicate how their inputs & outputs are mapped.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
self.level = -1
self.children_inputs_mappings = {}
def flattened_inputs_and_outputs(self):
"""Return a list of inputs and outputs in a flattened format.
Returns:
Tuple of (inputs, outputs), where each is a list of names.
"""
def _flatten(input_or_output_dict):
flattened_items = []
for item in input_or_output_dict.values():
flattened_items.extend(item.flatten())
return flattened_items
return _flatten(self.inputs), _flatten(self.outputs)
def __str__(self):
def format_args(items):
s = ""
for idx, item in _six.iteritems(items):
s += ("\t\t%d:\n" % idx) + str(item)
return s
inputs_str = "\tInputs\n" + format_args(self.inputs)
outputs_str = "\tOutputs\n" + format_args(self.outputs)
return (
"tflite function %s call %s level %d "
"\n\tinputs:\n\t\t%s\n\toutputs:\n\t\t%s" %
(self.function_name, self.uuid, self.level, inputs_str, outputs_str))
def _find_all_hints_in_nodes(nodes):
"""Look at the all the input nodes and return a list of LiteFuncCall objs.
Args:
nodes: A TensorFlow graph_def to look for LiteFuncCalls.
Returns:
a list of `LifeFuncCall` objects in the form
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
for node in nodes:
attr = node.attr
# This is an op hint if it has a FUNCTION_UUID_ATTR, otherwise skip
if (OpHint.FUNCTION_UUID_ATTR not in attr
or not attr[OpHint.FUNCTION_UUID_ATTR].s):
continue
uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
# Start building function
call_def = func_calls[uuid]
call_def.uuid = uuid
call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
call_def.level = attr[OpHint.FUNCTION_LEVEL_ATTR].i
# Get sorting and aggregation information
sort = (attr[OpHint.FUNCTION_SORT_INDEX_ATTR].i
if OpHint.FUNCTION_SORT_INDEX_ATTR in attr else None)
if sort == -1: sort = None
aggregation = None
if OpHint.FUNCTION_AGGREGATE_ATTR in attr:
aggregation = _compat.as_text(attr[OpHint.FUNCTION_AGGREGATE_ATTR].s)
if OpHint.CHILDREN_INPUTS_MAPPINGS in attr:
call_def.children_inputs_mappings = _json.loads(
_compat.as_text(attr[OpHint.CHILDREN_INPUTS_MAPPINGS].s))
# Add the input or output
def put_operand(stuff, index, sort, operand, aggregation):
"""Add a given index into the function structure."""
if sort is None:
stuff[index] = _LiteSingleOperand(operand)
else:
if index not in stuff:
stuff[index] = _LiteAggregateOperand(aggregation)
stuff[index].add(sort, operand)
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
put_operand(call_def.inputs, attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i,
sort, node, aggregation)
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
put_operand(call_def.outputs, attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i,
sort, node, aggregation)
# Remember attributes
for a in attr:
if a.startswith("_tflite_attr_"):
call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
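# A small sketch of how this helper is used (illustrative only; `graph_def`
# is an assumed GraphDef containing OpHint-wrapped ops). The returned
# mapping is keyed by function uuid:
#
#   hints = _find_all_hints_in_nodes(graph_def.node)
#   for uuid, call in hints.items():
#     input_names, output_names = call.flattened_inputs_and_outputs()
#     print(call.function_name, uuid, input_names, output_names)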
def _extract_topology_sequence_mapping(nodes):
return dict(
(_tensor_name_base(node.name), idx) for idx, node in enumerate(nodes))
def _find_children_hints_in_while_loop(function_def, nodes_mapping):
"""Find children hints and all nodes inside the while loop.
Args:
function_def: Function def of the while loop.
nodes_mapping: Dict mapping the while loop's input_arg names to real node
names.
Returns:
Ordered children hints and all re-mapped nodes inside the while loop.
"""
new_nodes = []
# Make the inputs of nodes inside the function def point to the real nodes.
for node in function_def.node_def:
for i, _ in enumerate(node.input):
if node.input[i] in nodes_mapping:
node.input[i] = nodes_mapping[node.input[i]]
new_nodes.append(_copy.deepcopy(node))
name_to_seq_num = _extract_topology_sequence_mapping(function_def.node_def)
children_hints = _find_all_hints_in_nodes(new_nodes)
children_hints_q = []
# Ordered by the outputs.
for hint in _six.itervalues(children_hints):
_, output_names = hint.flattened_inputs_and_outputs()
seq = name_to_seq_num[output_names[0]]
for output_name in output_names:
seq = min(seq, name_to_seq_num[output_name])
children_hints_q.append((seq, hint))
children_hints_q.sort(key=lambda tup: tup[0])
ordered_children_hints = [x[1] for x in children_hints_q]
return ordered_children_hints, new_nodes
def _find_children_hints(call, graph_def):
"""Find all children hints.
For a given OpHint, we find all children hints inside it. We also copy all
the nodes inside function defs (if applicable) to the original graph_def;
they are returned in a list as well.
Args:
call: Parent OpHint that contains children ophints.
graph_def: Original graph def.
Returns:
Ordered children hints inside the parent ophint; new graph def that contains
nodes inside function defs (if applicable); nodes inside function defs.
"""
name_to_input_name, _, _ = _extract_graph_summary(graph_def)
input_names, output_names = call.flattened_inputs_and_outputs()
reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name)
reachable_by_output = _bfs_for_reachable_nodes(output_names,
name_to_input_name)
output_nodes_set = set(output_names)
children_hints = []
out = _graph_pb2.GraphDef()
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
function_def_nodes = set()
for node in graph_def.node:
out.node.extend([_copy.deepcopy(node)])
n = _tensor_name_base(node.name)
if n in reachable_by_output:
if n not in reachable_by_input and n not in output_nodes_set:
# Special handling for the while loop's function def.
if node.op == "While" or node.op == "StatelessWhile":
body_name = node.attr["body"].func.name
inputs_outside_loop = node.input
for function_def in graph_def.library.function:
if function_def.signature.name == body_name:
function_inputs = function_def.signature.input_arg
assert len(inputs_outside_loop) == len(function_inputs)
nodes_mapping = {}
for i, function_input in enumerate(function_inputs):
nodes_mapping[function_input.name] = inputs_outside_loop[i]
# TODO(b/123050804): Consider use grappler.
(children_hints_in_loop,
new_nodes) = _find_children_hints_in_while_loop(
function_def, nodes_mapping)
function_def_nodes.update([x.name for x in new_nodes])
children_hints.extend(children_hints_in_loop)
out.node.extend(new_nodes)
return children_hints, out, function_def_nodes
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
if full_tensor_name.startswith("^"):
return full_tensor_name[1:]
return full_tensor_name.split(":")[0]
def _tensorflow_output_name(tensor_name, output_index):
return tensor_name if output_index == 0 else "%s:%d" % (tensor_name,
output_index)
# TODO(aselle): This should be converted to grappler in the future.
def _check_subgraph_closed(n, reachable_by_input, input_nodes_set,
name_to_input_name):
"""Checks to make sure node only connects to predecessor graph through inputs.
Args:
n: Node to check
reachable_by_input: Nodes that are reachable by all inputs of subgraph
input_nodes_set: The set of nodes that are "inputs".
name_to_input_name: Maps from name to the list of inputs.
Raises:
TypeError: If the given node uses items past inputs directly.
"""
next_to_visit = [n]
visited = set()
while next_to_visit:
current_node = next_to_visit.pop()
visited.add(current_node)
if (current_node in reachable_by_input
and current_node not in input_nodes_set):
raise TypeError(
"Node %s uses input %s not in input_nodes." % (n, current_node))
if current_node not in input_nodes_set:
next_to_visit += [
input_node for input_node in name_to_input_name[current_node]
if input_node not in visited
]
# TODO(aselle): This should be converted to grappler in the future.
def _convert_single_op_hint_to_stub(call,
graph_def,
function_def_nodes=None,
is_last_run=True):
"""Given a graph_def, converts `call` into a stub and returns a new graph_def.
Args:
call: A single function call to be converted.
graph_def: A graph_def to use as input (that has call obviously).
function_def_nodes: Nodes inside the function def that are not connected to
the graph.
is_last_run: Whether it is the last run for a given pass (for OpHint has
children).
Returns:
A new transformed graph-def that has call as a stub (single op).
Note: after this process, the graph_def can no longer be loaded into
the TensorFlow runtime, so all future manipulations are done at the
graph_def level.
"""
if function_def_nodes is None:
function_def_nodes = set()
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
graph_def)
input_names, output_names = call.flattened_inputs_and_outputs()
reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name)
reachable_by_output = _bfs_for_reachable_nodes(output_names,
name_to_input_name)
output_nodes_set = set(output_names)
nodes_after_fuse = []
nodes_deleted_by_fuse = set()
# Classify each node. We keep everything reachable from the inputs; nodes
# reachable only from the outputs get swallowed by the fuse, and nodes
# reachable from neither end are placed after the fused op.
for node in graph_def.node:
n = _tensor_name_base(node.name)
if n in reachable_by_output:
if n not in reachable_by_input and n not in output_nodes_set:
nodes_deleted_by_fuse.add(n)
elif n not in reachable_by_input and n not in function_def_nodes:
# n is a node that comes after all the fusings, so keep it.
nodes_after_fuse.append(n)
else:
# n is not connected to the chain of dependencies. On the last run we
# delete such loose nodes; otherwise we keep them.
if not is_last_run:
nodes_after_fuse.append(n)
# Make a new graphdef with all the pre-input and input nodes
out = _graph_pb2.GraphDef()
reachable_by_input_sorted = sorted(
list(reachable_by_input), key=lambda n: name_to_seq_num[n])
for node in reachable_by_input_sorted:
out.node.extend([_copy.deepcopy(name_to_node[node])])
# Create any stacks needed to aggregate arguments into a single input
# i.e. for static_rnn's.
# TODO(aselle): Check that the inputs are complete i.e. 0 to n-1
sorted_input_indices = list(call.inputs.keys())
sorted_input_indices.sort()
sorted_output_indices = list(call.outputs.keys())
sorted_output_indices.sort()
new_node = _node_def_pb2.NodeDef()
# Delegate to each operand to produce the proper new input for this stub node.
# In particular, an aggregate input will now be a Pack of some previously
# non-fused things.
for input_index in sorted_input_indices:
inputs = call.inputs[input_index]
input_name = inputs.aggregate_and_return_name_for_input(out)
new_node.input.append(input_name)
new_node.attr[OpHint.TFLITE_INPUT_INDICES].list.i.extend(sorted_input_indices)
# Create the function
new_node.op = call.function_name
new_node.name = call.uuid
out.node.extend([new_node])
# Now call each output argument to give them a chance to make the proper
# output type and add it to our new_node.
output_dtypes = []
for output_index in sorted_output_indices:
output = call.outputs[output_index]
output_dtype = (
output.aggregate_and_return_name_for_output(new_node.name, output_index,
out))
output_dtypes.append(output_dtype)
new_node.attr["_output_types"].list.type[:] = output_dtypes
# TODO(aselle): what is right here?
new_node.attr["_output_quantized"].b = False
# Add post output nodes that do not depend on the outputs
for n in nodes_after_fuse:
should_keep = True
for input_name in name_to_input_name[n]:
if input_name in nodes_deleted_by_fuse:
should_keep = False
if should_keep:
out.node.extend([_copy.deepcopy(name_to_node[n])])
# Misc. graph_def data that needs copying.
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
return out
# TODO(aselle): This should be converted to grappler in the future.
def _remove_one_redundant_stack_unstack(in_graph_def):
"""Removes a stack->unstack pattern from in_graph_def in a returned graph.
Args:
in_graph_def: Graph def to use as input.
Returns:
Tuple (simplified_graph_def, changed_something), where changed_something
is true if anything was done.
"""
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
in_graph_def)
del name_to_seq_num
# TODO(aselle): Make this not hardcoded.
do_generic_pack_unpack = True
out = _graph_pb2.GraphDef()
out.library.CopyFrom(in_graph_def.library)
out.versions.CopyFrom(in_graph_def.versions)
for n in in_graph_def.node:
node_name = _tensor_name_base(n.name)
if not node_name.startswith("OpHintStack") and not n.op.startswith("Pack"):
continue
next_to_visit = [node_name]
visited = set()
unpack_nodes = set()
pack_node = node_name
# Find a pattern of unstack connected to a stack (with identities
# in between).
matches_pattern = True
is_hint_created_stack = False
while next_to_visit:
current_node_name = next_to_visit[0]
visited.add(current_node_name)
del next_to_visit[0]
node = name_to_node[current_node_name]
is_op_hint_stack = node.name.startswith("OpHintStack")
is_op_hint_unstack = node.name.startswith("OpHintUnstack")
if (node.op == "Identity" or is_op_hint_stack
or (do_generic_pack_unpack and node.op == "Pack")):
is_hint_created_stack |= is_op_hint_stack
next_to_visit += [
input_node for input_node in name_to_input_name[current_node_name]
if input_node not in visited
]
elif (is_op_hint_unstack
or (do_generic_pack_unpack and node.op == "Unpack")):
unpack_nodes.add(node.name)
is_hint_created_stack &= is_op_hint_unstack
else:
matches_pattern = False
break
visited.add(node.name)
if matches_pattern and len(unpack_nodes) == 1:
pack_node = node_name
# Check to see if anyone depends on the intermediate identity or the
# Unstacked form
no_external_dependency = True
for other_n in in_graph_def.node:
if other_n.name in visited: continue
for input_tensor in name_to_input_name[other_n.name]:
input_op = _tensor_name_base(input_tensor)
if input_op in visited and input_op != pack_node:
no_external_dependency = False
# Proceed with the substitution if the stack/unstack pair was created
# through hints, or if it was not but nobody is consuming anything
# between the stack and unstack.
if is_hint_created_stack or no_external_dependency:
end = unpack_nodes.pop()
end_input = name_to_node[end].input[0]
# All nodes that depended on the stack are rewired to consume the
# unstack's input tensor directly.
for other_n in in_graph_def.node:
node_name = _tensor_name_base(other_n.name)
if node_name not in visited:
new_node = _copy.deepcopy(other_n)
new_node.input[:] = [
(end_input if stripped == pack_node else
non_stripped) for stripped, non_stripped in zip(
name_to_input_name[node_name], new_node.input[:])
]
out.node.extend([new_node])
return out, True
return in_graph_def, False
def _remove_redundant_stack_unstack(graph_def):
curr = graph_def
del graph_def
changed_stuff = True
while changed_stuff:
curr, changed_stuff = _remove_one_redundant_stack_unstack(curr)
return curr
def _get_correct_mapping(original_index, nodes):
# Special handling for the case when the index is -1: map it to the
# last index.
if original_index == -1:
node_indices = sorted(nodes.keys())
return node_indices[-1]
return original_index
def _convert_op_hints_to_stubs_helper(
graph_def, write_callback=lambda sess, graph_def: None):
"""Converts a graph_def to a new graph_def where all op hints are stubbed.
Args:
graph_def: A graph def that we should convert.
write_callback: A function pointer that can be used to write intermediate
steps of graph transformation (optional).
Returns:
A new stubbed graph_def.
"""
hints = _find_all_hints_in_nodes(graph_def.node)
hints_q = []
for hint in _six.itervalues(hints):
hints_q.append((hint.level, hint.uuid))
hints_q.sort(key=lambda tup: tup[0])
curr_graph_def = graph_def
del graph_def # prevent using graph_def again (common source of error)
for i in range(len(hints_q) - 1, -1, -1):
level, hint_uuid = hints_q[i]
if level >= 2:
children_hints, curr_graph_def, function_def_nodes = _find_children_hints(
hints[hint_uuid], curr_graph_def)
# pylint: disable=superfluous-parens
assert (len(children_hints) > 0) # pylint: disable=g-explicit-length-test
# pylint: enable=superfluous-parens
# Re-wire the children hints' inputs/outputs, so a later child's inputs
# connect to the previous child's outputs.
children_inputs_mappings = hints[hint_uuid].children_inputs_mappings
for j, child_hint in enumerate(children_hints):
if j == 0:
for mapping in children_inputs_mappings["parent_first_child_input"]:
parent_input_index = _get_correct_mapping(
mapping["parent_ophint_input_index"], hints[hint_uuid].inputs)
child_input_index = _get_correct_mapping(
mapping["first_child_ophint_input_index"], child_hint.inputs)
child_hint.inputs[child_input_index] = hints[hint_uuid].inputs[
parent_input_index]
else:
for mapping in children_inputs_mappings[
"internal_children_input_output"]:
input_index = _get_correct_mapping(mapping["child_input_index"],
child_hint.inputs)
output_index = _get_correct_mapping(mapping["child_output_index"],
children_hints[j - 1].outputs)
child_hint.inputs[input_index] = children_hints[
j - 1].outputs[output_index]
if j == len(children_hints) - 1:
for mapping in children_inputs_mappings["parent_last_child_output"]:
parent_output_index = _get_correct_mapping(
mapping["parent_output_index"], hints[hint_uuid].outputs)
child_output_index = _get_correct_mapping(
mapping["child_output_index"], child_hint.outputs)
child_hint.outputs[child_output_index] = hints[hint_uuid].outputs[
parent_output_index]
for j, child_hint in enumerate(children_hints):
curr_graph_def = _convert_single_op_hint_to_stub(
child_hint, curr_graph_def, function_def_nodes,
j == len(children_hints) - 1)
else:
curr_graph_def = _convert_single_op_hint_to_stub(hints[hint_uuid],
curr_graph_def)
write_callback(curr_graph_def, "initial")
# The stubbing process can create stacks/unstacks in the case of LSTMs;
# remove them.
curr_graph_def = _remove_redundant_stack_unstack(curr_graph_def)
return curr_graph_def
def find_all_hinted_output_nodes(session=None, graph_def=None):
"""Find all Ophints output nodes in the graph.
This is used to get all the output nodes those are ophinted, it is important
for operation like convert_variables_to_constants keep all ophints structure.
Note: only one of session or graph_def should be used, not both.
Why this can be useful? Some TensorFlow ops (e.g. bidirectional rnn), can
generate multiple outputs for unfused subgraph. If not all output nodes are
consumed, graph optimization can potentially drop the unused nodes and cause
ophints in an invalid states (due to missing ophinted output nodes). So it's
important for us to find all those hinted output nodes and make sure they're
not discarded away.
Args:
session: A TensorFlow session that contains the graph to convert.
graph_def: A graph def that we should convert.
Returns:
A list of OpHints output nodes.
Raises:
ValueError: If both session and graph_def are provided, or if neither is.
"""
if session is not None and graph_def is not None:
raise ValueError("Provide only one of session and graph_def.")
hinted_outputs_nodes = []
if session is not None:
hints = _find_all_hints_in_nodes(session.graph_def.node)
elif graph_def is not None:
hints = _find_all_hints_in_nodes(graph_def.node)
else:
raise ValueError("Must specify session or graph_def as input.")
for hint in _six.itervalues(hints):
_, output_nodes = hint.flattened_inputs_and_outputs()
hinted_outputs_nodes.extend(output_nodes)
return hinted_outputs_nodes
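# A hedged usage sketch: when freezing a graph that contains OpHints, append
# the hinted output nodes so later stubbing still sees them (`sess` and
# `output_node_names` are assumptions, not part of this module):
#
#   hinted = find_all_hinted_output_nodes(session=sess)
#   frozen = tf.compat.v1.graph_util.convert_variables_to_constants(
#       sess, sess.graph_def, output_node_names + hinted)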
@_tf_export(v1=["lite.experimental.convert_op_hints_to_stubs"])
def convert_op_hints_to_stubs(session=None,
graph_def=None,
write_callback=lambda graph_def, comments: None):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Note: only one of session or graph_def should be used, not both.
Args:
session: A TensorFlow session that contains the graph to convert.
graph_def: A graph def that we should convert.
write_callback: A function pointer that can be used to write intermediate
steps of graph transformation (optional).
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
Raises:
ValueError: If both session and graph_def are provided, or if neither is.
"""
if session is not None and graph_def is not None:
raise ValueError("Provide only one of session and graph_def.")
if session is not None:
return _convert_op_hints_to_stubs_helper(session.graph_def, write_callback)
elif graph_def is not None:
return _convert_op_hints_to_stubs_helper(graph_def, write_callback)
else:
raise ValueError("Must specify session or graph_def as input.")
_allowed_symbols = [
"OpHint", "convert_op_hints_to_stubs", "convert_op_hints_to_stubs_new",
"find_all_hinted_output_nodes"
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/op_hint.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.python import wrap_toco
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
# Find the toco_from_protos binary using the resource loader if using from
# bazel; otherwise we are in a pip installation where console_scripts
# already has the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
_toco_from_proto_bin = ""
else:
_toco_from_proto_bin = _resource_loader.get_path_to_datafile(
"../toco/python/toco_from_protos")
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
_toco_from_proto_bin = "toco_from_protos"
def _try_convert_to_unicode(output):
if output is None:
return u""
if isinstance(output, bytes):
try:
return output.decode()
except UnicodeDecodeError:
pass
return output
@_tf_export("lite.OpsSet")
class OpsSet(enum.Enum):
"""Enum class defining the sets of ops available to generate TFLite models.
WARNING: Experimental interface, subject to change.
"""
# Convert model using TensorFlow Lite builtin ops.
TFLITE_BUILTINS = "TFLITE_BUILTINS"
# Convert model using TensorFlow ops. Not all TensorFlow ops are available.
# WARNING: Experimental interface, subject to change.
SELECT_TF_OPS = "SELECT_TF_OPS"
# Convert model using only TensorFlow Lite quantized int8 operations.
# Specifying this will throw an error for operations that do not yet have
# quantized implementations.
TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"
def __str__(self):
return self.value
@staticmethod
def get_options():
"""Returns a list of OpsSet options as a list of strings."""
return [str(option) for option in list(OpsSet)]
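# Illustrative sketch (hedged: exact converter attributes vary by release;
# `sess`, `in_tensor`, and `out_tensor` are assumptions). The enum values
# are typically fed to a converter's target spec:
#
#   converter = tf.compat.v1.lite.TFLiteConverter.from_session(
#       sess, [in_tensor], [out_tensor])
#   converter.target_spec.supported_ops = [
#       OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]
#   tflite_model = converter.convert()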
class ConverterError(Exception):
"""Raised when an error occurs during model conversion."""
pass
def toco_convert_protos(model_flags_str,
toco_flags_str,
input_data_str,
debug_info_str=None,
enable_mlir_converter=False):
"""Convert `input_data_str` according to model and toco parameters.
Unless you know what you are doing, consider using
the more friendly `tf.compat.v1.lite.toco_convert`.
Args:
model_flags_str: Serialized proto describing model properties, see
`toco/model_flags.proto`.
toco_flags_str: Serialized proto describing conversion properties, see
`toco/toco_flags.proto`.
input_data_str: Input data in serialized form (e.g. a graphdef is common)
debug_info_str: Serialized `GraphDebugInfo` proto describing logging
information. (default None)
enable_mlir_converter: Enables the MLIR converter instead of the TOCO
converter. (default False)
Returns:
Converted model in serialized form (e.g. a TFLITE model is common).
Raises:
ConverterError: When conversion fails in TFLiteConverter, usually due to
ops not being supported.
RuntimeError: When conversion fails, an exception is raised with the error
message embedded.
"""
# TODO(aselle): When toco does not use fatal errors for failure, we can
# switch this on.
if not _toco_from_proto_bin:
try:
model_str = wrap_toco.wrapped_toco_convert(model_flags_str,
toco_flags_str, input_data_str,
debug_info_str,
enable_mlir_converter)
return model_str
except Exception as e:
raise ConverterError(str(e))
# Windows and TemporaryFile are not that useful together,
# since you cannot have two readers/writers. So we have to
# make the temporaries and close and delete them explicitly.
toco_filename, model_filename, input_filename, output_filename = (
None, None, None, None)
try:
# Build all input files
with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
_tempfile.NamedTemporaryFile(delete=False) as fp_model, \
_tempfile.NamedTemporaryFile(delete=False) as fp_input, \
_tempfile.NamedTemporaryFile(delete=False) as fp_debug:
toco_filename = fp_toco.name
input_filename = fp_input.name
model_filename = fp_model.name
debug_filename = fp_debug.name
fp_model.write(model_flags_str)
fp_toco.write(toco_flags_str)
fp_input.write(input_data_str)
debug_info_str = debug_info_str if debug_info_str else ""
# if debug_info_str contains a "string value", then the call to
# fp_debug.write(debug_info_str) will fail with the following error
#
# TypeError: a bytes-like object is required, not 'str'
#
# Some of the subtests within the "convert_test" unit-test fail
# with the error shown above. So watch out for that scenario and
# convert debug_info_str to bytes where needed
if not isinstance(debug_info_str, bytes):
fp_debug.write(debug_info_str.encode("utf-8"))
else:
fp_debug.write(debug_info_str)
# Reserve an output file
with _tempfile.NamedTemporaryFile(delete=False) as fp:
output_filename = fp.name
# Run
cmd = [
_toco_from_proto_bin,
model_filename,
toco_filename,
input_filename,
output_filename,
"--debug_proto_file={}".format(debug_filename),
]
if enable_mlir_converter:
cmd.append("--enable_mlir_converter")
cmdline = " ".join(cmd)
is_windows = _platform.system() == "Windows"
proc = _subprocess.Popen(
cmdline,
shell=True,
stdout=_subprocess.PIPE,
stderr=_subprocess.STDOUT,
close_fds=not is_windows)
stdout, stderr = proc.communicate()
exitcode = proc.returncode
if exitcode == 0:
with open(output_filename, "rb") as fp:
return fp.read()
else:
stdout = _try_convert_to_unicode(stdout)
stderr = _try_convert_to_unicode(stderr)
raise ConverterError("See console for info.\n%s\n%s\n" % (stdout, stderr))
finally:
# Must manually cleanup files.
for filename in [
toco_filename, input_filename, model_filename, output_filename]:
try:
_os.unlink(filename)
except (OSError, TypeError):
pass
def build_toco_convert_protos(input_tensors,
output_tensors,
inference_type=lite_constants.FLOAT,
inference_input_type=None,
input_format=lite_constants.TENSORFLOW_GRAPHDEF,
input_shapes=None,
output_format=lite_constants.TFLITE,
quantized_input_stats=None,
default_ranges_stats=None,
drop_control_dependency=True,
reorder_across_fake_quant=False,
allow_custom_ops=False,
change_concat_input_ranges=False,
post_training_quantize=False,
quantize_to_float16=False,
dump_graphviz_dir=None,
dump_graphviz_video=False,
target_ops=None,
allow_nonexistent_arrays=False,
debug_info=None):
"""Builds protocol buffers describing a conversion of a model using TOCO.
Typically this is to convert from TensorFlow GraphDef to TFLite, in which
case the default `input_format` and `output_format` are sufficient.
Args:
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
inference_type: Target data type of real-number arrays in the output file.
Must be `{tf.float32, tf.uint8}`. (default tf.float32)
inference_input_type: Target data type of real-number input arrays. Allows
for a different type for input arrays in the case of quantization.
Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
input_format: Type of data to read. Currently must be
`{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
input_shapes: Input array shape. It needs to be a list of the same length
as `input_tensors`, or None. (default None)
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: List of tuples of floats representing the mean and
standard deviation. Each tuple maps to the corresponding input tensor.
Only needed if `inference_input_type` is `QUANTIZED_UINT8`.
real_input_value = (quantized_input_value - mean_value) / std_dev_value.
(default None)
default_ranges_stats: Tuple of integers representing (min, max) range values
for all arrays without a specified range. Intended for experimenting with
quantization via "dummy quantization". (default None)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. Changes
the ranges of concat operator overlap when true. (default False)
post_training_quantize: Boolean indicating whether to quantize the weights
of the converted float model. Model size will be reduced and there will be
latency improvements (at the cost of accuracy).
(default False)
quantize_to_float16: Boolean indicating whether to convert float buffers
to float16. (default False)
dump_graphviz_dir: Full filepath of folder to dump the graphs at various
stages of processing GraphViz .dot files. Preferred over
--output_format=GRAPHVIZ_DOT in order to keep the requirements of the
output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the graph after
every graph transformation. (default False)
target_ops: Experimental flag, subject to change. Set of OpsSet
options indicating which converter to use.
(default set([OpsSet.TFLITE_BUILTINS]))
allow_nonexistent_arrays: Allow specifying array names that don't exist
or are unused in the final graph. (default False)
debug_info: `GraphDebugInfo` proto containing the stack traces for the
original nodes referred by the converted graph.
Returns:
model_flags, toco_flags, debug_info: three protocol buffers describing the
conversion process and debug information.
Raises:
ValueError:
If the input tensor type is unknown.
If mean_values or std_dev_values are missing.
RuntimeError: If TOCO fails to convert (in which case the runtime error's
error text will contain the TOCO error log)
"""
toco = _toco_flags_pb2.TocoFlags()
toco.input_format = input_format
toco.output_format = output_format
toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
if inference_input_type:
toco.inference_input_type = util.convert_dtype_to_tflite_type(
inference_input_type)
else:
toco.inference_input_type = toco.inference_type
toco.drop_control_dependency = drop_control_dependency
toco.reorder_across_fake_quant = reorder_across_fake_quant
toco.allow_custom_ops = allow_custom_ops
toco.post_training_quantize = post_training_quantize
toco.quantize_to_float16 = quantize_to_float16
if default_ranges_stats:
toco.default_ranges_min = default_ranges_stats[0]
toco.default_ranges_max = default_ranges_stats[1]
if dump_graphviz_dir:
toco.dump_graphviz_dir = dump_graphviz_dir
toco.dump_graphviz_include_video = dump_graphviz_video
if target_ops:
if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
toco.enable_select_tf_ops = True
elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
toco.enable_select_tf_ops = True
toco.force_select_tf_ops = True
model = _model_flags_pb2.ModelFlags()
model.change_concat_input_ranges = change_concat_input_ranges
for idx, input_tensor in enumerate(input_tensors):
input_array = model.input_arrays.add()
input_array.name = util.get_tensor_name(input_tensor)
input_array.data_type = util.convert_dtype_to_tflite_type(
input_tensor.dtype)
if toco.inference_input_type == _types_pb2.QUANTIZED_UINT8:
if not quantized_input_stats:
raise ValueError("std_dev and mean must be defined when "
"inference_input_type is QUANTIZED_UINT8.")
input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
if input_shapes is None:
shape = input_tensor.shape
else:
shape = input_shapes[idx]
input_array.shape.dims.extend(map(int, shape))
for output_tensor in output_tensors:
model.output_arrays.append(util.get_tensor_name(output_tensor))
model.allow_nonexistent_arrays = allow_nonexistent_arrays
return model, toco, debug_info
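# Illustrative sketch (editor's addition, not part of the original module):
# how `build_toco_convert_protos` is typically driven. The placeholder name
# and shape below are hypothetical; defaults are used for all converter
# options.
def _example_build_protos_sketch():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.framework import ops
  from tensorflow.python.ops import array_ops
  with ops.Graph().as_default():
    in_tensor = array_ops.placeholder(shape=[1, 4], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    # model_flags names the input/output arrays and their shapes; toco_flags
    # carries the converter options (inference type, quantization, dumps).
    return build_toco_convert_protos(
        input_tensors=[in_tensor], output_tensors=[out_tensor])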
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
enable_mlir_converter, *args, **kwargs):
""""Convert a model using TOCO.
This function is used to convert GraphDefs that cannot be loaded into
TensorFlow to TFLite. Conversion can be customized by providing arguments
that are forwarded to `build_toco_convert_protos` (see documentation for
details).
Args:
input_data: Input data (i.e. often `sess.graph_def`).
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes
(e.g., [("foo", [1, 16, 16, 3])]). Use only when graph cannot be loaded
into TensorFlow and when `input_tensors` is None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `output_tensors` is None.
(default None)
enable_mlir_converter: Enables the MLIR converter instead of the TOCO
converter.
*args: See `build_toco_convert_protos`.
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example, if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
model_flags, toco_flags, _ = build_toco_convert_protos(
input_tensors=[], output_tensors=[], *args, **kwargs)
for idx, (name, shape) in enumerate(input_arrays_with_shape):
input_array = model_flags.input_arrays.add()
if toco_flags.inference_input_type == _types_pb2.QUANTIZED_UINT8:
if (("quantized_input_stats" not in kwargs) or
(not kwargs["quantized_input_stats"])):
raise ValueError("std_dev and mean must be defined when "
"inference_input_type is QUANTIZED_UINT8.")
input_array.mean_value, input_array.std_value = kwargs[
"quantized_input_stats"][idx]
input_array.name = name
input_array.shape.dims.extend(map(int, shape))
for name in output_arrays:
model_flags.output_arrays.append(name)
data = toco_convert_protos(
model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString(),
enable_mlir_converter=enable_mlir_converter)
return data
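# Illustrative sketch (editor's addition): driving the GraphDef-only entry
# point above for a frozen graph that TensorFlow itself cannot load. The
# array names and shape are hypothetical.
def _example_graph_def_conversion_sketch(graph_def):
  return toco_convert_graph_def(
      graph_def,
      input_arrays_with_shape=[("input", [1, 16, 16, 3])],
      output_arrays=["output"],
      enable_mlir_converter=False)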
def toco_convert_impl(input_data, input_tensors, output_tensors,
enable_mlir_converter, *args, **kwargs):
""""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details).
Args:
input_data: Input data (i.e. often `sess.graph_def`).
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
enable_mlir_converter: Enables the MLIR converter instead of the TOCO
converter.
*args: See `build_toco_convert_protos`.
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example, if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
model_flags, toco_flags, debug_info = build_toco_convert_protos(
input_tensors, output_tensors, *args, **kwargs)
debug_info_str = debug_info.SerializeToString() if debug_info else None
data = toco_convert_protos(
model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString(),
debug_info_str=debug_info_str,
enable_mlir_converter=enable_mlir_converter)
return data
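# Illustrative sketch (editor's addition): the tensor-based entry point above
# is what lite.TFLiteConverter ultimately invokes. It assumes `sess`,
# `in_tensor` and `out_tensor` come from a tf.compat.v1 graph.
def _example_impl_conversion_sketch(sess, in_tensor, out_tensor):
  return toco_convert_impl(
      sess.graph_def, [in_tensor], [out_tensor],
      enable_mlir_converter=False)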
@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
"""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details). This function has
been deprecated. Please use `lite.TFLiteConverter` instead.
Args:
input_data: Input data (i.e. often `sess.graph_def`).
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
*args: See `build_toco_convert_protos`.
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example, if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
enable_mlir_converter = kwargs.get("enable_mlir_converter", False)
return toco_convert_impl(input_data, input_tensors, output_tensors,
enable_mlir_converter, *args, **kwargs)
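# Editor's note: `toco_convert` is deprecated; the recommended path is the
# TFLiteConverter API, e.g. (sketch, assuming a tf.compat.v1 session `sess`
# with tensors `in_tensor`/`out_tensor`):
#
#   converter = lite.TFLiteConverter.from_session(sess, [in_tensor], [out_tensor])
#   tflite_model = converter.convert()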
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/convert.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to MLIR-TFLite converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import tracking
class FromSessionTest(test_util.TensorFlowTestCase):
def testFloat(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_enable_mlir_converter = True
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testString(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_enable_mlir_converter = True
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.string_, input_details[0]['dtype'])
self.assertTrue(([4] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('Reshape', output_details[0]['name'])
self.assertEqual(np.string_, output_details[0]['dtype'])
self.assertTrue(([2, 2] == output_details[0]['shape']).all())
def testQuantization(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
converter.experimental_enable_mlir_converter = True
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
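# Editor's note: quantized_input_stats (mean, std_dev) map to the TFLite
# quantization parameters as scale = 1 / std_dev and zero_point = mean, so
# (0., 1.) yields the (1., 0.) checked below.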
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.),
input_details[0]['quantization']) # scale, zero_point
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((1., 0.),
input_details[1]['quantization']) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertGreater(output_details[0]['quantization'][0], 0) # scale
def testScalarValid(self):
# Construct a graph using a scalar (empty shape) input.
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[])
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test conversion with the scalar input shape.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_enable_mlir_converter = True
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertEqual(len(input_details[0]['shape']), 0)
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertEqual(len(output_details[0]['shape']), 0)
# Validate inference using the scalar inputs/outputs.
test_input = np.array(4.0, dtype=np.float32)
expected_output = np.array(8.0, dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
def testPostTrainingQuantize(self):
self.skipTest('b/124315492')
np.random.seed(0)
with ops.Graph().as_default():
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1],
[out_tensor])
float_converter.experimental_enable_mlir_converter = True
float_tflite = float_converter.convert()
# Convert quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.experimental_enable_mlir_converter = True
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_tflite = quantized_converter.convert()
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
@test_util.run_in_graph_and_eager_modes
def testFunctions(self):
"""Tests tf.function in 1.X."""
@def_function.function
def plus_placeholder(x, placeholder):
return x + placeholder
with ops.Graph().as_default():
placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[1], name='input')
variable_node = variables.Variable(1.0, name='variable_node')
defun_node = plus_placeholder(variable_node, placeholder)
output_node = math_ops.multiply(defun_node, 2.0, name='output_node')
# Initialize variables in the model.
sess = session.Session()
sess.run(variables.variables_initializer([variable_node]))
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [placeholder],
[output_node])
converter.experimental_enable_mlir_converter = True
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output_node', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
class FromConcreteFunctionTest(test_util.TensorFlowTestCase):
def _evaluateTFLiteModel(self, tflite_model, input_data):
"""Evaluates the model on the `input_data`."""
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
for input_tensor, tensor_data in zip(input_details, input_data):
interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())
interpreter.invoke()
return [
interpreter.get_tensor(details['index']) for details in output_details
]
def _getSimpleVariableModel(self):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
return root
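# Editor's note: tracing `root.f` with an example input yields a
# ConcreteFunction, the unit consumed by the V2 converter
# (TFLiteConverterV2.from_concrete_functions) in the tests below.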
@test_util.run_v2_only
def testFloat(self):
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_enable_mlir_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testControlFlow(self):
input_data = {
'x': constant_op.constant([1., 2.], shape=[1, 2]),
'b': constant_op.constant(True)
}
weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32)
def true_fn(x):
return math_ops.matmul(x, weights)
def false_fn(x):
return math_ops.add(x, weights)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)
])
def model(x, b):
return control_flow_ops.cond(
b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_enable_mlir_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(**input_data)
actual_value = self._evaluateTFLiteModel(
tflite_model, [input_data['x'], input_data['b']])[0]
np.testing.assert_almost_equal(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testStaticRnn(self):
input_data = constant_op.constant(
np.array(np.random.random_sample((3, 10)), dtype=np.float32))
cell = rnn_cell_impl.LSTMCell(10)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[3, 10], dtype=dtypes.float32)
])
def model(x):
seq = array_ops.split(x, 3, 0)
return rnn.static_rnn(
cell, seq, dtype=dtypes.float32, sequence_length=[1])
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_enable_mlir_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)[0]
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
for expected, actual in zip(expected_value, actual_value):
np.testing.assert_almost_equal(expected.numpy(), actual)
@test_util.run_v2_only
def testLoop(self):
input_data = constant_op.constant([1., 2., 3., 4.], shape=[2, 2])
weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32)
def condition(x):
return math_ops.reduce_sum(x) < 100
def body(x):
return math_ops.add(x, weights)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[2, 2], dtype=dtypes.float32)
])
def model(x):
return control_flow_ops.while_loop(condition, body, [x])
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_enable_mlir_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
np.testing.assert_almost_equal(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testDynamicRnn(self):
input_data = constant_op.constant(
np.array(np.random.random_sample((3, 10, 10)), dtype=np.float32))
cell = rnn_cell_impl.LSTMCell(10)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[3, 10, 10], dtype=dtypes.float32)
])
def model(x):
return rnn.dynamic_rnn(cell, x, dtype=dtypes.float32)
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_enable_mlir_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
for expected, actual in zip(expected_value, actual_value):
if isinstance(expected, ops.EagerTensor):
expected = expected.numpy()
else:
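# Dynamic RNN returns an LSTMStateTuple; compare against its cell state.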
expected = expected.c.numpy()
np.testing.assert_almost_equal(expected, actual)
@test_util.run_v2_only
def testKerasLSTM(self):
self.skipTest('b/138657502')
input_data = constant_op.constant(
np.array(np.random.random_sample((10, 10, 10)), dtype=np.float32))
model = keras.models.Sequential(
[keras.layers.LSTM(units=10, input_shape=(10, 10))])
run_model = def_function.function(model.__call__)
concrete_func = run_model.get_concrete_function(
tensor_spec.TensorSpec((10, 10, 10), dtype=dtypes.float32))
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_enable_mlir_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
for expected, actual in zip(expected_value, actual_value):
np.testing.assert_almost_equal(expected, actual)
class TestFlexMode(test_util.TensorFlowTestCase):
def testSession(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_enable_mlir_converter = True
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
tflite_model = converter.convert()
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Flex delegate before inference.',
str(error.exception))
@test_util.run_v2_only
def testConcreteFunc(self):
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_enable_mlir_converter = True
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
tflite_model = converter.convert()
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Flex delegate before inference.',
str(error.exception))
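# Editor's note: models converted with SELECT_TF_OPS embed TensorFlow ops, so
# they run only when the Flex delegate is linked into the TFLite runtime;
# that is why plain allocate_tensors() raises in both tests above.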
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/lite_mlir_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python.convert import ConverterError
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class TestModels(test_util.TensorFlowTestCase):
def assertValidDebugInfo(self, debug_info):
"""Verify the DebugInfo is valid."""
file_names = set()
for file_path in debug_info.files:
file_names.add(os.path.basename(file_path))
# To make the test independent of how the nodes are created, we only assert
# the name of this test file.
self.assertIn('lite_test.py', file_names)
self.assertNotIn('lite_v2_test.py', file_names)
class FromConstructor(TestModels):
# Tests invalid constructors using a dummy value for the GraphDef.
def testInvalidConstructor(self):
message = ('If input_tensors and output_tensors are None, both '
'input_arrays_with_shape and output_arrays must be defined.')
# `output_arrays` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(
None, None, [], input_arrays_with_shape=[('input', [3, 9])])
self.assertEqual(message, str(error.exception))
# `input_arrays_with_shape` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(None, [], None, output_arrays=['output'])
self.assertEqual(message, str(error.exception))
# Tests valid constructors using a dummy value for the GraphDef.
def testValidConstructor(self):
converter = lite.TFLiteConverter(
None,
None,
None,
input_arrays_with_shape=[('input', [3, 9])],
output_arrays=['output'])
self.assertFalse(converter._has_valid_tensors())
self.assertEqual(converter.get_input_arrays(), ['input'])
with self.assertRaises(ValueError) as error:
converter._set_batch_size(1)
self.assertEqual(
'The batch size cannot be set for this model. Please use '
'input_shapes parameter.', str(error.exception))
converter = lite.TFLiteConverter(None, ['input_tensor'], ['output_tensor'])
self.assertTrue(converter._has_valid_tensors())
class FromSessionTest(TestModels, parameterized.TestCase):
def testFloat(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testString(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.string_, input_details[0]['dtype'])
self.assertTrue(([4] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('Reshape', output_details[0]['name'])
self.assertEqual(np.string_, output_details[0]['dtype'])
self.assertTrue(([2, 2] == output_details[0]['shape']).all())
# TODO(b/122659643): Test setting/getting string data via the python
# interpreter API after support has been added.
def testQuantization(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.),
input_details[0]['quantization']) # scale, zero_point
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((1., 0.),
input_details[1]['quantization']) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertGreater(output_details[0]['quantization'][0], 0)  # scale
def testQuantizationInvalid(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'Quantization input stats are not available for input tensors '
'\'inputB\'.', str(error.exception))
def testIntermediateInputArray(self):
"""Convert a model from an intermediate input array."""
with ops.Graph().as_default():
in_tensor_init = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
in_tensor_final = in_tensor_init + in_tensor_init
out_tensor = in_tensor_final + in_tensor_final
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor_final],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('add', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSizeNoneInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test None as shape.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual('Provide an input shape for input array \'Placeholder\'.',
str(error.exception))
def testScalarValid(self):
# Construct a graph using a scalar (empty shape) input.
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[])
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test conversion with the scalar input shape.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([] == output_details[0]['shape']).all())
# Validate inference using the scalar inputs/outputs.
test_input = np.array(4.0, dtype=np.float32)
expected_output = np.array(8.0, dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
def testSizeInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'Placeholder\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
def testBatchSizeValid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testBatchSizeNonZero(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[None, 4], dtype=dtypes.float32, name='input1')
in_tensor_2 = array_ops.placeholder(
shape=[4, 10], dtype=dtypes.float32, name='input2')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2)
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertEqual('input1', input_details[0]['name'])
self.assertTrue(([1, 4] == input_details[0]['shape']).all())
self.assertEqual('input2', input_details[1]['name'])
self.assertTrue(([4, 10] == input_details[1]['shape']).all())
def testFreezeGraph(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + var
sess = session.Session()
sess.run(_global_variables_initializer())
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testGraphviz(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.output_format = lite_constants.GRAPHVIZ_DOT
graphviz_output = converter.convert()
self.assertTrue(graphviz_output)
def testDumpGraphviz(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure interpreter is able to allocate and check graphviz data.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
num_items_graphviz = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
converter.dump_graphviz_video = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure graphviz folder has more data after using video flag.
num_items_graphviz_video = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz_video > num_items_graphviz)
def testInferenceInputType(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_input_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testDefaultRangesStats(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
converter.default_ranges_stats = (0, 6) # min, max
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertGreater(output_details[0]['quantization'][0], 0)  # scale
def testPostTrainingQuantizeDeprecatedAttribute(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
self.assertFalse(quantized_converter.post_training_quantize)
quantized_converter.post_training_quantize = True
self.assertTrue(quantized_converter.post_training_quantize)
self.assertEqual(quantized_converter.optimizations, [lite.Optimize.DEFAULT])
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
def testPostTrainingQuantize(self):
np.random.seed(0)
with ops.Graph().as_default():
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1],
[out_tensor])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
def _getCalibrationQuantizeModel(self):
np.random.seed(0)
inp = array_ops.placeholder(
dtype=dtypes.float32, shape=(1, 5, 5, 3), name='input')
conv = nn_ops.conv2d(
inp,
filter=array_ops.ones([3, 3, 3, 16]),
strides=[1, 1, 1, 1],
padding='SAME')
output = nn_ops.relu(conv, name='output')
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
return (inp, output, calibration_gen)
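# Editor's note: a representative dataset is a callable returning a generator
# that yields lists of input arrays (one per model input); the converter runs
# these samples through the float graph to calibrate activation ranges before
# integer quantization.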
def testPostTrainingCalibrateAndQuantize(self):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
def testCalibrateAndQuantizeBuiltinInt8(self):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert model by specifying target spec (instead of optimizations), since
# when targeting an integer-only backend, quantization is mandatory.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
@parameterized.named_parameters(
# Quantize to Float16 even if rep data provided.
('UseRepresentativeData', True, False, True, False, False),
# Quantize to Float16 if no rep data provided.
('NoRepresentativeData', False, False, True, False, False),
# Post training quantization if both rep data and int8 included.
('UseRepresentativeDataIncludeInt8', True, True, False, False, True),
# Error if no rep data and int8 included.
('NoRepresentativeDataIncludeInt8', False, True, False, True, False))
def testQuantizeFloat16(self, use_rep_data, include_int8,
is_float16_quantized, is_error,
is_post_training_quantized):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
interpreter = Interpreter(model_content=float_tflite)
interpreter.allocate_tensors()
self.assertEqual(interpreter.get_tensor_details()[0]['name'], 'Conv2D_bias')
self.assertEqual(interpreter.get_tensor_details()[0]['dtype'],
lite.constants.FLOAT)
# Convert model to quantized version
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.target_spec.supported_types = [lite.constants.FLOAT16]
if include_int8:
quantized_converter.target_spec.supported_types.append(
lite.constants.INT8)
if use_rep_data:
quantized_converter.representative_dataset = calibration_gen
if is_error:
with self.assertRaises(ValueError) as error:
quantized_converter.convert()
self.assertEqual(
'representative_dataset is required when specifying '
'TFLITE_BUILTINS_INT8 or INT8 supported types.', str(error.exception))
else:
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
self.assertEqual(interpreter.get_tensor_details()[0]['name'],
'Conv2D_bias')
if is_float16_quantized:
# Verify that the bias constant is of float16 type.
self.assertEqual(interpreter.get_tensor_details()[0]['dtype'],
lite.constants.FLOAT16)
elif is_post_training_quantized:
# Verify that the bias constant is of int32 type.
self.assertEqual(interpreter.get_tensor_details()[0]['dtype'],
lite.constants.INT32)
else:
raise ValueError('Invalid test options.')
def testInvalidQuantizeFloat16(self):
with ops.Graph().as_default():
inp, output, _ = self._getCalibrationQuantizeModel()
sess = session.Session()
# Specify float16 quantization
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.target_spec.supported_types = [lite.constants.FLOAT16]
# Specify only int8 builtin ops
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
with self.assertRaises(ValueError) as error:
quantized_converter.convert()
self.assertEqual(
'TFLITE_BUILTINS_INT8 requires smallest supported type to be INT8.',
str(error.exception))
def testInvalidPostTrainingQuantize(self):
np.random.seed(0)
with ops.Graph().as_default():
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Attempt to convert to quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
# Restricting to int8 type only
quantized_converter.target_spec.supported_types = [lite.constants.INT8]
# A representative dataset is required for full fixed point quantization.
with self.assertRaises(ValueError) as error:
quantized_converter.convert()
self.assertEqual(
'representative_dataset is required when specifying '
'TFLITE_BUILTINS_INT8 or INT8 supported types.', str(error.exception))
def testPostTrainingCalibrateAndQuantizeFloatNotAllowed(self):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_converter.target_spec.supported_types = [lite.constants.INT8]
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# Ensure that restricting supported types to int8 forces
# all fixed point ops/tensors in converter.
self.assertTrue(quantized_converter._is_int8_target_required())
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
def testPostTrainingCalibrateAndQuantizeInt8Inputs(self):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.inference_input_type = lite_constants.INT8
quantized_converter.inference_output_type = lite_constants.INT8
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The input and output types should be int8.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.int8, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.int8, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
def testFloatTocoConverter(self):
"""Tests deprecated test TocoConverter."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the interpreter is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
def testMultipleOutputNodeNames(self):
"""Tests converting a graph with an op that have multiple outputs."""
with ops.Graph().as_default():
input_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.float32)
out0, out1, out2, out3 = array_ops.split(
input_tensor, [1, 1, 1, 1], axis=0)
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [input_tensor],
[out0, out1, out2, out3])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
interpreter.set_tensor(input_details[0]['index'],
np.asarray([1.0, 2.0, 3.0, 4.0], dtype=np.float32))
interpreter.invoke()
output_details = interpreter.get_output_details()
self.assertEqual(4, len(output_details))
self.assertEqual(1.0, interpreter.get_tensor(output_details[0]['index']))
self.assertEqual(2.0, interpreter.get_tensor(output_details[1]['index']))
self.assertEqual(3.0, interpreter.get_tensor(output_details[2]['index']))
self.assertEqual(4.0, interpreter.get_tensor(output_details[3]['index']))
@test_util.run_in_graph_and_eager_modes
def testFunctions(self):
"""Tests tf.function in 1.X."""
@def_function.function
def plus_placeholder(x, placeholder):
return x + placeholder
with ops.Graph().as_default():
placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[1], name='input')
variable_node = variables.Variable(1.0, name='variable_node')
defun_node = plus_placeholder(variable_node, placeholder)
output_node = math_ops.multiply(defun_node, 2.0, name='output_node')
# Initialize variables in the model.
sess = session.Session()
sess.run(variables.variables_initializer([variable_node]))
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [placeholder],
[output_node])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output_node', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInferenceInputOutputTypeFloatDefault(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testInferenceInputOutputTypeQuantizedUint8Default(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)}  # (mean, std_dev): float = (quantized - mean) / std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testReusingConverterWithDifferentPostTrainingQuantization(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.post_training_quantize = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
converter.post_training_quantize = False
tflite_model = converter.convert()
self.assertTrue(tflite_model)
def testResizingIntermediateDynamicTensor(self):
# This is a regression test for the case where shape of dynamic output
# tensors changes between invocations.
# See also https://github.com/tensorflow/tensorflow/issues/26549
with ops.Graph().as_default():
input_tensor = array_ops.placeholder(shape=[1, 1], dtype=dtypes.float32)
input2_tensor = array_ops.placeholder(shape=[1], dtype=dtypes.float32)
# The bug is triggered only when the dynamic tensor is intermediate, so put
# some other ops around it.
neg = math_ops.negative(input2_tensor)
padding = array_ops.placeholder(shape=[2, 2], dtype=dtypes.int32)
output_tensor = array_ops.pad(input_tensor, padding) + neg
sess = session.Session()
converter = lite.TFLiteConverter.from_session(
sess, [input_tensor, padding, input2_tensor], [output_tensor])
tflite_model = converter.convert()
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
interpreter.set_tensor(input_details[1]['index'],
np.array([[1, 1], [1, 1]], dtype=np.int32))
interpreter.invoke()
# Without the fix, invocation will fail when changing the shape of
# intermediate dynamic tensors.
interpreter.set_tensor(input_details[1]['index'],
np.array([[2, 2], [2, 2]], dtype=np.int32))
interpreter.invoke()
def testGraphDebugInfo(self):
"""Test a session has debug info captured."""
@def_function.function
def plus_placeholder(x, placeholder):
return x + placeholder
with ops.Graph().as_default():
placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[1], name='input')
variable_node = variables.Variable(1.0, name='variable_node')
defun_node = plus_placeholder(variable_node, placeholder)
output_node = math_ops.multiply(defun_node, 2.0, name='output_node')
# Initialize variables in the model.
sess = session.Session()
sess.run(variables.variables_initializer([variable_node]))
converter = lite.TFLiteConverter.from_session(sess, [placeholder],
[output_node])
converter.convert()
self.assertValidDebugInfo(converter._debug_info)
# Check the add node in the inlined function is included.
func = sess.graph.as_graph_def().library.function[0].signature.name
self.assertIn((func + 'add'), converter._debug_info.traces)
class FromFrozenGraphFile(test_util.TensorFlowTestCase):
def testFloat(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatWithShapesArray(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'Placeholder': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
def testFreezeGraph(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + var
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Ensure the graph with variables cannot be converted.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual('Please freeze the graph using freeze_graph.py.',
str(error.exception))
def testPbtxt(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
write_graph(sess.graph_def, '', graph_def_file, True)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInvalidFileNotFound(self):
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph('invalid_file', ['Placeholder'],
['add'])
self.assertEqual('File \'invalid_file\' does not exist.',
str(error.exception))
def testInvalidFileBadData(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write('bad data')
temp_file.flush()
# Attempts to convert the invalid model.
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
'Unable to parse input file \'{}\'.'.format(graph_def_file),
str(error.exception))
def testFloatTocoConverter(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
def testGraphDebugInfo(self):
"""Test a frozen graph doesn't have debug info captured."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
converter.convert()
# GraphDebugInfo should be none for frozen graph.
self.assertFalse(converter._debug_info)
class FromFrozenGraphObjectDetection(test_util.TensorFlowTestCase):
def _initObjectDetectionArgs(self):
# Initializes the arguments required for the object detection model.
# Looks for the model file which is saved in a different location internally
# and externally.
filename = resource_loader.get_path_to_datafile('testdata/tflite_graph.pb')
if not os.path.exists(filename):
filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
'../tflite_mobilenet_ssd_quant_protobuf/tflite_graph.pb')
if not os.path.exists(filename):
raise IOError("File '{0}' does not exist.".format(filename))
self._graph_def_file = filename
self._input_arrays = ['normalized_input_image_tensor']
self._output_arrays = [
'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'
]
self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}
def testTFLiteGraphDef(self):
# Tests the object detection model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
converter = lite.TFLiteConverter.from_frozen_graph(self._graph_def_file,
self._input_arrays,
self._output_arrays,
self._input_shapes)
converter.allow_custom_ops = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(4, len(output_details))
self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 10, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('TFLite_Detection_PostProcess:1',
output_details[1]['name'])
self.assertTrue(([1, 10] == output_details[1]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:2',
output_details[2]['name'])
self.assertTrue(([1, 10] == output_details[2]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:3',
output_details[3]['name'])
self.assertTrue(([1] == output_details[3]['shape']).all())
def testTFLiteGraphDefMissingShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# Missing `input_shapes`.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(self._graph_def_file,
self._input_arrays,
self._output_arrays)
self.assertEqual('input_shapes must be defined for this model.',
str(error.exception))
def testTFLiteGraphDefInvalidShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# `input_shapes` does not contain the names in `input_arrays`.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file,
self._input_arrays,
self._output_arrays,
input_shapes={'invalid-value': [1, 19]})
self.assertEqual(
'input_shapes must contain a value for each item in input_array.',
str(error.exception))
class FromSavedModelTest(TestModels):
def _createSavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with ops.Graph().as_default():
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputB')
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputA')
out_tensor = in_tensor_1 + in_tensor_2
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def testSimpleModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testNoneBatchSize(self):
"""Test a SavedModel, with None in input tensor's shape."""
saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testOrderInputArrays(self):
"""Test a SavedModel ordering of input arrays."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir, input_arrays=['inputB', 'inputA'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Check case where input shape is given.
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir,
input_arrays=['inputA'],
input_shapes={'inputA': [1, 16, 16, 3]})
# Since we only partially specify the input, this is not allowed.
with self.assertRaises(ConverterError):
_ = converter.convert()
# Check case where input shape is None.
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})
# Since we only partially specify the input, this is not allowed.
with self.assertRaises(ConverterError):
_ = converter.convert()
def testSimpleModelTocoConverter(self):
"""Test a SavedModel with deprecated TocoConverter."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
def testGraphDebugInfo(self):
"""Test a SavedModel has debug info captured."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.convert()
self.assertValidDebugInfo(converter._debug_info)
class MyAddLayer(keras.layers.Layer):
def __init__(self, increment, **kwargs):
super(MyAddLayer, self).__init__(**kwargs)
self._increment = increment
def call(self, inputs):
return inputs + self._increment
def get_config(self):
config = super(MyAddLayer, self).get_config()
config['increment'] = self._increment
return config
class FromKerasFile(TestModels, parameterized.TestCase):
def setUp(self):
super(FromKerasFile, self).setUp()
self._keras_file = None
self._custom_objects = None
if not context.executing_eagerly():
keras.backend.clear_session()
def tearDown(self):
if self._keras_file:
os.remove(self._keras_file)
super(FromKerasFile, self).tearDown()
def _getSequentialModel(self, include_custom_layer=False):
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
if include_custom_layer:
model.add(MyAddLayer(1.0))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
fd, self._keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, self._keras_file)
finally:
os.close(fd)
if include_custom_layer:
self._custom_objects = {'MyAddLayer': MyAddLayer}
@parameterized.named_parameters(('_graph', context.graph_mode),
('_eager', context.eager_mode))
def testSequentialModel(self, test_context):
"""Test a Sequential tf.keras model with default inputs."""
with test_context():
self._getSequentialModel()
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(self._keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
@parameterized.named_parameters(('_graph', context.graph_mode),
('_eager', context.eager_mode))
def testCustomLayer(self, test_context):
"""Test a Sequential tf.keras model with default inputs."""
with test_context():
self._getSequentialModel(include_custom_layer=True)
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, custom_objects=self._custom_objects)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(
self._keras_file, custom_objects=self._custom_objects)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
def testSequentialModelInputArray(self):
"""Test a Sequential tf.keras model testing input arrays argument."""
ops.disable_eager_execution()
self._getSequentialModel()
# Invalid input array raises error.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_arrays=['invalid-input'])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
# Valid input array.
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_arrays=['dense_input'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
def testSequentialModelInputShape(self):
"""Test a Sequential tf.keras model testing input shapes argument."""
self._getSequentialModel()
# Passing in shape of invalid input array raises error.
with self.assertRaises(ValueError) as error:
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_shapes={'invalid-input': [2, 3]})
self.assertEqual(
"Invalid tensor 'invalid-input' found in tensor shapes map.",
str(error.exception))
# Passing in shape of valid input array.
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_shapes={'dense_input': [2, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check input shape from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('dense_input', input_details[0]['name'])
self.assertTrue(([2, 3] == input_details[0]['shape']).all())
def testSequentialModelOutputArray(self):
"""Test a Sequential tf.keras model testing output arrays argument."""
ops.disable_eager_execution()
self._getSequentialModel()
# Invalid output array raises error.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_keras_model_file(
self._keras_file, output_arrays=['invalid-output'])
self.assertEqual("Invalid tensors 'invalid-output' were found.",
str(error.exception))
# Valid output array.
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, output_arrays=['time_distributed/Reshape_1'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
@parameterized.named_parameters(('_graph', context.graph_mode),
('_eager', context.eager_mode))
def testFunctionalModel(self, test_context):
"""Test a Functional tf.keras model with default inputs."""
with test_context():
inputs = keras.layers.Input(shape=(3,), name='input')
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
model.predict(x)
fd, self._keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, self._keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(self._keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
def testFunctionalModelMultipleInputs(self):
"""Test a Functional tf.keras model with multiple inputs and outputs."""
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.mae],
loss_weights=[1., 0.5])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
model.predict([input_a_np, input_b_np], batch_size=5)
fd, self._keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, self._keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertEqual('input_a', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('input_b', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 2)
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual(np.float32, output_details[1]['dtype'])
self.assertTrue(([1, 4] == output_details[1]['shape']).all())
self.assertEqual((0., 0.), output_details[1]['quantization'])
def testFunctionalSequentialModel(self):
"""Test a Functional tf.keras model containing a Sequential model."""
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model = keras.models.Model(model.input, model.output)
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
fd, self._keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, self._keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(self._keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
def testSequentialModelTocoConverter(self):
"""Test a Sequential tf.keras model with deprecated TocoConverter."""
self._getSequentialModel()
converter = lite.TocoConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
@parameterized.named_parameters(('_graph', context.graph_mode),
('_eager', context.eager_mode))
def testGraphDebugInfo(self, test_context):
"""Test a Sequential tf.keras model has debug info captured."""
with test_context():
self._getSequentialModel()
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
converter.convert()
self.assertValidDebugInfo(converter._debug_info)
class GrapplerTest(TestModels):
def testConstantFolding(self):
ops.disable_eager_execution()
# Constant folding handles the tf.broadcast_to operation, which was not
# supported by TFLite at the time this test was added.
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(shape=[3, 3], dtype=dtypes.float32)
y_const = constant_op.constant([1., 2., 3.])
y_broadcast = gen_array_ops.broadcast_to(y_const, [3, 3])
out_tensor = math_ops.matmul(in_tensor, y_broadcast, name='output')
sess = session.Session()
# Convert model.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([3, 3] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([3, 3] == output_details[0]['shape']).all())
class ImportOpsUtilTest(test_util.TensorFlowTestCase):
def testGetPotentiallySupportedOps(self):
self.assertIsNotNone(lite.get_potentially_supported_ops())
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/lite_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for util.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# TODO(nupurgarg): Add test for Grappler and frozen graph related functions.
class UtilTest(test_util.TensorFlowTestCase):
def testConvertDtype(self):
self.assertEqual(
util.convert_dtype_to_tflite_type(lite_constants.FLOAT),
_types_pb2.FLOAT)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.float32), _types_pb2.FLOAT)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.int32), _types_pb2.INT32)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.int64), _types_pb2.INT64)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.string), _types_pb2.STRING)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.uint8),
_types_pb2.QUANTIZED_UINT8)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.complex64),
_types_pb2.COMPLEX64)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.half), _types_pb2.FLOAT16)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.bool), _types_pb2.BOOL)
def testTensorName(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.float32)
out_tensors = array_ops.split(
value=in_tensor, num_or_size_splits=[1, 1, 1, 1], axis=0)
expect_names = ["split", "split:1", "split:2", "split:3"]
for i in range(len(expect_names)):
got_name = util.get_tensor_name(out_tensors[i])
self.assertEqual(got_name, expect_names[i])
@test_util.enable_control_flow_v2
def testRemoveLowerUsingSwitchMerge(self):
with ops.Graph().as_default():
i = array_ops.placeholder(shape=(), dtype=dtypes.int32)
c = lambda i: math_ops.less(i, 10)
b = lambda i: math_ops.add(i, 1)
control_flow_ops.while_loop(c, b, [i])
sess = session.Session()
new_graph_def = convert_to_constants.disable_lower_using_switch_merge(
sess.graph_def)
lower_using_switch_merge_is_removed = False
for node in new_graph_def.node:
if node.op == "While" or node.op == "StatelessWhile":
if not node.attr["_lower_using_switch_merge"].b:
lower_using_switch_merge_is_removed = True
self.assertEqual(lower_using_switch_merge_is_removed, True)
class TensorFunctionsTest(test_util.TensorFlowTestCase):
def testGetTensorsValid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
tensors = util.get_tensors_from_tensor_names(sess.graph, ["Placeholder"])
self.assertEqual("Placeholder:0", tensors[0].name)
def testGetTensorsInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
with self.assertRaises(ValueError) as error:
util.get_tensors_from_tensor_names(sess.graph, ["invalid-input"])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
def testSetTensorShapeValid(self):
with ops.Graph().as_default():
tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
self.assertEqual([None, 3, 5], tensor.shape.as_list())
util.set_tensor_shapes([tensor], {"Placeholder": [5, 3, 5]})
self.assertEqual([5, 3, 5], tensor.shape.as_list())
def testSetTensorShapeNoneValid(self):
with ops.Graph().as_default():
tensor = array_ops.placeholder(dtype=dtypes.float32)
self.assertEqual(None, tensor.shape)
util.set_tensor_shapes([tensor], {"Placeholder": [1, 3, 5]})
self.assertEqual([1, 3, 5], tensor.shape.as_list())
def testSetTensorShapeArrayInvalid(self):
# Tests set_tensor_shape where the tensor name passed in doesn't exist.
with ops.Graph().as_default():
tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
self.assertEqual([None, 3, 5], tensor.shape.as_list())
with self.assertRaises(ValueError) as error:
util.set_tensor_shapes([tensor], {"invalid-input": [5, 3, 5]})
self.assertEqual(
"Invalid tensor 'invalid-input' found in tensor shapes map.",
str(error.exception))
self.assertEqual([None, 3, 5], tensor.shape.as_list())
@test_util.run_deprecated_v1
def testSetTensorShapeDimensionInvalid(self):
# Tests set_tensor_shape where the shape passed in is incompatible.
with ops.Graph().as_default():
tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
self.assertEqual([None, 3, 5], tensor.shape.as_list())
with self.assertRaises(ValueError) as error:
util.set_tensor_shapes([tensor], {"Placeholder": [1, 5, 5]})
self.assertIn("The shape of tensor 'Placeholder' cannot be changed",
str(error.exception))
self.assertEqual([None, 3, 5], tensor.shape.as_list())
def testSetTensorShapeEmpty(self):
with ops.Graph().as_default():
tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
self.assertEqual([None, 3, 5], tensor.shape.as_list())
util.set_tensor_shapes([tensor], {})
self.assertEqual([None, 3, 5], tensor.shape.as_list())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/util_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python TF-Lite interpreter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import platform
import sys
import numpy as np
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export as _tf_export
# Lazy load since some of the performance benchmark skylark rules
# break dependencies. Must use double quotes to match code internal rewrite
# rule.
# pylint: disable=g-inconsistent-quotes
_interpreter_wrapper = LazyLoader(
"_interpreter_wrapper", globals(),
"tensorflow.lite.python.interpreter_wrapper."
"tensorflow_wrap_interpreter_wrapper")
# pylint: enable=g-inconsistent-quotes
del LazyLoader
except ImportError:
# When the full TensorFlow Python PIP is not available, do not use lazy
# loading and instead use the tflite_runtime path.
from tflite_runtime import interpreter_wrapper as _interpreter_wrapper
def tf_export_dummy(*x, **kwargs):
del x, kwargs
return lambda x: x
_tf_export = tf_export_dummy
class Delegate(object):
"""Python wrapper class to manage TfLiteDelegate objects.
The shared library is expected to have two functions:
TfLiteDelegate* tflite_plugin_create_delegate(
char**, char**, size_t, void (*report_error)(const char *))
void tflite_plugin_destroy_delegate(TfLiteDelegate*)
The first one creates a delegate object. It may return NULL to indicate an
error (with a suitable error message reported by calling report_error()).
The second one destroys the delegate object and must be called for every
created delegate object. Passing NULL as argument value is allowed, i.e.
tflite_plugin_destroy_delegate(tflite_plugin_create_delegate(...))
always works.
"""
def __init__(self, library, options=None):
"""Loads delegate from the shared library.
Args:
library: Shared library name.
options: Dictionary of options that are required to load the delegate. All
keys and values in the dictionary should be serializable. Consult the
documentation of the specific delegate for required and legal options.
(default None)
Raises:
RuntimeError: This is raised if the Python implementation is not CPython.
"""
# TODO(b/136468453): Remove need for __del__ ordering needs of CPython
# by using explicit closes(). See implementation of Interpreter __del__.
if platform.python_implementation() != 'CPython':
raise RuntimeError('Delegates are currently only supported in CPython '
'due to missing immediate reference counting.')
self._library = ctypes.pydll.LoadLibrary(library)
self._library.tflite_plugin_create_delegate.argtypes = [
ctypes.POINTER(ctypes.c_char_p),
ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
ctypes.CFUNCTYPE(None, ctypes.c_char_p)
]
self._library.tflite_plugin_create_delegate.restype = ctypes.c_void_p
# Convert the options from a dictionary to lists of char pointers.
options = options or {}
options_keys = (ctypes.c_char_p * len(options))()
options_values = (ctypes.c_char_p * len(options))()
for idx, (key, value) in enumerate(options.items()):
options_keys[idx] = str(key).encode('utf-8')
options_values[idx] = str(value).encode('utf-8')
class ErrorMessageCapture(object):
def __init__(self):
self.message = ''
def report(self, x):
self.message += x
capture = ErrorMessageCapture()
error_capturer_cb = ctypes.CFUNCTYPE(None, ctypes.c_char_p)(capture.report)
# Do not make a copy of _delegate_ptr. It is freed by Delegate's finalizer.
self._delegate_ptr = self._library.tflite_plugin_create_delegate(
options_keys, options_values, len(options), error_capturer_cb)
if self._delegate_ptr is None:
raise ValueError(capture.message)
def __del__(self):
# __del__ can be called multiple times, so if the delegate is destroyed,
# don't try to destroy it twice.
if self._library is not None:
self._library.tflite_plugin_destroy_delegate.argtypes = [ctypes.c_void_p]
self._library.tflite_plugin_destroy_delegate(self._delegate_ptr)
self._library = None
def _get_native_delegate_pointer(self):
"""Returns the native TfLiteDelegate pointer.
It is not safe to copy this pointer because it needs to be freed.
Returns:
TfLiteDelegate *
"""
return self._delegate_ptr
@_tf_export('lite.experimental.load_delegate')
def load_delegate(library, options=None):
"""Returns loaded Delegate object.
Args:
library: Name of shared library containing the
[TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates).
options: Dictionary of options that are required to load the delegate. All
keys and values in the dictionary should be convertible to str. Consult
the documentation of the specific delegate for required and legal options.
(default None)
Returns:
Delegate object.
Raises:
ValueError: Delegate failed to load.
RuntimeError: If delegate loading is used on unsupported platform.
"""
# TODO(b/137299813): Fix darwin support for delegates.
if sys.platform == 'darwin':
raise RuntimeError('Dynamic loading of delegates on Darwin not supported.')
try:
delegate = Delegate(library, options)
except ValueError as e:
raise ValueError('Failed to load delegate from {}\n{}'.format(
library, str(e)))
return delegate
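# A minimal sketch of attaching a plugin delegate to an interpreter; the
# library filename and option key below are hypothetical placeholders.
def _example_load_delegate(library_path, model_path):
  """Creates an Interpreter that dispatches supported ops to a delegate."""
  delegate = load_delegate(library_path, options={'device': '0'})
  return Interpreter(
      model_path=model_path, experimental_delegates=[delegate])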
@_tf_export('lite.Interpreter')
class Interpreter(object):
"""Interpreter interface for TensorFlow Lite Models.
This makes the TensorFlow Lite interpreter accessible in Python.
It is possible to use this interpreter in a multithreaded Python environment,
but you must be sure to call functions of a particular instance from only
one thread at a time. So if you want to have 4 threads running different
inferences simultaneously, create an interpreter for each one as thread-local
data. Similarly, if you are calling invoke() in one thread on a single
interpreter but you want to use tensor() on another thread once it is done,
you must use a synchronization primitive between the threads to ensure invoke
has returned before calling tensor().
"""
def __init__(self,
model_path=None,
model_content=None,
experimental_delegates=None):
"""Constructor.
Args:
model_path: Path to TF-Lite Flatbuffer file.
model_content: Content of model.
experimental_delegates: Experimental. Subject to change. List of
[TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates)
objects returned by lite.load_delegate().
Raises:
ValueError: If the interpreter was unable to create.
"""
if model_path and not model_content:
self._interpreter = (
_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromFile(
model_path))
if not self._interpreter:
raise ValueError('Failed to open {}'.format(model_path))
elif model_content and not model_path:
# Take a reference, so the pointer remains valid.
# Since Python strings are immutable, PyString_XX functions
# will always return the same pointer.
self._model_content = model_content
self._interpreter = (
_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer(
model_content))
elif not model_path and not model_content:
raise ValueError('`model_path` or `model_content` must be specified.')
else:
raise ValueError('Can\'t both provide `model_path` and `model_content`')
# Each delegate is a wrapper that owns the delegates that have been loaded
# as plugins. The interpreter wrapper will be using them, but we need to
# hold them in a list so that the lifetime is preserved at least as long as
# the interpreter wrapper.
self._delegates = []
if experimental_delegates:
self._delegates = experimental_delegates
for delegate in self._delegates:
self._interpreter.ModifyGraphWithDelegate(
delegate._get_native_delegate_pointer()) # pylint: disable=protected-access
def __del__(self):
# Must make sure the interpreter is destroyed before things that
# are used by it, like the delegates. NOTE: this probably only works on
# CPython.
# TODO(b/136468453): Remove need for __del__ ordering needs of CPython
# by using explicit closes(). See implementation of Interpreter __del__.
self._interpreter = None
self._delegates = None
def allocate_tensors(self):
self._ensure_safe()
return self._interpreter.AllocateTensors()
def _safe_to_run(self):
"""Returns true if there exist no numpy array buffers.
This means it is safe to run tflite calls that may destroy internally
allocated memory. This works, because in the wrapper.cc we have made
the numpy base be the self._interpreter.
"""
# NOTE, our tensor() call in cpp will use _interpreter as a base pointer.
# If this environment is the only _interpreter, then the ref count should be
# 2 (1 in self and 1 in temporary of sys.getrefcount).
return sys.getrefcount(self._interpreter) == 2
def _ensure_safe(self):
"""Makes sure no numpy arrays pointing to internal buffers are active.
This should be called from any function that will call a function on
_interpreter that may reallocate memory, e.g. invoke().
Raises:
RuntimeError: If there exist numpy objects pointing to internal memory
then we throw.
"""
if not self._safe_to_run():
raise RuntimeError("""There is at least 1 reference to internal data
in the interpreter in the form of a numpy array or slice. Be sure to
only hold the function returned from tensor() if you are using raw
data access.""")
def _get_tensor_details(self, tensor_index):
"""Gets tensor details.
Args:
tensor_index: Tensor index of tensor to query.
Returns:
a dictionary containing the name, index, shape and type of the tensor.
Raises:
ValueError: If tensor_index is invalid.
"""
tensor_index = int(tensor_index)
tensor_name = self._interpreter.TensorName(tensor_index)
tensor_size = self._interpreter.TensorSize(tensor_index)
tensor_type = self._interpreter.TensorType(tensor_index)
tensor_quantization = self._interpreter.TensorQuantization(tensor_index)
if not tensor_name or not tensor_type:
raise ValueError('Could not get tensor details')
details = {
'name': tensor_name,
'index': tensor_index,
'shape': tensor_size,
'dtype': tensor_type,
'quantization': tensor_quantization,
}
return details
def get_tensor_details(self):
"""Gets tensor details for every tensor with valid tensor details.
Tensors for which required information cannot be found are not added to
the list. This includes temporary tensors without a name.
Returns:
A list of dictionaries containing tensor information.
"""
tensor_details = []
for idx in range(self._interpreter.NumTensors()):
try:
tensor_details.append(self._get_tensor_details(idx))
except ValueError:
pass
return tensor_details
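# Each details dictionary returned above has the form (values hypothetical):
#   {'name': 'input', 'index': 0, 'shape': np.array([1, 4]),
#    'dtype': np.float32, 'quantization': (0.0, 0)}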
def get_input_details(self):
"""Gets model input details.
Returns:
A list of input details.
"""
return [
self._get_tensor_details(i) for i in self._interpreter.InputIndices()
]
def set_tensor(self, tensor_index, value):
"""Sets the value of the input tensor. Note this copies data in `value`.
If you want to avoid copying, you can use the `tensor()` function to get a
numpy buffer pointing to the input buffer in the tflite interpreter.
Args:
tensor_index: Tensor index of tensor to set. This value can be gotten from
the 'index' field in get_input_details.
value: Value of tensor to set.
Raises:
ValueError: If the interpreter could not set the tensor.
"""
self._interpreter.SetTensor(tensor_index, value)
def resize_tensor_input(self, input_index, tensor_size):
"""Resizes an input tensor.
Args:
input_index: Tensor index of input to set. This value can be gotten from
the 'index' field in get_input_details.
tensor_size: The tensor_shape to resize the input to.
Raises:
ValueError: If the interpreter could not resize the input tensor.
"""
self._ensure_safe()
# `ResizeInputTensor` now only accepts an int32 numpy array as the
# `tensor_size` parameter.
tensor_size = np.array(tensor_size, dtype=np.int32)
self._interpreter.ResizeInputTensor(input_index, tensor_size)
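# Note: after resizing an input tensor, `allocate_tensors()` must be called
# again before `invoke()`. A minimal sketch (index and shape hypothetical):
#   interpreter.resize_tensor_input(0, [2, 224, 224, 3])
#   interpreter.allocate_tensors()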
def get_output_details(self):
"""Gets model output details.
Returns:
A list of output details.
"""
return [
self._get_tensor_details(i) for i in self._interpreter.OutputIndices()
]
def get_tensor(self, tensor_index):
"""Gets the value of the input tensor (get a copy).
If you wish to avoid the copy, use `tensor()`. This function cannot be used
to read intermediate results.
Args:
tensor_index: Tensor index of tensor to get. This value can be gotten from
the 'index' field in get_output_details.
Returns:
a numpy array.
"""
return self._interpreter.GetTensor(tensor_index)
def tensor(self, tensor_index):
"""Returns function that gives a numpy view of the current tensor buffer.
This allows reading and writing to this tensor without copies. This more
closely mirrors the C++ Interpreter class interface's tensor() member, hence
the name. Be careful to not hold these output references through calls
to `allocate_tensors()` and `invoke()`. This function cannot be used to read
intermediate results.
Usage:
```
interpreter.allocate_tensors()
input = interpreter.tensor(interpreter.get_input_details()[0]["index"])
output = interpreter.tensor(interpreter.get_output_details()[0]["index"])
for i in range(10):
input().fill(3.)
interpreter.invoke()
print("inference %s" % output())
```
Notice how this function avoids making a numpy array directly. This is
because it is important to not hold actual numpy views to the data longer
than necessary. If you do, then the interpreter can no longer be invoked,
because it is possible the interpreter would resize and invalidate the
    referenced tensors. The NumPy API doesn't allow any mutability of the
    underlying buffers.
WRONG:
```
input = interpreter.tensor(interpreter.get_input_details()[0]["index"])()
output = interpreter.tensor(interpreter.get_output_details()[0]["index"])()
interpreter.allocate_tensors() # This will throw RuntimeError
for i in range(10):
input.fill(3.)
    interpreter.invoke()  # this will throw RuntimeError; references still held
```
Args:
      tensor_index: Tensor index of tensor to get. This value can be obtained
        from the 'index' field in get_output_details.
Returns:
A function that can return a new numpy array pointing to the internal
TFLite tensor state at any point. It is safe to hold the function forever,
but it is not safe to hold the numpy array forever.
"""
return lambda: self._interpreter.tensor(self._interpreter, tensor_index)
def invoke(self):
"""Invoke the interpreter.
Be sure to set the input sizes, allocate tensors and fill values before
calling this. Also, note that this function releases the GIL so heavy
computation can be done in the background while the Python interpreter
continues. No other function on this object should be called while the
invoke() call has not finished.
Raises:
      ValueError: If the underlying interpreter fails.
"""
self._ensure_safe()
self._interpreter.Invoke()
def reset_all_variables(self):
return self._interpreter.ResetVariableTensors()
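# Editor's note: a minimal end-to-end usage sketch for the Interpreter class
# above. The model path and input values are placeholders, not part of this
# module; the block only runs when the file is executed directly.
if __name__ == "__main__":
  _example = Interpreter(model_path="model.tflite")  # placeholder path
  _example.allocate_tensors()
  _in = _example.get_input_details()[0]
  _out = _example.get_output_details()[0]
  # Feed ones of the declared shape/dtype, run inference, read the result.
  _example.set_tensor(_in["index"], np.ones(_in["shape"], dtype=_in["dtype"]))
  _example.invoke()
  print(_example.get_tensor(_out["index"]))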
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/interpreter.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite Python Interface: Sanity check."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import io
import sys
import numpy as np
import six
from tensorflow.lite.python import interpreter as interpreter_wrapper
from tensorflow.python.framework import test_util
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class InterpreterTest(test_util.TensorFlowTestCase):
def testFloat(self):
interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 4] == input_details[0]['shape']).all())
self.assertEqual((0.0, 0), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 4] == output_details[0]['shape']).all())
self.assertEqual((0.0, 0), output_details[0]['quantization'])
test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
expected_output = np.array([[4.0, 3.0, 2.0, 1.0]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
def testUint8(self):
model_path = resource_loader.get_path_to_datafile(
'testdata/permute_uint8.tflite')
with io.open(model_path, 'rb') as model_file:
data = model_file.read()
interpreter = interpreter_wrapper.Interpreter(model_content=data)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 4] == input_details[0]['shape']).all())
self.assertEqual((1.0, 0), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 4] == output_details[0]['shape']).all())
self.assertEqual((1.0, 0), output_details[0]['quantization'])
test_input = np.array([[1, 2, 3, 4]], dtype=np.uint8)
expected_output = np.array([[4, 3, 2, 1]], dtype=np.uint8)
interpreter.resize_tensor_input(input_details[0]['index'],
test_input.shape)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
def testString(self):
interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/gather_string.tflite'))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.string_, input_details[0]['dtype'])
self.assertTrue(([10] == input_details[0]['shape']).all())
self.assertEqual((0.0, 0), input_details[0]['quantization'])
self.assertEqual('indices', input_details[1]['name'])
self.assertEqual(np.int64, input_details[1]['dtype'])
self.assertTrue(([3] == input_details[1]['shape']).all())
self.assertEqual((0.0, 0), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.string_, output_details[0]['dtype'])
self.assertTrue(([3] == output_details[0]['shape']).all())
self.assertEqual((0.0, 0), output_details[0]['quantization'])
test_input = np.array([1, 2, 3], dtype=np.int64)
interpreter.set_tensor(input_details[1]['index'], test_input)
test_input = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'])
expected_output = np.array([b'b', b'c', b'd'])
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
class InterpreterTestErrorPropagation(test_util.TensorFlowTestCase):
def testInvalidModelContent(self):
with self.assertRaisesRegexp(ValueError,
'Model provided has model identifier \''):
interpreter_wrapper.Interpreter(model_content=six.b('garbage'))
def testInvalidModelFile(self):
with self.assertRaisesRegexp(
ValueError, 'Could not open \'totally_invalid_file_name\''):
interpreter_wrapper.Interpreter(
model_path='totally_invalid_file_name')
def testInvokeBeforeReady(self):
interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'))
with self.assertRaisesRegexp(RuntimeError,
'Invoke called on model that is not ready'):
interpreter.invoke()
def testInvalidModelFileContent(self):
with self.assertRaisesRegexp(
ValueError, '`model_path` or `model_content` must be specified.'):
interpreter_wrapper.Interpreter(model_path=None, model_content=None)
def testInvalidIndex(self):
interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'))
interpreter.allocate_tensors()
# Invalid tensor index passed.
with self.assertRaisesRegexp(ValueError, 'Tensor with no shape found.'):
interpreter._get_tensor_details(4)
class InterpreterTensorAccessorTest(test_util.TensorFlowTestCase):
def setUp(self):
self.interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'))
self.interpreter.allocate_tensors()
self.input0 = self.interpreter.get_input_details()[0]['index']
self.initial_data = np.array([[-1., -2., -3., -4.]], np.float32)
def testTensorAccessor(self):
"""Check that tensor returns a reference."""
array_ref = self.interpreter.tensor(self.input0)
np.copyto(array_ref(), self.initial_data)
self.assertAllEqual(array_ref(), self.initial_data)
self.assertAllEqual(
self.interpreter.get_tensor(self.input0), self.initial_data)
def testGetTensorAccessor(self):
"""Check that get_tensor returns a copy."""
self.interpreter.set_tensor(self.input0, self.initial_data)
array_initial_copy = self.interpreter.get_tensor(self.input0)
new_value = np.add(1., array_initial_copy)
self.interpreter.set_tensor(self.input0, new_value)
self.assertAllEqual(array_initial_copy, self.initial_data)
self.assertAllEqual(self.interpreter.get_tensor(self.input0), new_value)
def testBase(self):
self.assertTrue(self.interpreter._safe_to_run())
_ = self.interpreter.tensor(self.input0)
self.assertTrue(self.interpreter._safe_to_run())
in0 = self.interpreter.tensor(self.input0)()
self.assertFalse(self.interpreter._safe_to_run())
in0b = self.interpreter.tensor(self.input0)()
self.assertFalse(self.interpreter._safe_to_run())
# Now get rid of the buffers so that we can evaluate.
del in0
del in0b
self.assertTrue(self.interpreter._safe_to_run())
def testBaseProtectsFunctions(self):
in0 = self.interpreter.tensor(self.input0)()
# Make sure we get an exception if we try to run an unsafe operation
with self.assertRaisesRegexp(
RuntimeError, 'There is at least 1 reference'):
_ = self.interpreter.allocate_tensors()
# Make sure we get an exception if we try to run an unsafe operation
with self.assertRaisesRegexp(
RuntimeError, 'There is at least 1 reference'):
_ = self.interpreter.invoke()
# Now test that we can run
del in0 # this is our only buffer reference, so now it is safe to change
in0safe = self.interpreter.tensor(self.input0)
_ = self.interpreter.allocate_tensors()
    del in0safe  # make sure in0safe is held but lint doesn't complain
class InterpreterDelegateTest(test_util.TensorFlowTestCase):
def setUp(self):
self._delegate_file = resource_loader.get_path_to_datafile(
'testdata/test_delegate.so')
self._model_file = resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite')
# Load the library to reset the counters.
library = ctypes.pydll.LoadLibrary(self._delegate_file)
library.initialize_counters()
def _TestInterpreter(self, model_path, options=None):
"""Test wrapper function that creates an interpreter with the delegate."""
# TODO(b/137299813): Enable when we fix for mac
if sys.platform == 'darwin': return
delegate = interpreter_wrapper.load_delegate(self._delegate_file, options)
return interpreter_wrapper.Interpreter(
model_path=model_path, experimental_delegates=[delegate])
def testDelegate(self):
"""Tests the delegate creation and destruction."""
# TODO(b/137299813): Enable when we fix for mac
if sys.platform == 'darwin': return
interpreter = self._TestInterpreter(model_path=self._model_file)
lib = interpreter._delegates[0]._library
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 1)
del interpreter
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 1)
self.assertEqual(lib.get_num_delegates_invoked(), 1)
def testMultipleInterpreters(self):
# TODO(b/137299813): Enable when we fix for mac
if sys.platform == 'darwin': return
delegate = interpreter_wrapper.load_delegate(self._delegate_file)
lib = delegate._library
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
interpreter_a = interpreter_wrapper.Interpreter(
model_path=self._model_file, experimental_delegates=[delegate])
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 1)
interpreter_b = interpreter_wrapper.Interpreter(
model_path=self._model_file, experimental_delegates=[delegate])
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 2)
del delegate
del interpreter_a
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 2)
del interpreter_b
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 1)
self.assertEqual(lib.get_num_delegates_invoked(), 2)
def testDestructionOrder(self):
"""Make sure internal _interpreter object is destroyed before delegate."""
    # Track the order in which destructions were done.
# TODO(b/137299813): Enable when we fix for mac
if sys.platform == 'darwin': return
destructions = []
def register_destruction(x):
destructions.append(x)
return 0
# Make a wrapper for the callback so we can send this to ctypes
delegate = interpreter_wrapper.load_delegate(self._delegate_file)
prototype = ctypes.CFUNCTYPE(ctypes.c_int, (ctypes.c_char_p))
destroy_callback = prototype(register_destruction)
delegate._library.set_destroy_callback(destroy_callback)
# Make an interpreter with the delegate
interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'), experimental_delegates=[delegate])
class InterpreterDestroyCallback(object):
def __del__(self):
register_destruction('interpreter')
interpreter._interpreter.stuff = InterpreterDestroyCallback()
# Destroy both delegate and interpreter
del delegate
del interpreter
# check the interpreter was destroyed before the delegate
self.assertEqual(destructions, ['interpreter', 'test_delegate'])
def testOptions(self):
# TODO(b/137299813): Enable when we fix for mac
if sys.platform == 'darwin': return
delegate_a = interpreter_wrapper.load_delegate(self._delegate_file)
lib = delegate_a._library
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
self.assertEqual(lib.get_options_counter(), 0)
delegate_b = interpreter_wrapper.load_delegate(
self._delegate_file, options={
'unused': False,
'options_counter': 2
})
lib = delegate_b._library
self.assertEqual(lib.get_num_delegates_created(), 2)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
self.assertEqual(lib.get_options_counter(), 2)
del delegate_a
del delegate_b
self.assertEqual(lib.get_num_delegates_created(), 2)
self.assertEqual(lib.get_num_delegates_destroyed(), 2)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
self.assertEqual(lib.get_options_counter(), 2)
def testFail(self):
# TODO(b/137299813): Enable when we fix for mac
if sys.platform == 'darwin': return
with self.assertRaisesRegexp(
ValueError, 'Failed to load delegate from .*\nFail argument sent.'):
interpreter_wrapper.load_delegate(
self._delegate_file, options={'fail': 'fail'})
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/interpreter_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to select TF op usage."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.python import lite
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python.client import session
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import tracking
class FromSessionTest(test_util.TensorFlowTestCase):
def testFlexMode(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Flex delegate before inference.',
str(error.exception))
def testDeprecatedFlags(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.target_ops = set([lite.OpsSet.SELECT_TF_OPS])
# Ensure `target_ops` is set to the correct value after flag deprecation.
self.assertEqual(converter.target_ops, set([lite.OpsSet.SELECT_TF_OPS]))
self.assertEqual(converter.target_spec.supported_ops,
set([lite.OpsSet.SELECT_TF_OPS]))
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Flex delegate before inference.',
str(error.exception))
class FromConcreteFunctionTest(test_util.TensorFlowTestCase):
@test_util.run_v2_only
def testFloat(self):
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
tflite_model = converter.convert()
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Flex delegate before inference.',
str(error.exception))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/lite_flex_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python command line interface for running TOCO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_constants
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.platform import app
def _parse_array(values, type_fn=str):
if values is not None:
return [type_fn(val) for val in values.split(",") if val]
return None
def _parse_set(values):
if values is not None:
return set([item for item in values.split(",") if item])
return None
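# Editor's note: illustrative behavior of the parsing helpers above:
#
#   _parse_array("1,16,16,3", type_fn=int)  # -> [1, 16, 16, 3]
#   _parse_array(None)                      # -> None
#   _parse_set("serve,gpu")                 # -> {"serve", "gpu"}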
def _parse_inference_type(value, flag):
"""Converts the inference type to the value of the constant.
Args:
value: str representing the inference type.
flag: str representing the flag name.
Returns:
tf.dtype.
Raises:
ValueError: Unsupported value.
"""
if value == "FLOAT":
return lite_constants.FLOAT
if value == "QUANTIZED_UINT8":
return lite_constants.QUANTIZED_UINT8
raise ValueError("Unsupported value for --{0}. Only FLOAT and "
"QUANTIZED_UINT8 are supported.".format(flag))
def _get_toco_converter(flags):
"""Makes a TFLiteConverter object based on the flags provided.
Args:
flags: argparse.Namespace object containing TFLite flags.
Returns:
TFLiteConverter object.
Raises:
ValueError: Invalid flags.
"""
# Parse input and output arrays.
input_arrays = _parse_array(flags.input_arrays)
input_shapes = None
if flags.input_shapes:
input_shapes_list = [
_parse_array(shape, type_fn=int)
for shape in flags.input_shapes.split(":")
]
input_shapes = dict(zip(input_arrays, input_shapes_list))
output_arrays = _parse_array(flags.output_arrays)
converter_kwargs = {
"input_arrays": input_arrays,
"input_shapes": input_shapes,
"output_arrays": output_arrays
}
# Create TFLiteConverter.
if flags.graph_def_file:
converter_fn = lite.TFLiteConverter.from_frozen_graph
converter_kwargs["graph_def_file"] = flags.graph_def_file
elif flags.saved_model_dir:
converter_fn = lite.TFLiteConverter.from_saved_model
converter_kwargs["saved_model_dir"] = flags.saved_model_dir
converter_kwargs["tag_set"] = _parse_set(flags.saved_model_tag_set)
converter_kwargs["signature_key"] = flags.saved_model_signature_key
elif flags.keras_model_file:
converter_fn = lite.TFLiteConverter.from_keras_model_file
converter_kwargs["model_file"] = flags.keras_model_file
else:
raise ValueError("--graph_def_file, --saved_model_dir, or "
"--keras_model_file must be specified.")
return converter_fn(**converter_kwargs)
def _convert_tf1_model(flags):
"""Calls function to convert the TensorFlow 1.X model into a TFLite model.
Args:
flags: argparse.Namespace object.
Raises:
ValueError: Invalid flags.
"""
# Create converter.
converter = _get_toco_converter(flags)
if flags.inference_type:
converter.inference_type = _parse_inference_type(flags.inference_type,
"inference_type")
if flags.inference_input_type:
converter.inference_input_type = _parse_inference_type(
flags.inference_input_type, "inference_input_type")
if flags.output_format:
converter.output_format = _toco_flags_pb2.FileFormat.Value(
flags.output_format)
if flags.mean_values and flags.std_dev_values:
input_arrays = converter.get_input_arrays()
std_dev_values = _parse_array(flags.std_dev_values, type_fn=float)
    # In quantized inference, mean_value has to be an integer so that the real
# value 0.0 is exactly representable.
if converter.inference_type == lite_constants.QUANTIZED_UINT8:
mean_values = _parse_array(flags.mean_values, type_fn=int)
else:
mean_values = _parse_array(flags.mean_values, type_fn=float)
quant_stats = list(zip(mean_values, std_dev_values))
if ((not flags.input_arrays and len(input_arrays) > 1) or
(len(input_arrays) != len(quant_stats))):
raise ValueError("Mismatching --input_arrays, --std_dev_values, and "
"--mean_values. The flags must have the same number of "
"items. The current input arrays are '{0}'. "
"--input_arrays must be present when specifying "
"--std_dev_values and --mean_values with multiple input "
"tensors in order to map between names and "
"values.".format(",".join(input_arrays)))
converter.quantized_input_stats = dict(zip(input_arrays, quant_stats))
if (flags.default_ranges_min is not None) and (flags.default_ranges_max is
not None):
converter.default_ranges_stats = (flags.default_ranges_min,
flags.default_ranges_max)
if flags.drop_control_dependency:
converter.drop_control_dependency = flags.drop_control_dependency
if flags.reorder_across_fake_quant:
converter.reorder_across_fake_quant = flags.reorder_across_fake_quant
if flags.change_concat_input_ranges:
converter.change_concat_input_ranges = (
flags.change_concat_input_ranges == "TRUE")
if flags.allow_custom_ops:
converter.allow_custom_ops = flags.allow_custom_ops
if flags.target_ops:
ops_set_options = lite.OpsSet.get_options()
converter.target_ops = set()
for option in flags.target_ops.split(","):
if option not in ops_set_options:
raise ValueError("Invalid value for --target_ops. Options: "
"{0}".format(",".join(ops_set_options)))
converter.target_spec.supported_ops.add(lite.OpsSet(option))
if flags.post_training_quantize:
converter.optimizations = [lite.Optimize.DEFAULT]
if converter.inference_type == lite_constants.QUANTIZED_UINT8:
print("--post_training_quantize quantizes a graph of inference_type "
"FLOAT. Overriding inference type QUANTIZED_UINT8 to FLOAT.")
converter.inference_type = lite_constants.FLOAT
if flags.quantize_to_float16:
converter.target_spec.supported_types = [lite.constants.FLOAT16]
if not flags.post_training_quantize:
print("--quantize_to_float16 will only take effect with the "
"--post_training_quantize flag enabled.")
if flags.dump_graphviz_dir:
converter.dump_graphviz_dir = flags.dump_graphviz_dir
if flags.dump_graphviz_video:
    converter.dump_graphviz_video = flags.dump_graphviz_video
# Convert model.
output_data = converter.convert()
with open(flags.output_file, "wb") as f:
f.write(output_data)
def _convert_tf2_model(flags):
"""Calls function to convert the TensorFlow 2.0 model into a TFLite model.
Args:
flags: argparse.Namespace object.
Raises:
ValueError: Unsupported file format.
"""
# Load the model.
if flags.saved_model_dir:
converter = lite.TFLiteConverterV2.from_saved_model(flags.saved_model_dir)
elif flags.keras_model_file:
model = keras.models.load_model(flags.keras_model_file)
converter = lite.TFLiteConverterV2.from_keras_model(model)
# Convert the model.
tflite_model = converter.convert()
with open(flags.output_file, "wb") as f:
f.write(tflite_model)
def _check_tf1_flags(flags, unparsed):
"""Checks the parsed and unparsed flags to ensure they are valid in 1.X.
  Raises an error if previously supported unparsed flags are found. Raises an
  error for parsed flags that don't meet the required conditions.
Args:
flags: argparse.Namespace object containing TFLite flags.
unparsed: List of unparsed flags.
Raises:
ValueError: Invalid flags.
"""
# Check unparsed flags for common mistakes based on previous TOCO.
def _get_message_unparsed(flag, orig_flag, new_flag):
if flag.startswith(orig_flag):
return "\n Use {0} instead of {1}".format(new_flag, orig_flag)
return ""
if unparsed:
output = ""
for flag in unparsed:
output += _get_message_unparsed(flag, "--input_file", "--graph_def_file")
output += _get_message_unparsed(flag, "--savedmodel_directory",
"--saved_model_dir")
output += _get_message_unparsed(flag, "--std_value", "--std_dev_values")
output += _get_message_unparsed(flag, "--batch_size", "--input_shapes")
output += _get_message_unparsed(flag, "--dump_graphviz",
"--dump_graphviz_dir")
if output:
raise ValueError(output)
# Check that flags are valid.
if flags.graph_def_file and (not flags.input_arrays or
not flags.output_arrays):
raise ValueError("--input_arrays and --output_arrays are required with "
"--graph_def_file")
if flags.input_shapes:
if not flags.input_arrays:
raise ValueError("--input_shapes must be used with --input_arrays")
if flags.input_shapes.count(":") != flags.input_arrays.count(","):
raise ValueError("--input_shapes and --input_arrays must have the same "
"number of items")
if flags.std_dev_values or flags.mean_values:
if bool(flags.std_dev_values) != bool(flags.mean_values):
raise ValueError("--std_dev_values and --mean_values must be used "
"together")
if flags.std_dev_values.count(",") != flags.mean_values.count(","):
raise ValueError("--std_dev_values, --mean_values must have the same "
"number of items")
if (flags.default_ranges_min is None) != (flags.default_ranges_max is None):
raise ValueError("--default_ranges_min and --default_ranges_max must be "
"used together")
if flags.dump_graphviz_video and not flags.dump_graphviz_dir:
raise ValueError("--dump_graphviz_video must be used with "
"--dump_graphviz_dir")
def _get_tf1_parser():
"""Returns ArgumentParser for tflite_convert for TensorFlow 1.X."""
parser = argparse.ArgumentParser(
description=("Command line tool to run TensorFlow Lite Converter."))
# Output file flag.
parser.add_argument(
"--output_file",
type=str,
help="Full filepath of the output file.",
required=True)
# Input file flags.
input_file_group = parser.add_mutually_exclusive_group(required=True)
input_file_group.add_argument(
"--graph_def_file",
type=str,
help="Full filepath of file containing frozen TensorFlow GraphDef.")
input_file_group.add_argument(
"--saved_model_dir",
type=str,
help="Full filepath of directory containing the SavedModel.")
input_file_group.add_argument(
"--keras_model_file",
type=str,
help="Full filepath of HDF5 file containing tf.Keras model.")
# Model format flags.
parser.add_argument(
"--output_format",
type=str.upper,
choices=["TFLITE", "GRAPHVIZ_DOT"],
help="Output file format.")
parser.add_argument(
"--inference_type",
type=str.upper,
choices=["FLOAT", "QUANTIZED_UINT8"],
help="Target data type of real-number arrays in the output file.")
parser.add_argument(
"--inference_input_type",
type=str.upper,
choices=["FLOAT", "QUANTIZED_UINT8"],
help=("Target data type of real-number input arrays. Allows for a "
"different type for input arrays in the case of quantization."))
# Input and output arrays flags.
parser.add_argument(
"--input_arrays",
type=str,
help="Names of the input arrays, comma-separated.")
parser.add_argument(
"--input_shapes",
type=str,
help="Shapes corresponding to --input_arrays, colon-separated.")
parser.add_argument(
"--output_arrays",
type=str,
help="Names of the output arrays, comma-separated.")
# SavedModel related flags.
parser.add_argument(
"--saved_model_tag_set",
type=str,
help=("Comma-separated set of tags identifying the MetaGraphDef within "
"the SavedModel to analyze. All tags must be present. In order to "
"pass in an empty tag set, pass in \"\". (default \"serve\")"))
parser.add_argument(
"--saved_model_signature_key",
type=str,
help=("Key identifying the SignatureDef containing inputs and outputs. "
"(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)"))
# Quantization flags.
parser.add_argument(
"--std_dev_values",
type=str,
help=("Standard deviation of training data for each input tensor, "
"comma-separated floats. Used for quantized input tensors. "
"(default None)"))
parser.add_argument(
"--mean_values",
type=str,
help=("Mean of training data for each input tensor, comma-separated "
"floats. Used for quantized input tensors. (default None)"))
parser.add_argument(
"--default_ranges_min",
type=float,
help=("Default value for min bound of min/max range values used for all "
"arrays without a specified range, Intended for experimenting with "
"quantization via \"dummy quantization\". (default None)"))
parser.add_argument(
"--default_ranges_max",
type=float,
help=("Default value for max bound of min/max range values used for all "
"arrays without a specified range, Intended for experimenting with "
"quantization via \"dummy quantization\". (default None)"))
# quantize_weights is DEPRECATED.
parser.add_argument(
"--quantize_weights",
dest="post_training_quantize",
action="store_true",
help=argparse.SUPPRESS)
parser.add_argument(
"--post_training_quantize",
dest="post_training_quantize",
action="store_true",
help=(
"Boolean indicating whether to quantize the weights of the "
"converted float model. Model size will be reduced and there will "
"be latency improvements (at the cost of accuracy). (default False)"))
parser.add_argument(
"--quantize_to_float16",
dest="quantize_to_float16",
action="store_true",
help=("Boolean indicating whether to quantize weights to fp16 instead of "
"the default int8 when post-training quantization "
"(--post_training_quantize) is enabled. (default False)"))
# Graph manipulation flags.
parser.add_argument(
"--drop_control_dependency",
action="store_true",
help=("Boolean indicating whether to drop control dependencies silently. "
"This is due to TensorFlow not supporting control dependencies. "
"(default True)"))
parser.add_argument(
"--reorder_across_fake_quant",
action="store_true",
help=("Boolean indicating whether to reorder FakeQuant nodes in "
"unexpected locations. Used when the location of the FakeQuant "
"nodes is preventing graph transformations necessary to convert "
"the graph. Results in a graph that differs from the quantized "
"training graph, potentially causing differing arithmetic "
"behavior. (default False)"))
# Usage for this flag is --change_concat_input_ranges=true or
# --change_concat_input_ranges=false in order to make it clear what the flag
# is set to. This keeps the usage consistent with other usages of the flag
# where the default is different. The default value here is False.
parser.add_argument(
"--change_concat_input_ranges",
type=str.upper,
choices=["TRUE", "FALSE"],
help=("Boolean to change behavior of min/max ranges for inputs and "
"outputs of the concat operator for quantized models. Changes the "
"ranges of concat operator overlap when true. (default False)"))
# Permitted ops flags.
parser.add_argument(
"--allow_custom_ops",
action="store_true",
help=("Boolean indicating whether to allow custom operations. When false "
"any unknown operation is an error. When true, custom ops are "
"created for any op that is unknown. The developer will need to "
"provide these to the TensorFlow Lite runtime with a custom "
"resolver. (default False)"))
parser.add_argument(
"--target_ops",
type=str,
help=("Experimental flag, subject to change. Set of OpsSet options "
"indicating which converter to use. Options: {0}. One or more "
"option may be specified. (default set([OpsSet.TFLITE_BUILTINS]))"
"".format(",".join(lite.OpsSet.get_options()))))
# Logging flags.
parser.add_argument(
"--dump_graphviz_dir",
type=str,
help=("Full filepath of folder to dump the graphs at various stages of "
"processing GraphViz .dot files. Preferred over --output_format="
"GRAPHVIZ_DOT in order to keep the requirements of the output "
"file."))
parser.add_argument(
"--dump_graphviz_video",
action="store_true",
help=("Boolean indicating whether to dump the graph after every graph "
"transformation"))
return parser
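# Editor's note: an illustrative invocation built from the TF 1.X flags above
# (all paths and array names are placeholders):
#
#   tflite_convert \
#     --output_file=/tmp/model.tflite \
#     --graph_def_file=/tmp/frozen_graph.pb \
#     --input_arrays=input \
#     --input_shapes=1,224,224,3 \
#     --output_arrays=output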
def _get_tf2_parser():
"""Returns ArgumentParser for tflite_convert for TensorFlow 2.0."""
parser = argparse.ArgumentParser(
description=("Command line tool to run TensorFlow Lite Converter."))
# Output file flag.
parser.add_argument(
"--output_file",
type=str,
help="Full filepath of the output file.",
required=True)
# Input file flags.
input_file_group = parser.add_mutually_exclusive_group(required=True)
input_file_group.add_argument(
"--saved_model_dir",
type=str,
help="Full path of the directory containing the SavedModel.")
input_file_group.add_argument(
"--keras_model_file",
type=str,
help="Full filepath of HDF5 file containing tf.Keras model.")
return parser
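# Editor's note: the TF 2.0 parser accepts only an input source and an output
# file, e.g. (placeholder paths):
#
#   tflite_convert --saved_model_dir=/tmp/saved_model \
#     --output_file=/tmp/model.tflite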
def run_main(_):
"""Main in toco_convert.py."""
if tf2.enabled():
parser = _get_tf2_parser()
else:
parser = _get_tf1_parser()
tflite_flags, unparsed = parser.parse_known_args(args=sys.argv[1:])
if tf2.enabled():
_convert_tf2_model(tflite_flags)
else:
try:
_check_tf1_flags(tflite_flags, unparsed)
except ValueError as e:
parser.print_usage()
file_name = os.path.basename(sys.argv[0])
sys.stderr.write("{0}: error: {1}\n".format(file_name, str(e)))
sys.exit(1)
_convert_tf1_model(tflite_flags)
def main():
app.run(main=run_main, argv=sys.argv[:1])
if __name__ == "__main__":
main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/tflite_convert.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite tooling helper functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import enum
from six import PY3
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.lite.experimental.examples.lstm.rnn import dynamic_rnn # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TFLiteLSTMCell # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TfLiteRNNCell # pylint: disable=unused-import
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op # pylint: disable=unused-import
from tensorflow.lite.experimental.tensorboard.ops_util import get_potentially_supported_ops # pylint: disable=unused-import
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.convert import build_toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert import ConverterError # pylint: disable=unused-import
from tensorflow.lite.python.convert import OpsSet
from tensorflow.lite.python.convert import toco_convert # pylint: disable=unused-import
from tensorflow.lite.python.convert import toco_convert_graph_def as _toco_convert_graph_def
from tensorflow.lite.python.convert import toco_convert_impl as _toco_convert_impl
from tensorflow.lite.python.convert import toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model
from tensorflow.lite.python.interpreter import Interpreter # pylint: disable=unused-import
from tensorflow.lite.python.interpreter import load_delegate # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.lite.python.optimize import calibrator as _calibrator
from tensorflow.lite.python.util import build_debug_info_func as _build_debug_info_func
from tensorflow.lite.python.util import freeze_graph as _freeze_graph
from tensorflow.lite.python.util import get_debug_info as _get_debug_info
from tensorflow.lite.python.util import get_grappler_config as _get_grappler_config
from tensorflow.lite.python.util import get_tensor_name as _get_tensor_name
from tensorflow.lite.python.util import get_tensors_from_tensor_names as _get_tensors_from_tensor_names
from tensorflow.lite.python.util import is_frozen_graph as _is_frozen_graph
from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations
from tensorflow.lite.python.util import set_tensor_shapes as _set_tensor_shapes
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function as _def_function
from tensorflow.python.eager import function as _function
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.keras.saving import saving_utils as _saving_utils
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
from tensorflow.python.saved_model.load import load as _load
from tensorflow.python.util import deprecation as _deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
@_tf_export("lite.Optimize")
class Optimize(enum.Enum):
"""Enum defining the optimizations to apply when generating tflite graphs.
Some optimizations may come at the cost of accuracy.
"""
# Default optimization strategy.
#
# Converter will do its best to improve size and latency based on the
# information provided.
# Enhanced optimizations can be gained by providing a representative_dataset.
# This is recommended, and is currently equivalent to the modes below.
# Currently, weights will be quantized and if representative_dataset is
# provided, activations for quantizable operations will also be quantized.
DEFAULT = "DEFAULT"
# Optimize for size.
#
  # Optimizations that reduce the size of the model.
# Currently, weights will be quantized and if representative_dataset is
# provided, activations for quantizable operations will also be quantized.
OPTIMIZE_FOR_SIZE = "OPTIMIZE_FOR_SIZE"
# Optimize for latency.
#
# Optimizations that reduce the latency of the model.
# Currently, weights will be quantized and if representative_dataset is
# provided, activations for quantizable operations will also be quantized.
OPTIMIZE_FOR_LATENCY = "OPTIMIZE_FOR_LATENCY"
def __str__(self):
return self.value
@_tf_export("lite.RepresentativeDataset")
class RepresentativeDataset(object):
"""Representative dataset to evaluate optimizations.
A representative dataset that can be used to evaluate optimizations by the
  converter. E.g. the converter can use these examples to estimate (min, max)
  ranges by calibrating the model on inputs. This allows the converter to
  quantize a converted floating point model.
"""
def __init__(self, input_gen):
"""Creates a representative dataset.
Args:
input_gen: an input generator that can be used to generate input samples
for the model. This must be a callable object that returns an object
that supports the `iter()` protocol (e.g. a generator function). The
        elements generated must have the same type and shape as the model's
        inputs.
"""
self.input_gen = input_gen
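# Editor's note: a minimal sketch of an input generator suitable for the
# RepresentativeDataset class above (assumes `import numpy as np`; the shape
# and sample count are placeholders):
#
#   def representative_gen():
#     for _ in range(100):
#       yield [np.random.uniform(size=(1, 224, 224, 3)).astype(np.float32)]
#
#   dataset = RepresentativeDataset(representative_gen)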
@_tf_export("lite.TargetSpec")
class TargetSpec(object):
"""Specification of target device.
  Details about the target device. The converter optimizes the generated model
  for the specific device.
Attributes:
supported_ops: Experimental flag, subject to change. Set of OpsSet options
supported by the device. (default set([OpsSet.TFLITE_BUILTINS]))
supported_types: List of types for constant values on the target device.
Supported values are types exported by lite.constants. Frequently, an
optimization choice is driven by the most compact (i.e. smallest)
type in this list (default [constants.FLOAT])
"""
def __init__(self, supported_ops=None, supported_types=None):
if supported_ops is None:
supported_ops = set([OpsSet.TFLITE_BUILTINS])
self.supported_ops = supported_ops
if supported_types is None:
supported_types = []
self.supported_types = supported_types
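# Editor's note: an illustrative TargetSpec requesting float16 constants,
# e.g. for float16 post-training quantization:
#
#   spec = TargetSpec(supported_types=[constants.FLOAT16])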
class TFLiteConverterBase(object):
"""Converter subclass to share functionality between V1 and V2 converters."""
def __init__(self):
self.allow_custom_ops = False
self.target_spec = TargetSpec()
self.optimizations = []
self.representative_dataset = None
self.experimental_enable_mlir_converter = False
self._debug_info = None
def _grappler_config(self):
is_only_flex_enabled = (
set([OpsSet.SELECT_TF_OPS]) == set(self.target_spec.supported_ops))
optimizers = ["constfold"]
if is_only_flex_enabled:
      # The layout optimizer turns NHWC to NCHW. This provides performance
# optimizations when Flex mode is enabled. However, this is not compatible
# with builtin ops.
optimizers.append("layout")
return _get_grappler_config(optimizers)
def _validate_representative_dataset(self):
if self.representative_dataset:
if not isinstance(self.representative_dataset, RepresentativeDataset):
self.representative_dataset = RepresentativeDataset(
self.representative_dataset)
if self.representative_dataset.input_gen is None:
raise ValueError(
"Provide an input generator for representative_dataset")
elif self._is_int8_target_required():
raise ValueError("representative_dataset is required when specifying "
"TFLITE_BUILTINS_INT8 or INT8 supported types.")
def _validate_quantization(self):
if self._is_int8_target_required():
if self.target_spec.supported_types and (self._smallest_supported_type()
!= constants.INT8):
raise ValueError("TFLITE_BUILTINS_INT8 requires smallest supported "
"type to be INT8.")
def _is_int8_target_required(self):
return (set([OpsSet.TFLITE_BUILTINS_INT8]) == set(
self.target_spec.supported_ops) or
self._smallest_supported_type() == constants.INT8)
def _smallest_supported_type(self):
if self.target_spec.supported_types:
return min(self.target_spec.supported_types, key=lambda x: x.size)
else:
return None
def _any_optimization_enabled(self):
return bool(
set(self.optimizations).intersection([
Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE,
Optimize.DEFAULT
]))
def _is_post_training_optimize(self):
return self._is_int8_target_required() or self._any_optimization_enabled()
def _is_int8_weight_only_quantize(self):
return (self._is_post_training_optimize() and
(self.representative_dataset is None))
def _is_float16_quantize(self):
return self._any_optimization_enabled() and (
self._smallest_supported_type() == constants.FLOAT16)
def _is_calibration_quantize(self):
return (self._is_post_training_optimize() and
self.representative_dataset and
self._smallest_supported_type() != constants.FLOAT16)
def _calibrate_quantize_model(self, result, inference_input_type,
inference_output_type):
allow_float = not self._is_int8_target_required()
calibrate_quantize = _calibrator.Calibrator(result)
return calibrate_quantize.calibrate_and_quantize(
self.representative_dataset.input_gen, inference_input_type,
inference_output_type, allow_float)
def _get_base_converter_args(self):
"""Returns the base converter args.
Returns:
{key str: val}
"""
float16_quantize = self._is_float16_quantize()
args = {
"input_format": constants.TENSORFLOW_GRAPHDEF,
"allow_custom_ops": self.allow_custom_ops,
"post_training_quantize": (self._is_int8_weight_only_quantize() or
float16_quantize),
"quantize_to_float16": float16_quantize,
"debug_info": self._debug_info,
"target_ops": self.target_spec.supported_ops,
"enable_mlir_converter": self.experimental_enable_mlir_converter,
}
return args
@_tf_export("lite.TFLiteConverter", v1=[])
class TFLiteConverterV2(TFLiteConverterBase):
"""Converts a TensorFlow model into TensorFlow Lite model.
Attributes:
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
target_spec: Experimental flag, subject to change. Specification of target
device.
optimizations: Experimental flag, subject to change. A list of optimizations
      to apply when converting the model. E.g. `[Optimize.DEFAULT]`
representative_dataset: A representative dataset that can be used to
generate input and output samples for the model. The converter can use the
dataset to evaluate different optimizations.
experimental_enable_mlir_converter: Experimental flag, subject to change.
Enables the MLIR converter instead of the TOCO converter.
Example usage:
```python
# Converting a SavedModel to a TensorFlow Lite model.
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
# Converting a tf.Keras model to a TensorFlow Lite model.
converter = lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Converting ConcreteFunctions to a TensorFlow Lite model.
converter = lite.TFLiteConverter.from_concrete_functions([func])
tflite_model = converter.convert()
```
"""
def __init__(self, funcs, trackable_obj=None):
"""Constructor for TFLiteConverter.
Args:
funcs: List of TensorFlow ConcreteFunctions. The list should not contain
duplicate elements.
trackable_obj: tf.AutoTrackable object associated with `funcs`. A
reference to this object needs to be maintained so that Variables do not
get garbage collected since functions have a weak reference to
Variables. This is only required when the tf.AutoTrackable object is not
maintained by the user (e.g. `from_saved_model`).
"""
super(TFLiteConverterV2, self).__init__()
self._funcs = funcs
self._trackable_obj = trackable_obj
@classmethod
def from_concrete_functions(cls, funcs):
"""Creates a TFLiteConverter object from ConcreteFunctions.
Args:
funcs: List of TensorFlow ConcreteFunctions. The list should not contain
duplicate elements.
Returns:
TFLiteConverter object.
Raises:
      ValueError: Invalid input type.
"""
for func in funcs:
if not isinstance(func, _function.ConcreteFunction):
message = "This function takes in a list of ConcreteFunction."
if isinstance(func, _def_function.Function):
message += (" To get the ConcreteFunction from a Function,"
" call from_concrete_function.")
raise ValueError(message)
return cls(funcs)
@classmethod
def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):
"""Creates a TFLiteConverter object from a SavedModel directory.
Args:
saved_model_dir: SavedModel directory to convert.
signature_keys: List of keys identifying SignatureDef containing inputs
and outputs. Elements should not be duplicated. By default the
        `signatures` attribute of the MetaGraphDef is used. (default
saved_model.signatures)
tags: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present. (default set(SERVING))
Returns:
TFLiteConverter object.
Raises:
      ValueError: Invalid signature keys.
"""
# Ensures any graphs created in Eager mode are able to run. This is required
# in order to create a tf.estimator.Exporter that exports a TFLite model.
with context.eager_mode():
saved_model = _load(saved_model_dir, tags)
if not signature_keys:
signature_keys = saved_model.signatures
funcs = []
for key in signature_keys:
if key not in saved_model.signatures:
raise ValueError("Invalid signature key '{}' found. Valid keys are "
"'{}'.".format(key, ",".join(saved_model.signatures)))
funcs.append(saved_model.signatures[key])
return cls(funcs, saved_model)
@classmethod
def from_keras_model(cls, model):
"""Creates a TFLiteConverter object from a Keras model.
Args:
model: tf.Keras.Model
Returns:
TFLiteConverter object.
"""
func = _saving_utils.trace_model_call(model)
concrete_func = func.get_concrete_function()
return cls([concrete_func])
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format.
Raises:
ValueError:
Multiple concrete functions are specified.
Input shape is not specified.
Invalid quantization parameters.
"""
    # TODO(b/130297984): Add support for converting multiple functions.
if len(self._funcs) != 1:
raise ValueError("This converter can only convert a single "
"ConcreteFunction. Converting multiple functions is "
"under development.")
frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
self._funcs[0], lower_control_flow=False)
input_tensors = [
tensor for tensor in frozen_func.inputs
if tensor.dtype != _dtypes.resource
]
output_tensors = frozen_func.outputs
# Run a Grappler pass.
graph_def = frozen_func.graph.as_graph_def()
graph_def = _run_graph_optimizations(
graph_def,
input_tensors,
output_tensors,
config=self._grappler_config(),
graph=frozen_func.graph)
# Checks dimensions in input tensor.
for tensor in input_tensors:
# Note that shape_list might be empty for scalar shapes.
shape_list = tensor.shape.as_list()
if None in shape_list[1:]:
raise ValueError(
"None is only supported in the 1st dimension. Tensor '{0}' has "
"invalid shape '{1}'.".format(_get_tensor_name(tensor), shape_list))
elif shape_list and shape_list[0] is None:
# Set the batch size to 1 if undefined.
shape = tensor.shape.as_list()
shape[0] = 1
tensor.set_shape(shape)
self._validate_quantization()
self._validate_representative_dataset()
self._debug_info = _get_debug_info(
_build_debug_info_func(self._funcs[0].graph), graph_def)
converter_kwargs = self._get_base_converter_args()
# Converts model.
result = _toco_convert_impl(
input_data=graph_def,
input_tensors=input_tensors,
output_tensors=output_tensors,
**converter_kwargs)
if self._is_calibration_quantize():
result = self._calibrate_quantize_model(result, constants.FLOAT,
constants.FLOAT)
return result
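# Editor's note: a sketch of post-training quantization with the V2 converter
# above; `concrete_func` and `representative_gen` are placeholders. A callable
# generator is wrapped into a RepresentativeDataset during validation:
#
#   converter = TFLiteConverterV2.from_concrete_functions([concrete_func])
#   converter.optimizations = [Optimize.DEFAULT]
#   converter.representative_dataset = representative_gen
#   tflite_model = converter.convert()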
@_tf_export(v1=["lite.TFLiteConverter"])
class TFLiteConverter(TFLiteConverterBase):
"""Convert a TensorFlow model into `output_format`.
This is used to convert from a TensorFlow GraphDef, SavedModel or tf.keras
model into either a TFLite FlatBuffer or graph visualization.
Attributes:
inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8}`. If `optimizations` are provided, this
parameter is ignored. (default tf.float32)
inference_input_type: Target data type of real-number input arrays. Allows
for a different type for input arrays.
If an integer type is provided and `optimizations` are not used,
`quantized_inputs_stats` must be provided.
If `inference_type` is tf.uint8, signaling conversion to a fully quantized
model from a quantization-aware trained input model, then
`inference_input_type` defaults to tf.uint8.
In all other cases, `inference_input_type` defaults to tf.float32.
Must be `{tf.float32, tf.uint8, tf.int8}`
inference_output_type: Target data type of real-number output arrays. Allows
for a different type for output arrays.
If `inference_type` is tf.uint8, signaling conversion to a fully quantized
model from a quantization-aware trained output model, then
`inference_output_type` defaults to tf.uint8.
      In all other cases, `inference_output_type` must be tf.float32; an error
will be thrown otherwise.
Must be `{tf.float32, tf.uint8, tf.int8}`
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: Dict of strings representing input tensor names
mapped to tuple of floats representing the mean and standard deviation
      of the training data (e.g., {"foo" : (0., 1.)}). Only needed if
`inference_input_type` is `QUANTIZED_UINT8`.
real_input_value = (quantized_input_value - mean_value) / std_dev_value.
(default {})
default_ranges_stats: Tuple of integers representing (min, max) range values
for all arrays without a specified range. Intended for experimenting with
quantization via "dummy quantization". (default None)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. When true,
the ranges of the concat operator's inputs and outputs are made to overlap.
(default False)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
post_training_quantize: Deprecated. Please specify `[Optimize.DEFAULT]` for
`optimizations` instead. Boolean indicating whether to quantize the
weights of the converted float model. Model size will be reduced and
there will be latency improvements (at the cost of accuracy).
(default False)
dump_graphviz_dir: Full filepath of folder to dump the graphs at various
stages of processing GraphViz .dot files. Preferred over
--output_format=GRAPHVIZ_DOT in order to keep the requirements of the
output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the graph after
every graph transformation. (default False)
target_ops: Deprecated. Please specify `target_spec.supported_ops` instead.
Set of OpsSet options indicating which converter to use.
(default set([OpsSet.TFLITE_BUILTINS]))
target_spec: Experimental flag, subject to change. Specification of target
device.
optimizations: Experimental flag, subject to change. A list of optimizations
to apply when converting the model. E.g. `[Optimize.DEFAULT]`
representative_dataset: A representative dataset that can be used to
generate input and output samples for the model. The converter can use
the dataset to evaluate different optimizations.
experimental_enable_mlir_converter: Experimental flag, subject to change.
Enables the MLIR converter instead of the TOCO converter.
Example usage:
```python
# Converting a GraphDef from session.
converter = lite.TFLiteConverter.from_session(sess, in_tensors, out_tensors)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a GraphDef from file.
converter = lite.TFLiteConverter.from_frozen_graph(
graph_def_file, input_arrays, output_arrays)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a SavedModel.
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a tf.keras model.
converter = lite.TFLiteConverter.from_keras_model_file(keras_model)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
```
"""
def __init__(self,
graph_def,
input_tensors,
output_tensors,
input_arrays_with_shape=None,
output_arrays=None,
experimental_debug_info_func=None):
"""Constructor for TFLiteConverter.
Args:
graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes
(e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
into TensorFlow and when `input_tensors` and `output_tensors` are
None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `input_tensors` and
`output_tensors` are None. (default None)
experimental_debug_info_func: An experimental function to retrieve the
graph debug info for a set of nodes from the `graph_def`.
Raises:
ValueError: Invalid arguments.
"""
super(TFLiteConverter, self).__init__()
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self.inference_type = constants.FLOAT
self.inference_input_type = None
self.inference_output_type = None
self.output_format = constants.TFLITE
self.quantized_input_stats = {}
self.default_ranges_stats = None
self.drop_control_dependency = True
self.reorder_across_fake_quant = False
self.change_concat_input_ranges = False
self._post_training_quantize = False
self.dump_graphviz_dir = None
self.dump_graphviz_video = False
self._debug_info_func = experimental_debug_info_func
# Attributes are used by models that cannot be loaded into TensorFlow.
if not self._has_valid_tensors():
if not input_arrays_with_shape or not output_arrays:
raise ValueError(
"If input_tensors and output_tensors are None, both "
"input_arrays_with_shape and output_arrays must be defined.")
self._input_arrays_with_shape = input_arrays_with_shape
self._output_arrays = output_arrays
@classmethod
def from_session(cls, sess, input_tensors, output_tensors):
"""Creates a TFLiteConverter class from a TensorFlow Session.
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
Returns:
TFLiteConverter class.
"""
graph_def = _freeze_graph(sess, input_tensors, output_tensors)
return cls(
graph_def,
input_tensors,
output_tensors,
experimental_debug_info_func=_build_debug_info_func(sess.graph))
@classmethod
def from_frozen_graph(cls,
graph_def_file,
input_arrays,
output_arrays,
input_shapes=None):
"""Creates a TFLiteConverter class from a file containing a frozen GraphDef.
Args:
graph_def_file: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when an input shape is None (e.g., {"foo" :
None}). (default None)
Returns:
TFLiteConverter class.
Raises:
IOError:
File not found.
Unable to parse input file.
ValueError:
The graph is not frozen.
input_arrays or output_arrays contains an invalid tensor name.
input_shapes is not correctly defined when required.
"""
with _ops.Graph().as_default():
with _session.Session() as sess:
# Read GraphDef from file.
if not _file_io.file_exists(graph_def_file):
raise IOError("File '{0}' does not exist.".format(graph_def_file))
with _file_io.FileIO(graph_def_file, "rb") as f:
file_content = f.read()
try:
graph_def = _graph_pb2.GraphDef()
graph_def.ParseFromString(file_content)
except (_text_format.ParseError, DecodeError):
try:
print("Ignore 'tcmalloc: large alloc' warnings.")
if not isinstance(file_content, str):
if PY3:
file_content = file_content.decode("utf-8")
else:
file_content = file_content.encode("utf-8")
graph_def = _graph_pb2.GraphDef()
_text_format.Merge(file_content, graph_def)
except (_text_format.ParseError, DecodeError):
raise IOError(
"Unable to parse input file '{}'.".format(graph_def_file))
# Handles models with custom TFLite ops that cannot be resolved in
# TensorFlow.
load_model_in_session = True
try:
_import_graph_def(graph_def, name="")
except _NotFoundError:
load_model_in_session = False
if load_model_in_session:
# Check if graph is frozen.
if not _is_frozen_graph(sess):
raise ValueError("Please freeze the graph using freeze_graph.py.")
# Get input and output tensors.
input_tensors = _get_tensors_from_tensor_names(
sess.graph, input_arrays)
output_tensors = _get_tensors_from_tensor_names(
sess.graph, output_arrays)
_set_tensor_shapes(input_tensors, input_shapes)
return cls(sess.graph_def, input_tensors, output_tensors)
else:
if not input_shapes:
raise ValueError("input_shapes must be defined for this model.")
if set(input_arrays) != set(input_shapes.keys()):
raise ValueError("input_shapes must contain a value for each item "
"in input_array.")
input_arrays_with_shape = [
(name, input_shapes[name]) for name in input_arrays
]
return cls(
graph_def,
input_tensors=None,
output_tensors=None,
input_arrays_with_shape=input_arrays_with_shape,
output_arrays=output_arrays)
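# A usage sketch (the file path and tensor names are assumptions, not part
# of the original source): converting a frozen graph whose input shape must
# be pinned explicitly via `input_shapes`.
#
#   converter = TFLiteConverter.from_frozen_graph(
#       "/tmp/frozen.pb",  # hypothetical path
#       input_arrays=["input"],
#       output_arrays=["output"],
#       input_shapes={"input": [1, 224, 224, 3]})
#   tflite_model = converter.convert()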
@classmethod
def from_saved_model(cls,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
"""Creates a TFLiteConverter class from a SavedModel.
Args:
saved_model_dir: SavedModel directory to convert.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when an input shape is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present. (default set("serve"))
signature_key: Key identifying SignatureDef containing inputs and outputs.
(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)
Returns:
TFLiteConverter class.
"""
if tag_set is None:
tag_set = set([_tag_constants.SERVING])
if signature_key is None:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
output_arrays, tag_set, signature_key)
return cls(
graph_def=result[0],
input_tensors=result[1],
output_tensors=result[2],
experimental_debug_info_func=_build_debug_info_func(result[3]))
@classmethod
def from_keras_model_file(cls,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None,
custom_objects=None):
"""Creates a TFLiteConverter class from a tf.keras model file.
Args:
model_file: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when an input shape is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
custom_objects: Dict mapping names (strings) to custom classes or
functions to be considered during model deserialization. (default None)
Returns:
TFLiteConverter class.
"""
# Handles Keras when Eager mode is enabled.
if context.executing_eagerly():
if input_arrays or output_arrays:
raise ValueError("`input_arrays` and `output_arrays` are unsupported "
"with Eager mode. If your model requires any of these "
"parameters, please use disable_eager_execution().")
_keras.backend.set_learning_phase(False)
keras_model = _keras.models.load_model(model_file, custom_objects)
function = _saving_utils.trace_model_call(keras_model)
concrete_func = function.get_concrete_function()
frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
concrete_func, lower_control_flow=False)
_set_tensor_shapes(frozen_func.inputs, input_shapes)
return cls(
frozen_func.graph.as_graph_def(),
frozen_func.inputs,
frozen_func.outputs,
experimental_debug_info_func=_build_debug_info_func(
frozen_func.graph))
# Handles Keras when Eager mode is disabled.
_keras.backend.clear_session()
_keras.backend.set_learning_phase(False)
keras_model = _keras.models.load_model(model_file, custom_objects)
sess = _keras.backend.get_session()
# Get input and output tensors.
if input_arrays:
input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
else:
input_tensors = keras_model.inputs
if output_arrays:
output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
else:
output_tensors = keras_model.outputs
_set_tensor_shapes(input_tensors, input_shapes)
graph_def = _freeze_graph(sess, input_tensors, output_tensors)
return cls(
graph_def,
input_tensors,
output_tensors,
experimental_debug_info_func=_build_debug_info_func(sess.graph))
def __setattr__(self, name, value):
if name == "post_training_quantize":
warnings.warn("Property %s is deprecated, "
"please use optimizations=[Optimize.DEFAULT]"
" instead." % name)
if value:
self.optimizations = [Optimize.DEFAULT]
else:
self.optimizations = []
return
if name == "target_ops":
warnings.warn("Property %s is deprecated, please use "
"target_spec.supported_ops instead." % name)
self.target_spec.supported_ops = value
return
object.__setattr__(self, name, value)
def __getattribute__(self, name):
if name == "post_training_quantize":
warnings.warn("Property %s is deprecated, "
"please use optimizations=[Optimize.DEFAULT]"
" instead." % name)
return Optimize.DEFAULT in set(self.optimizations)
if name == "target_ops":
warnings.warn("Property %s is deprecated, please use "
"target_spec.supported_ops instead." % name)
return self.target_spec.supported_ops
return object.__getattribute__(self, name)
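# A brief sketch (not part of the original source) of what the deprecation
# shims above do: each deprecated property is forwarded to its replacement,
# so the statements in each pair below are equivalent apart from the warning.
#
#   converter.post_training_quantize = True
#   converter.optimizations = [Optimize.DEFAULT]
#
#   converter.target_ops = set([OpsSet.TFLITE_BUILTINS])
#   converter.target_spec.supported_ops = set([OpsSet.TFLITE_BUILTINS])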
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format. Either a TFLite Flatbuffer or a
Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
"""
# Checks dimensions in input tensor.
if self._has_valid_tensors():
for tensor in self._input_tensors:
shape = tensor.shape
if not shape:
raise ValueError("Provide an input shape for input array "
"'{0}'.".format(_get_tensor_name(tensor)))
# Note that shape_list might be empty for scalar shapes.
shape_list = shape.as_list()
if None in shape_list[1:]:
raise ValueError(
"None is only supported in the 1st dimension. Tensor '{0}' has "
"invalid shape '{1}'.".format(
_get_tensor_name(tensor), shape_list))
elif shape_list and shape_list[0] is None:
self._set_batch_size(batch_size=1)
# Get quantization stats. Ensures there is one stat per name if the stats
# are specified.
if self.quantized_input_stats:
quantized_stats = []
invalid_stats = []
for name in self.get_input_arrays():
if name in self.quantized_input_stats:
quantized_stats.append(self.quantized_input_stats[name])
else:
invalid_stats.append(name)
if invalid_stats:
raise ValueError("Quantization input stats are not available for input "
"tensors '{0}'.".format(",".join(invalid_stats)))
else:
quantized_stats = None
self._validate_quantization()
self._validate_representative_dataset()
toco_inference_input_type = self.inference_input_type
inference_input_type = self.inference_input_type
inference_output_type = self.inference_output_type
post_training_optimize = self._is_post_training_optimize()
if post_training_optimize:
# Post training optimizations require that TOCO outputs a float model.
if self.inference_type != constants.FLOAT:
raise ValueError(
"`optimizations` require that `inference_type` is set to float.")
toco_inference_input_type = constants.FLOAT
# Set up default values.
if inference_input_type is None:
inference_input_type = constants.FLOAT
if inference_output_type is None:
inference_output_type = constants.FLOAT
weight_only_quantize = self._is_int8_weight_only_quantize()
if weight_only_quantize:
# Currently, weight only quantization requires float inputs and outputs.
if (inference_input_type != constants.FLOAT or
inference_output_type != constants.FLOAT):
raise ValueError(
"Provide an inference_input_type and inference_output_type of type "
"tf.float32.")
if not post_training_optimize and self.inference_output_type is not None:
raise ValueError(
"inference_output_type is currently not supported if optimizations "
"are not enabled.")
optimized_graph = self._graph_def
if self.inference_type != constants.QUANTIZED_UINT8:
try:
optimized_graph = _run_graph_optimizations(
self._graph_def,
self._input_tensors,
self._output_tensors,
config=self._grappler_config())
except Exception:
optimized_graph = self._graph_def
self._debug_info = _get_debug_info(self._debug_info_func, optimized_graph)
converter_kwargs = self._get_base_converter_args()
converter_kwargs.update({
"inference_type": self.inference_type,
"inference_input_type": toco_inference_input_type,
"output_format": self.output_format,
"quantized_input_stats": quantized_stats,
"default_ranges_stats": self.default_ranges_stats,
"drop_control_dependency": self.drop_control_dependency,
"reorder_across_fake_quant": self.reorder_across_fake_quant,
"change_concat_input_ranges": self.change_concat_input_ranges,
"dump_graphviz_dir": self.dump_graphviz_dir,
"dump_graphviz_video": self.dump_graphviz_video
})
# Converts model.
if self._has_valid_tensors():
result = _toco_convert_impl(
input_data=optimized_graph,
input_tensors=self._input_tensors,
output_tensors=self._output_tensors,
**converter_kwargs)
else:
result = _toco_convert_graph_def(
input_data=optimized_graph,
input_arrays_with_shape=self._input_arrays_with_shape,
output_arrays=self._output_arrays,
**converter_kwargs)
if self._is_calibration_quantize():
result = self._calibrate_quantize_model(result, inference_input_type,
inference_output_type)
return result
def get_input_arrays(self):
"""Returns a list of the names of the input tensors.
Returns:
List of strings.
"""
if self._has_valid_tensors():
return [_get_tensor_name(tensor) for tensor in self._input_tensors]
else:
return [name for name, _ in self._input_arrays_with_shape]
def _has_valid_tensors(self):
"""Checks if the input and output tensors have been initialized.
Returns:
Bool.
"""
return self._input_tensors and self._output_tensors
def _set_batch_size(self, batch_size):
"""Sets the first dimension of the input tensor to `batch_size`.
Args:
batch_size: Batch size for the model. Replaces the first dimension of an
input size array if undefined. (default 1)
Raises:
ValueError: input_tensor is not defined.
"""
if not self._has_valid_tensors():
raise ValueError("The batch size cannot be set for this model. Please "
"use input_shapes parameter.")
for tensor in self._input_tensors:
shape = tensor.shape.as_list()
if shape[0] is None:
shape[0] = batch_size
tensor.set_shape(shape)
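# Illustrative note (not part of the original source): for an input tensor of
# shape [None, 16, 16, 3], _set_batch_size(batch_size=1) rewrites the static
# shape to [1, 16, 16, 3]; dimensions beyond the first are never modified.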
@_tf_export(v1=["lite.TocoConverter"])
class TocoConverter(object):
"""Convert a TensorFlow model into `output_format` using TOCO.
This class has been deprecated. Please use `lite.TFLiteConverter` instead.
"""
@classmethod
@_deprecation.deprecated(None,
"Use `lite.TFLiteConverter.from_session` instead.")
def from_session(cls, sess, input_tensors, output_tensors):
"""Creates a TocoConverter class from a TensorFlow Session."""
return TFLiteConverter.from_session(sess, input_tensors, output_tensors)
@classmethod
@_deprecation.deprecated(
None, "Use `lite.TFLiteConverter.from_frozen_graph` instead.")
def from_frozen_graph(cls,
graph_def_file,
input_arrays,
output_arrays,
input_shapes=None):
"""Creates a TocoConverter class from a file containing a frozen graph."""
return TFLiteConverter.from_frozen_graph(graph_def_file, input_arrays,
output_arrays, input_shapes)
@classmethod
@_deprecation.deprecated(
None, "Use `lite.TFLiteConverter.from_saved_model` instead.")
def from_saved_model(cls,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
"""Creates a TocoConverter class from a SavedModel."""
return TFLiteConverter.from_saved_model(saved_model_dir, input_arrays,
input_shapes, output_arrays,
tag_set, signature_key)
@classmethod
@_deprecation.deprecated(
None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
def from_keras_model_file(cls,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None):
"""Creates a TocoConverter class from a tf.keras model file."""
return TFLiteConverter.from_keras_model_file(model_file, input_arrays,
input_shapes, output_arrays)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/lite.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.lite.python import lite
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
class TestModels(test_util.TensorFlowTestCase):
def _evaluateTFLiteModel(self, tflite_model, input_data):
"""Evaluates the model on the `input_data`."""
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
for input_tensor, tensor_data in zip(input_details, input_data):
interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())
interpreter.invoke()
return interpreter.get_tensor(output_details[0]['index'])
def _getSimpleVariableModel(self):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
return root
def _getMultiFunctionModel(self):
class BasicModel(tracking.AutoTrackable):
def __init__(self):
self.y = None
self.z = None
@def_function.function
def add(self, x):
if self.y is None:
self.y = variables.Variable(2.)
return x + self.y
@def_function.function
def sub(self, x):
if self.z is None:
self.z = variables.Variable(3.)
return x - self.z
return BasicModel()
def _assertValidDebugInfo(self, debug_info):
"""Verify the DebugInfo is valid."""
file_names = set()
for file_path in debug_info.files:
file_names.add(os.path.basename(file_path))
# To make the test independent on how the nodes are created, we only assert
# the name of this test file.
self.assertIn('lite_v2_test.py', file_names)
self.assertNotIn('lite_test.py', file_names)
class FromConcreteFunctionTest(TestModels):
@test_util.run_v2_only
def testTypeInvalid(self):
root = self._getSimpleVariableModel()
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_concrete_functions([root.f])
self.assertIn('call from_concrete_function', str(error.exception))
@test_util.run_v2_only
def testFloat(self):
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testScalarInput(self):
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testMultiFunctionModel(self):
"""Convert a single model in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = constant_op.constant(1., shape=[1])
concrete_func = root.add.get_concrete_function(input_data)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.add(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testConvertMultipleFunctions(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = constant_op.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
# Try converting multiple functions.
converter = lite.TFLiteConverterV2.from_concrete_functions(
[add_func, sub_func])
with self.assertRaises(ValueError) as error:
_ = converter.convert()
self.assertIn('can only convert a single ConcreteFunction',
str(error.exception))
def _getCalibrationQuantizeModel(self):
np.random.seed(0)
root = tracking.AutoTrackable()
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[1, 5, 5, 3], dtype=dtypes.float32)
])
def func(inp):
conv = nn_ops.conv2d(
inp,
filter=array_ops.ones([3, 3, 3, 16]),
strides=[1, 1, 1, 1],
padding='SAME')
output = nn_ops.relu(conv, name='output')
return output
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
root.f = func
to_save = root.f.get_concrete_function()
return (to_save, calibration_gen)
def testPostTrainingCalibrateAndQuantize(self):
func, calibration_gen = self._getCalibrationQuantizeModel()
# Convert float model.
float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
def testCalibrateAndQuantizeBuiltinInt8(self):
func, calibration_gen = self._getCalibrationQuantizeModel()
# Convert float model.
float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert model by specifying target spec (instead of optimizations), since
# when targeting an integer only backend, quantization is mandatory.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a concrete function has debug info captured."""
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.f = def_function.function(lambda x: root.v1 * x)
input_data = constant_op.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
class FromSavedModelTest(TestModels):
@test_util.run_v2_only
def testConstModel(self):
"""Test a basic model with functions to make sure functions are inlined."""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testVariableModel(self):
"""Test a basic model with Variables with saving/loading the SavedModel."""
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testSignatures(self):
"""Test values for `signature_keys` argument."""
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model with invalid `signature_keys`.
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=['INVALID'])
self.assertIn("Invalid signature key 'INVALID'", str(error.exception))
# Convert model with empty `signature_keys`.
converter = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=[])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testMultipleFunctionModel(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = constant_op.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, {'add': add_func, 'sub': sub_func})
# Ensure the converter picks up both functions.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
self.assertEqual(len(converter._funcs), 2)
# Try converting multiple functions.
with self.assertRaises(ValueError) as error:
_ = converter.convert()
self.assertIn('This converter can only convert a single ConcreteFunction',
str(error.exception))
@test_util.run_v2_only
def testKerasSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = constant_op.constant(1., shape=[1, 1])
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = keras.models.Sequential([
keras.layers.Dropout(0.2),
keras.layers.Dense(1),
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(model, save_dir)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a SavedModel has debug info captured."""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
class FromKerasModelTest(TestModels):
@test_util.run_v2_only
def testSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = constant_op.constant(1., shape=[1, 1])
# Create a simple Keras model.
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = keras.models.Sequential([
keras.layers.Dropout(0.2),
keras.layers.Dense(units=1, input_shape=[1])
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testSequentialMultiInputOutputModel(self):
"""Test a tf.Keras model with multiple inputs and outputs."""
left_input_data = constant_op.constant(1., shape=[1, 3])
right_input_data = constant_op.constant(1., shape=[1, 3])
# Create a simple Keras model.
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_c_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 2))
input_a = keras.layers.Input(shape=(3,), name='input_a')
input_b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
interm_b = dense(input_b)
merged = keras.layers.concatenate([interm_a, interm_b], name='merge')
output_c = keras.layers.Dense(
3, activation='softmax', name='dense_2')(
merged)
output_d = keras.layers.Dense(
2, activation='softmax', name='dense_3')(
merged)
model = keras.models.Model(
inputs=[input_a, input_b], outputs=[output_c, output_d])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit([input_a_np, input_b_np], [output_c_np, output_d_np], epochs=1)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
# Check values from converted model.
input_data = [left_input_data, right_input_data]
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, input_data)
for tf_result, tflite_result in zip(expected_value, actual_value):
np.testing.assert_almost_equal(tf_result[0], tflite_result, 5)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a tf.Keras model has debug info captured."""
# Create a simple Keras model.
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
model = keras.models.Sequential(
[keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
converter = lite.TFLiteConverterV2.from_keras_model(model)
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
class GrapplerTest(TestModels):
@test_util.run_v2_only
def testConstantFolding(self):
# Constant folding handles the tf.broadcast_to operation, which was not
# supported by TFLite at the time this test was added.
input_data = constant_op.constant([1., 2., 3., 4., 5., 6., 7., 8., 9.],
shape=[3, 3])
@def_function.function
def func(x):
y_const = constant_op.constant([1., 2., 3.])
y_broadcast = gen_array_ops.broadcast_to(y_const, [3, 3])
return math_ops.matmul(x, y_broadcast)
root = tracking.AutoTrackable()
root.f = func
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
np.testing.assert_array_equal(expected_value.numpy(), actual_value)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/lite_v2_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite Python Interface: Sanity check."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.lite.python import convert
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import op_hint
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
from tensorflow.python.framework.graph_util_impl import _node_name
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ConvertTest(test_util.TensorFlowTestCase):
def testBasic(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Try running on valid graph
tflite_model = convert.toco_convert(sess.graph_def, [in_tensor],
[out_tensor])
self.assertTrue(tflite_model)
def testQuantization(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1.)
sess = session.Session()
tflite_model = convert.toco_convert(
sess.graph_def, [in_tensor], [out_tensor],
inference_type=lite_constants.QUANTIZED_UINT8,
quantized_input_stats=[(0., 1.)])
self.assertTrue(tflite_model)
def testQuantizationInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1.)
sess = session.Session()
with self.assertRaises(ValueError) as error:
convert.toco_convert(
sess.graph_def, [in_tensor], [out_tensor],
inference_type=lite_constants.QUANTIZED_UINT8)
self.assertEqual(
"std_dev and mean must be defined when inference_input_type is "
"QUANTIZED_UINT8.", str(error.exception))
def testGraphDefBasic(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="input")
_ = in_tensor + in_tensor
sess = session.Session()
tflite_model = convert.toco_convert_graph_def(
sess.graph_def, [("input", [1, 16, 16, 3])], ["add"],
enable_mlir_converter=False,
inference_type=lite_constants.FLOAT)
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual("input", input_details[0]["name"])
self.assertEqual(np.float32, input_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
self.assertEqual((0., 0.), input_details[0]["quantization"])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual("add", output_details[0]["name"])
self.assertEqual(np.float32, output_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
self.assertEqual((0., 0.), output_details[0]["quantization"])
def testGraphDefQuantization(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA")
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB")
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name="output")
sess = session.Session()
input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])]
output_arrays = ["output"]
tflite_model = convert.toco_convert_graph_def(
sess.graph_def,
input_arrays_map,
output_arrays,
enable_mlir_converter=False,
inference_type=lite_constants.QUANTIZED_UINT8,
quantized_input_stats=[(0., 1.), (0., 1.)])
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual("inputA", input_details[0]["name"])
self.assertEqual(np.uint8, input_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
self.assertEqual((1., 0.),
input_details[0]["quantization"]) # scale, zero_point
self.assertEqual("inputB", input_details[1]["name"])
self.assertEqual(np.uint8, input_details[1]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[1]["shape"]).all())
self.assertEqual((1., 0.),
input_details[1]["quantization"]) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual("output", output_details[0]["name"])
self.assertEqual(np.uint8, output_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
self.assertTrue(output_details[0]["quantization"][0] > 0) # scale
def testGraphDefQuantizationInvalid(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA")
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB")
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name="output")
sess = session.Session()
input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])]
output_arrays = ["output"]
with self.assertRaises(ValueError) as error:
convert.toco_convert_graph_def(
sess.graph_def,
input_arrays_map,
output_arrays,
enable_mlir_converter=False,
inference_type=lite_constants.QUANTIZED_UINT8)
self.assertEqual(
"std_dev and mean must be defined when inference_input_type is "
"QUANTIZED_UINT8.", str(error.exception))
class ConvertTestOpHint(test_util.TensorFlowTestCase):
"""Test the hint to stub functionality."""
def _getGraphOpTypes(self, graphdef, output_nodes):
"""Returns used op types in `graphdef` reachable from `output_nodes`.
This is used to check that after the stub transformation the expected
nodes are there.
NOTE: this is not an exact test that the graph is the correct output, but
it balances compact expressibility of the test with sanity checking.
Args:
graphdef: TensorFlow proto graphdef.
output_nodes: A list of output node names that we need to reach.
Returns:
A set of node types reachable from `output_nodes`.
"""
name_to_input_name, name_to_node, _ = (
_extract_graph_summary(graphdef))
# Find all nodes that are needed by the outputs
used_node_names = _bfs_for_reachable_nodes(output_nodes, name_to_input_name)
return set([name_to_node[node_name].op for node_name in used_node_names])
def _countIdentities(self, nodes):
"""Count the number of "Identity" op types in the list of proto nodes.
Args:
nodes: NodeDefs of the graph.
Returns:
The number of nodes with op type "Identity" found.
"""
return len([x for x in nodes if x.op == "Identity"])
def testSwishLiteHint(self):
"""Makes a custom op swish and makes sure it gets converted as a unit."""
with ops.Graph().as_default():
image = array_ops.constant([1., 2., 3., 4.])
swish_scale = array_ops.constant(1.0)
def _swish(input_tensor, scale):
custom = op_hint.OpHint("cool_activation")
input_tensor, scale = custom.add_inputs(input_tensor, scale)
output = math_ops.sigmoid(input_tensor) * input_tensor * scale
output, = custom.add_outputs(output)
return output
output = array_ops.identity(
_swish(image, swish_scale), name="ModelOutput")
with self.cached_session() as sess:
# check if identities have been put into the graph (2 input, 1 output,
# and 1 final output).
self.assertEqual(self._countIdentities(sess.graph_def.node), 4)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["cool_activation", "Const", "Identity"]))
def testScaleAndBiasAndIdentity(self):
"""This tests a scaled add which has 3 inputs and 2 outputs."""
with ops.Graph().as_default():
a = array_ops.constant(1.)
x = array_ops.constant([2., 3.])
b = array_ops.constant([4., 5.])
def _scaled_and_bias_and_identity(a, x, b):
custom = op_hint.OpHint("scale_and_bias_and_identity")
a, x, b = custom.add_inputs(a, x, b)
return custom.add_outputs(a * x + b, x)
output = array_ops.identity(
_scaled_and_bias_and_identity(a, x, b), name="ModelOutput")
with self.cached_session() as sess:
# make sure one identity for each input (3) and output (2) => 3 + 2 = 5
# +1 for the final output
self.assertEqual(self._countIdentities(sess.graph_def.node), 6)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["scale_and_bias_and_identity", "Const", "Identity", "Pack"]))
def testTwoFunctions(self):
"""Tests if two functions are converted correctly."""
with ops.Graph().as_default():
a = array_ops.constant([1.])
b = array_ops.constant([1.])
def _double_values(x):
custom = op_hint.OpHint("add_test")
x, = custom.add_inputs(x)
output = math_ops.multiply(x, x)
output, = custom.add_outputs(output)
return output
output = array_ops.identity(
math_ops.add(_double_values(a), _double_values(b)),
name="ModelOutput")
with self.cached_session() as sess:
# make sure one identity for each input (2) and output (2) => 2 + 2
# +1 for the final output
self.assertEqual(self._countIdentities(sess.graph_def.node), 5)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["add_test", "Const", "Identity", "Add"]))
def _get_input_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_INPUT_INDEX_ATTR].i
def _get_output_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i
def _get_sort_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_SORT_INDEX_ATTR].i
def testTags(self):
"""Test if multiple args with the same tag are grouped."""
with ops.Graph().as_default():
a = array_ops.constant([1.])
b = array_ops.constant([2.])
c = array_ops.constant([3.])
d = array_ops.constant([4.])
custom = op_hint.OpHint("test_tag")
a = custom.add_input(
a, tag="mytag", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b, = custom.add_inputs(b)
c = custom.add_input(
c, tag="mytag", aggregate=op_hint.OpHint.AGGREGATE_STACK)
d = custom.add_input(
d, tag="mytag2", aggregate=op_hint.OpHint.AGGREGATE_STACK)
res = math_ops.add(math_ops.mul(a, b), math_ops.mul(c, b))
custom.add_outputs([res])
with self.cached_session():
self.assertEqual(self._get_input_index(a), 0)
self.assertEqual(self._get_sort_index(a), 0)
self.assertEqual(self._get_input_index(b), 1)
self.assertEqual(self._get_sort_index(b), 0)
self.assertEqual(self._get_input_index(c), 0)
self.assertEqual(self._get_sort_index(c), 1)
def testOverrideIndex(self):
with ops.Graph().as_default():
a = array_ops.constant([1.])
b = array_ops.constant([2.])
c = array_ops.constant([3.])
custom = op_hint.OpHint("test_override")
b = custom.add_input(b) # should auto assign 0
a = custom.add_input(a, index_override=1)
c = custom.add_input(c) # should auto assign 2
with self.cached_session():
self.assertEqual(self._get_input_index(a), 1)
self.assertEqual(self._get_input_index(b), 0)
self.assertEqual(self._get_input_index(c), 2)
def testAggregate(self):
with ops.Graph().as_default():
a = array_ops.constant([3., 4.])
b = array_ops.constant([5., 6.])
hint = op_hint.OpHint("agg")
a0, a1 = array_ops.unstack(a)
b0, b1 = array_ops.unstack(b)
a0 = hint.add_input(a0, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b0 = hint.add_input(b0, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
a1 = hint.add_input(a1, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b1 = hint.add_input(b1, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
c0 = math_ops.add(a0, b0, name="addleft")
c1 = math_ops.add(a1, b1, name="addright")
c0 = hint.add_output(
c0, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK)
c1 = hint.add_output(
c1, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK)
curr = array_ops.stack([c0, c1])
output = array_ops.identity(curr, name="FINAL_OUTPUT")
with self.cached_session() as sess:
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["agg", "Const", "Identity"]))
def testFindHintedOutputNodes(self):
"""Test if all hinted output nodes are correctly found."""
with ops.Graph().as_default():
def _build_ophinted_op(name, input1, input2):
custom_op = op_hint.OpHint(name)
input1 = custom_op.add_input(input1)
input2 = custom_op.add_input(input2)
output = math_ops.mul(input1, input2)
return custom_op.add_output(output)
output_1 = _build_ophinted_op("custom_op_1", array_ops.constant([1.]),
array_ops.constant([2.]))
output_2 = _build_ophinted_op("custom_op_2", array_ops.constant([3.]),
array_ops.constant([4.]))
with self.cached_session() as sess:
hinted_outputs_nodes = op_hint.find_all_hinted_output_nodes(sess)
expected_hinted_output_nodes = [
_node_name(output_1.name),
_node_name(output_2.name)
]
self.assertEqual(
len(hinted_outputs_nodes), len(expected_hinted_output_nodes))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/convert_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Replaces a subgraph of a TensorFlow GraphDef with a single node.
In conjunction with TOCO's --allow_custom_op this script allows selected
portions of a TensorFlow GraphDef to be executed by custom code.
Example:
bazel run tensorflow/lite/python:create_custom_op -- \
--input_graph=/tmp/input.pb \
--output_graph=/tmp/output.pb \
--inputs=concat,concat_1 \
--outputs=detection_classes \
--op_definition='op:"PostProcessing" attr{key:"num" value:{i:10}}'
The above will identify a subgraph starting at nodes 'concat' and 'concat_1',
and ending at 'detection_classes'. All nodes in between will be removed and
replaced by a new op called 'PostProcessing'.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid as _uuid
from absl import app
from absl import flags
from google.protobuf import text_format
from tensorflow.contrib.framework.python.framework.graph_util import fuse_op
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.platform import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input_graph", "", "Binary graphdef to load.")
flags.DEFINE_string("output_graph", "", "Resulting binary graphdef.")
flags.DEFINE_string("inputs", "",
"Comma-separated list of inputs to the subgraph.")
flags.DEFINE_string("outputs", "",
"Comma-separated list of outputs of the subgraph.")
flags.DEFINE_string("op_definition", "",
"A text NodeDef defining the contents of the custom op.")
def _read_graph_def(filename):
if not gfile.Exists(filename):
raise ValueError("Input graph file '" + filename + "' does not exist!")
graph_def = graph_pb2.GraphDef()
with gfile.GFile(filename, "rb") as f:
graph_def.ParseFromString(f.read())
return graph_def
def _write_graph_def(graph_def, filename):
if not filename:
raise ValueError("Output graph file not specified")
with gfile.Open(filename, "wb") as f:
f.write(graph_def.SerializeToString())
def _collapse_subgraph(graph_def, inputs, outputs, op_definition):
"""Substitute a custom op for the subgraph delimited by inputs and outputs."""
name = _uuid.uuid1().hex
# We need a default type, but it can be changed using 'op_definition'.
default_type = types_pb2.DT_FLOAT
new_graph = fuse_op(
graph_def=graph_def,
input_nodes=inputs,
output_nodes=outputs,
output_dtypes=[default_type for _ in outputs],
output_quantized=False,
op_name=name,
op_type="CustomTfLiteOp")
node_def = node_def_pb2.NodeDef()
text_format.Parse(op_definition, node_def)
for node in new_graph.node:
if node.name == name:
node.MergeFrom(node_def)
return new_graph
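# An illustrative `op_definition` sketch (the attribute name is hypothetical):
# a text-format NodeDef like the one below would be merged into the fused
# node, overriding its op type and attaching one integer attribute.
#
#   op: "PostProcessing"
#   attr { key: "num" value { i: 10 } }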
def main(argv):
del argv # unused
graph = _read_graph_def(filename=flags.FLAGS.input_graph)
graph = _collapse_subgraph(
graph_def=graph,
inputs=flags.FLAGS.inputs.split(","),
outputs=flags.FLAGS.outputs.split(","),
op_definition=flags.FLAGS.op_definition)
_write_graph_def(graph_def=graph, filename=flags.FLAGS.output_graph)
if __name__ == "__main__":
app.run(main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/create_custom_op.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for TFLite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.tf_export import tf_export as _tf_export
FLOAT = dtypes.float32
FLOAT16 = dtypes.float16
INT32 = dtypes.int32
INT64 = dtypes.int64
STRING = dtypes.string
QUANTIZED_UINT8 = dtypes.uint8
INT8 = dtypes.int8
COMPLEX64 = dtypes.complex64
TENSORFLOW_GRAPHDEF = _toco_flags_pb2.TENSORFLOW_GRAPHDEF
TFLITE = _toco_flags_pb2.TFLITE
GRAPHVIZ_DOT = _toco_flags_pb2.GRAPHVIZ_DOT
_tf_export(v1=["lite.constants.FLOAT"]).export_constant(__name__, "FLOAT")
_tf_export(v1=["lite.constants.FLOAT16"]).export_constant(__name__, "FLOAT16")
_tf_export(v1=["lite.constants.INT32"]).export_constant(__name__, "INT32")
_tf_export(v1=["lite.constants.INT64"]).export_constant(__name__, "INT64")
_tf_export(v1=["lite.constants.STRING"]).export_constant(__name__, "STRING")
_tf_export(v1=["lite.constants.QUANTIZED_UINT8"]).export_constant(
__name__, "QUANTIZED_UINT8")
_tf_export(v1=["lite.constants.INT8"]).export_constant(__name__, "INT8")
_tf_export(v1=["lite.constants.TFLITE"]).export_constant(__name__, "TFLITE")
_tf_export(v1=["lite.constants.GRAPHVIZ_DOT"]).export_constant(
__name__, "GRAPHVIZ_DOT")
# Currently the default mode of operation is to shell to another python process
# to protect against crashes. However, it breaks some dependent targets because
# it forces us to depend on an external py_binary. The experimental API doesn't
# have that drawback.
EXPERIMENTAL_USE_TOCO_API_DIRECTLY = False
_allowed_symbols = [
"FLOAT",
"FLOAT16",
"INT32",
"INT64",
"STRING",
"QUANTIZED_UINT8",
"INT8",
"COMPLEX64",
"TENSORFLOW_GRAPHDEF",
"TFLITE",
"GRAPHVIZ_DOT",
"EXPERIMENTAL_USE_TOCO_API_DIRECTLY",
]
remove_undocumented(__name__, _allowed_symbols)
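# Editor's note: in the TF 1.x API, the exported constants above are reachable
# as attributes of `tf.lite.constants`; a minimal sketch, assuming TF 1.15:
#
#   import tensorflow as tf
#   assert tf.lite.constants.FLOAT == tf.float32
#   assert tf.lite.constants.QUANTIZED_UINT8 == tf.uint8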
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/lite_constants.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for post training quantization with calibration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.lazy_loader import LazyLoader
# Lazy load since some of the performance benchmark skylark rules
# break dependencies. Must use double quotes to match code internal rewrite
# rule.
_calibration_wrapper = LazyLoader(
"_calibration_wrapper", globals(),
"tensorflow.lite.python.optimize."
"tensorflow_lite_wrap_calibration_wrapper")
class Calibrator(object):
"""Calibrates a floating point model and then quantizes it.
This is an internal class, not a public interface.
"""
def __init__(self, model_content):
"""Constructor.
Args:
model_content: Content of a TF-Lite Flatbuffer file.
Raises:
ValueError: If the calibrator was unable to open the model.
"""
if not model_content:
raise ValueError("`model_content` must be specified.")
try:
self._calibrator = (_calibration_wrapper.CalibrationWrapper
.CreateWrapperCPPFromBuffer(model_content))
except Exception as e:
raise ValueError("Failed to parse the model: %s." % e)
if not self._calibrator:
raise ValueError("Failed to parse the model.")
def calibrate_and_quantize(self, dataset_gen, input_type, output_type,
allow_float):
"""Calibrates the model with specified generator and then quantizes it.
Returns:
A quantized model.
Args:
dataset_gen: A generator that generates calibration samples.
input_type: A tf.dtype representing the desired real-value input type.
output_type: A tf.dtype representing the desired real-value output type.
allow_float: A boolean. False if the resulting model cannot perform float
computation, useful when targeting an integer-only backend.
If False, an error will be thrown if an operation cannot be
quantized, otherwise the model will fallback to float ops.
"""
self._calibrator.Prepare()
for calibration_sample in dataset_gen():
self._calibrator.FeedTensor(calibration_sample)
return self._calibrator.QuantizeModel(
np.dtype(input_type.as_numpy_dtype()).num,
np.dtype(output_type.as_numpy_dtype()).num, allow_float)
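# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes a TFLite flatbuffer at the hypothetical path "model.tflite" and an
# input shape of (1, 5, 5, 3); guarded so importing the module is unaffected.
if __name__ == "__main__":
  import tensorflow as tf

  def _example_dataset_gen():
    # Ten all-ones calibration samples (the shape is an assumption).
    for _ in range(10):
      yield [np.ones((1, 5, 5, 3), dtype=np.float32)]

  with open("model.tflite", "rb") as f:
    _quantizer = Calibrator(f.read())
  _quantized_model = _quantizer.calibrate_and_quantize(
      _example_dataset_gen, tf.float32, tf.float32, allow_float=False)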
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/optimize/calibrator.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Calibrator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.optimize import calibrator as _calibrator
from tensorflow.python.framework import test_util
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class CalibratorTest(test_util.TensorFlowTestCase):
def test_calibration_with_quantization(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]
quantized_model = quantizer.calibrate_and_quantize(input_gen,
constants.FLOAT,
constants.FLOAT, False)
self.assertIsNotNone(quantized_model)
def test_calibration_with_quantization_allow_float(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]
quantized_model = quantizer.calibrate_and_quantize(input_gen,
constants.FLOAT,
constants.FLOAT, True)
self.assertIsNotNone(quantized_model)
def test_calibration_with_quantization_multiple_inputs(self):
# Load multi add model from test data.
# This model has 4 inputs of size (1, 8, 8, 3).
model_path = resource_loader.get_path_to_datafile(
'../../testdata/multi_add.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 8, 8, 3), dtype=np.float32) for _ in range(4)]
quantized_model = quantizer.calibrate_and_quantize(input_gen,
constants.FLOAT,
constants.FLOAT, False)
self.assertIsNotNone(quantized_model)
def test_invalid_model_buffer(self):
float_model = b'\0' * 100
with self.assertRaisesWithRegexpMatch(ValueError,
'Failed to parse the model'):
_calibrator.Calibrator(float_model)
def test_empty_calibrator_gen(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
def empty_input_gen():
for i in ():
yield i
with self.assertRaises(RuntimeError):
quantizer.calibrate_and_quantize(empty_input_gen, constants.FLOAT,
constants.FLOAT, False)
def test_invalid_shape_calibrator_gen(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator with incorrect shape.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 2, 2, 3), dtype=np.float32)]
with self.assertRaisesWithRegexpMatch(ValueError, 'Dimension mismatch'):
quantizer.calibrate_and_quantize(input_gen, constants.FLOAT,
constants.FLOAT, False)
def test_invalid_type_calibrator_gen(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
    # Input generator with incorrect dtype.
def input_gen():
for _ in range(10):
yield np.ones(shape=(1, 5, 5, 3), dtype=np.int32)
with self.assertRaises(ValueError):
quantizer.calibrate_and_quantize(input_gen, constants.FLOAT,
constants.FLOAT, False)
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/python/optimize/calibrator_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import tensorflow as tf
import traceback
from tensorflow.lite.testing import generate_examples_lib
def toco_options(data_types,
input_arrays,
output_arrays,
shapes,
extra_toco_options=None):
"""Create TOCO options to process a model.
  Args:
    data_types: input and inference types used by TOCO.
    input_arrays: names of the input tensors.
    output_arrays: names of the output tensors.
    shapes: shapes of the input tensors.
    extra_toco_options: additional toco options.
  Returns:
    The options as a string.
  """
if extra_toco_options is None:
extra_toco_options = generate_examples_lib.ExtraTocoOptions()
shape_str = ":".join([",".join(str(y) for y in x) for x in shapes if x])
inference_type = "FLOAT"
# TODO(ahentz): if we get multi-input quantization to work we need this
# to change
if data_types[0] == "QUANTIZED_UINT8":
inference_type = "QUANTIZED_UINT8"
s = (" --input_data_types=%s" % ",".join(data_types) +
" --inference_type=%s" % inference_type +
" --input_format=TENSORFLOW_GRAPHDEF" + " --output_format=TFLITE" +
" --input_arrays=%s" % ",".join(input_arrays) +
" --output_arrays=%s" % ",".join(output_arrays))
if shape_str:
s += (" --input_shapes=%s" % shape_str)
if extra_toco_options.drop_control_dependency:
s += " --drop_control_dependency"
if extra_toco_options.allow_custom_ops:
s += " --allow_custom_ops"
if extra_toco_options.rnn_states:
s += (" --rnn_states='" + extra_toco_options.rnn_states + "'")
if extra_toco_options.split_tflite_lstm_inputs is not None:
if extra_toco_options.split_tflite_lstm_inputs:
s += " --split_tflite_lstm_inputs=true"
else:
s += " --split_tflite_lstm_inputs=false"
return s
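# Editor's note: an illustrative result, not part of the original module. For
# a single float input "a" of shape [1, 2] and output "out",
# toco_options(["FLOAT"], ["a"], ["out"], [[1, 2]]) yields roughly:
#   " --input_data_types=FLOAT --inference_type=FLOAT"
#   " --input_format=TENSORFLOW_GRAPHDEF --output_format=TFLITE"
#   " --input_arrays=a --output_arrays=out --input_shapes=1,2"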
def toco_convert(options, graph_def, input_tensors, output_tensors, **kwargs):
"""Convert a model's graph def into a tflite model.
  NOTE: this currently shells out to the toco binary, but we would like to
  move to Python API tooling in the future.
Args:
options: An Options instance.
graph_def: A GraphDef object.
input_tensors: List of input tensor tuples `(name, shape, type)`.
output_tensors: List of output tensors (names).
**kwargs: Extra options to be passed.
Returns:
output tflite model, log_txt from conversion
or None, log_txt if it did not convert properly.
"""
  # Convert ophint ops if present.
graph_def = tf.lite.experimental.convert_op_hints_to_stubs(
graph_def=graph_def)
graph_def_str = graph_def.SerializeToString()
extra_toco_options = kwargs.get(
"extra_toco_options", generate_examples_lib.ExtraTocoOptions())
test_params = kwargs.get("test_params", {})
input_arrays = [x[0] for x in input_tensors]
data_types = [
generate_examples_lib.TF_TYPE_INFO[x[2]][1] for x in input_tensors]
if test_params.get("fully_quantize", False):
with tempfile.NamedTemporaryFile() as graphdef_file:
graphdef_file.write(graph_def_str)
graphdef_file.flush()
input_shapes = generate_examples_lib.get_input_shapes_map(input_tensors)
converter = tf.lite.TocoConverter.from_frozen_graph(
graphdef_file.name, input_arrays, output_tensors, input_shapes)
def representative_dataset(input_tensors):
calibration_inputs = []
for _, shape, _ in input_tensors:
if shape:
dims = [dim.value for dim in shape.dims]
calibration_inputs.append(
np.random.uniform(-1, 1, tuple(dims)).astype(np.float32))
return calibration_inputs
def representative_dataset_gen():
for _ in range(100):
yield representative_dataset(input_tensors)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8
]
converter.representative_dataset = representative_dataset_gen
if extra_toco_options.inference_input_type:
converter.inference_input_type = (
extra_toco_options.inference_input_type)
if extra_toco_options.inference_output_type:
converter.inference_output_type = (
extra_toco_options.inference_output_type)
try:
tflite_model = converter.convert()
return tflite_model, ""
except Exception as e:
log = "{0}\n{1}".format(str(e), traceback.format_exc())
return None, log
else:
opts = toco_options(
data_types=data_types,
input_arrays=input_arrays,
shapes=[x[1] for x in input_tensors],
output_arrays=output_tensors,
extra_toco_options=extra_toco_options)
with tempfile.NamedTemporaryFile() as graphdef_file, \
tempfile.NamedTemporaryFile() as output_file, \
tempfile.NamedTemporaryFile("w+") as stdout_file:
graphdef_file.write(graph_def_str)
graphdef_file.flush()
# TODO(aselle): Switch this to subprocess at some point.
if options.run_with_flex:
opts += " --enable_select_tf_ops --force_select_tf_ops"
cmd = ("%s --input_file=%s --output_file=%s %s > %s 2>&1" %
(options.toco, graphdef_file.name, output_file.name, opts,
stdout_file.name))
exit_code = os.system(cmd)
log = (
cmd + "exited with code %d" % exit_code + "\n------------------\n" +
stdout_file.read())
return (None if exit_code != 0 else output_file.read()), log
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/testing/toco_convert.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Make HTML tables that report where TF and TOCO failed to convert models.
This is primarily used by generate_examples.py. See it or
`make_report_table` for more details on usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cgi
import json
FAILED = "FAILED"
SUCCESS = "SUCCESS"
NOTRUN = "NOTRUN"
def make_report_table(fp, title, reports):
"""Make an HTML report of the success/failure reports.
Args:
fp: File-like object in which to put the html.
title: "Title of the zip file this pertains to."
reports: a list of conversion attempts. (report_args, report_vals) i.e.
({"shape": [1,2,3], "type": "tf.float32"},
{"tf": "SUCCESS", "toco": "FAILURE", "toco_log": "Unsupported type.",
"tf_log": ""})
"""
  # Sort reports by TOCO failure and then by TF failure (reversed).
reports.sort(key=lambda x: x[1]["toco"], reverse=False)
reports.sort(key=lambda x: x[1]["tf"], reverse=True)
def result_cell(x, row, col):
"""Produce a cell with the condition string `x`."""
s = cgi.escape(repr(x), quote=True)
color = "#44ff44" if x == SUCCESS else (
"#ff4444" if x == FAILED else "#eeeeee")
handler = "ShowLog(%d, %d)" % (row, col)
fp.write("<td style='background-color: %s' onclick='%s'>%s</td>\n" % (
color, handler, s))
fp.write("""<html>
<head>
<title>tflite report</title>
<style>
body { font-family: Arial; }
th { background-color: #555555; color: #eeeeee; }
td { vertical-align: top; }
td.horiz {width: 50%;}
pre { white-space: pre-wrap; word-break: keep-all; }
table {width: 100%;}
</style>
</head>
""")
# Write the log data to a javascript variable and also make a function
# in javascript to show the log when an item is clicked.
fp.write("<script> \n")
fp.write("""
function ShowLog(row, col) {
var log = document.getElementById("log");
log.innerHTML = "<pre>" + data[row][col] + "</pre>";
}
""")
fp.write("var data = \n")
fp.write(json.dumps([[cgi.escape(x[1]["tf_log"], quote=True),
cgi.escape(x[1]["toco_log"], quote=True)]
for x in reports]))
fp.write(";</script>\n")
# Write the main table and use onclick on the items that have log items.
fp.write("""
<body>
<h1>TOCO Conversion</h1>
<h2>%s</h2>
""" % title)
# Get a list of keys that are in any of the records.
param_keys = {}
for params, _ in reports:
for k in params.keys():
param_keys[k] = True
fp.write("<table>\n")
fp.write("<tr><td class='horiz'>\n")
fp.write("<div style='height:1000px; overflow:auto'>\n")
fp.write("<table>\n")
fp.write("<tr>\n")
for p in param_keys:
fp.write("<th>%s</th>\n" % cgi.escape(p, quote=True))
fp.write("<th>TensorFlow</th>\n")
fp.write("<th>TOCO</th>\n")
fp.write("</tr>\n")
for idx, (params, vals) in enumerate(reports):
fp.write("<tr>\n")
for p in param_keys:
fp.write(" <td>%s</td>\n" % cgi.escape(repr(params[p]), quote=True))
result_cell(vals["tf"], idx, 0)
result_cell(vals["toco"], idx, 1)
fp.write("</tr>\n")
fp.write("</table>\n")
fp.write("</div>\n")
fp.write("</td>\n")
fp.write("<td class='horiz' id='log'></td></tr>\n")
fp.write("</table>\n")
fp.write("<script>\n")
fp.write("</script>\n")
fp.write("""
</body>
</html>
""")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/testing/generate_examples_report.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory>
bazel run //tensorflow/lite/testing:generate_examples
To more easily debug failures use (or override) the --save_graphdefs flag to
place text proto graphdefs into the generated zip files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import functools
import itertools
import operator
import os
import random
import re
import string
import traceback
import zipfile
import numpy as np
from six import StringIO
from six.moves import xrange
# TODO(aselle): Disable GPU for now
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# pylint: disable=g-import-not-at-top
import tensorflow as tf
from google.protobuf import text_format
# TODO(aselle): switch to TensorFlow's resource_loader
from tensorflow.contrib.quantize.python import quantize_graph
from tensorflow.lite.testing import generate_examples_report as report_lib
from tensorflow.lite.testing import string_util_wrapper
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import spectral_ops_test_util
RANDOM_SEED = 342
TEST_INPUT_DEPTH = 3
# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
KNOWN_BUGS = {
# TOCO doesn't support scalars as input.
# Concat doesn't work with a single input tensor
r"concat.*num_tensors=1": "67378344",
# Softmax graphs are too complex.
r"softmax.*dim=0": "67749831",
# BatchToSpaceND only supports 4D tensors.
r"batch_to_space_nd.*input_shape=\[8,2,2,2,1,1\]": "70594733",
# Div will use floordiv.
r"div.*int32": "72051395",
# Strided slice cannot handle new_axis_mask.
r"strided_slice.*spec=\[None": "137470173",
}
class Options(object):
"""All options for example generation."""
def __init__(self):
    # Directory where the outputs will go.
self.output_path = None
# Particular zip to output.
self.zip_to_output = None
# Path to toco tool.
self.toco = None
    # If a particular model is affected by a known bug, count it as a
    # converter error.
    self.known_bugs_are_errors = False
    # If False, raise an exception when any converter error is encountered.
    self.ignore_converter_errors = False
# Include intermediate graphdefs in the output zip files.
self.save_graphdefs = False
# Whether the TFLite Flex converter is being used.
self.run_with_flex = False
# Whether to generate test cases for edgetpu.
self.make_edgetpu_tests = False
    # The function to convert a TensorFlow model to a TFLite model.
    # See the documentation of the `toco_convert` function for the required
    # signature.
self.tflite_convert_function = None
# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
self.known_bugs = KNOWN_BUGS
# Make tests by setting TF forward compatibility horizon to the future.
self.make_forward_compat_test = False
# A map from names to functions which make test cases.
_MAKE_TEST_FUNCTIONS_MAP = {}
# A decorator to register the make test functions.
# Usage:
# All the make_*_test should be registered. Example:
# @register_make_test_function()
# def make_conv_tests(options):
# # ...
# If a function is decorated by other decorators, it's required to specify the
# name explicitly. Example:
# @register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
# @test_util.enable_control_flow_v2
# def make_unidirectional_sequence_lstm_tests(options):
# # ...
def register_make_test_function(name=None):
  def decorate(function, name=name):
    if name is None:
      name = function.__name__
    _MAKE_TEST_FUNCTIONS_MAP[name] = function
    # Return the function so the decorated name stays bound to it.
    return function
  return decorate
class ExtraTocoOptions(object):
"""Additional toco options besides input, output, shape."""
def __init__(self):
# Whether to ignore control dependency nodes.
self.drop_control_dependency = False
# Allow custom ops in the toco conversion.
self.allow_custom_ops = False
# Rnn states that are used to support rnn / lstm cells.
self.rnn_states = None
    # Split the LSTM inputs from 5 inputs to 18 inputs for TFLite.
self.split_tflite_lstm_inputs = None
# The inference input type passed to TFLiteConvert.
self.inference_input_type = None
# The inference output type passed to TFLiteConvert.
self.inference_output_type = None
def format_result(t):
"""Convert a tensor to a format that can be used in test specs."""
if t.dtype.kind not in [np.dtype(np.string_).kind, np.dtype(np.object_).kind]:
# Output 9 digits after the point to ensure the precision is good enough.
values = ["{:.9f}".format(value) for value in list(t.flatten())]
return ",".join(values)
else:
return string_util_wrapper.SerializeAsHexString(t.flatten())
def write_examples(fp, examples):
"""Given a list `examples`, write a text format representation.
  The file format is CSV-like with a simple repeated pattern. We would like
  to use proto here, but we can't yet due to interfacing with the Android
  team using this format.
Args:
fp: File-like object to write to.
    examples: Example dictionary consisting of keys "inputs" and "outputs".
"""
def write_tensor(fp, x):
"""Write tensor in file format supported by TFLITE example."""
fp.write("dtype,%s\n" % x.dtype)
fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
fp.write("values," + format_result(x) + "\n")
fp.write("test_cases,%d\n" % len(examples))
for example in examples:
fp.write("inputs,%d\n" % len(example["inputs"]))
for i in example["inputs"]:
write_tensor(fp, i)
fp.write("outputs,%d\n" % len(example["outputs"]))
for i in example["outputs"]:
write_tensor(fp, i)
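# Editor's note: illustrative output of `write_examples` for one example with
# a single scalar float input and output (the values shown are hypothetical):
#   test_cases,1
#   inputs,1
#   dtype,float32
#   shape,
#   values,0.500000000
#   outputs,1
#   dtype,float32
#   shape,
#   values,1.000000000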
def write_test_cases(fp, model_name, examples):
"""Given a dictionary of `examples`, write a text format representation.
The file format is protocol-buffer-like, even though we don't use proto due
to the needs of the Android team.
Args:
fp: File-like object to write to.
model_name: Filename where the model was written to, relative to filename.
    examples: Example dictionary consisting of keys "inputs" and "outputs".
"""
fp.write("load_model: %s\n" % os.path.basename(model_name))
for example in examples:
fp.write("reshape {\n")
for t in example["inputs"]:
fp.write(" input: \"" + ",".join(map(str, t.shape)) + "\"\n")
fp.write("}\n")
fp.write("invoke {\n")
for t in example["inputs"]:
fp.write(" input: \"" + format_result(t) + "\"\n")
for t in example["outputs"]:
fp.write(" output: \"" + format_result(t) + "\"\n")
fp.write(" output_shape: \"" + ",".join([str(dim) for dim in t.shape]) +
"\"\n")
fp.write("}\n")
TF_TYPE_INFO = {
tf.float32: (np.float32, "FLOAT"),
tf.float16: (np.float16, "FLOAT"),
tf.int32: (np.int32, "INT32"),
tf.uint8: (np.uint8, "QUANTIZED_UINT8"),
tf.int16: (np.int16, "QUANTIZED_INT16"),
tf.int64: (np.int64, "INT64"),
tf.bool: (np.bool, "BOOL"),
tf.string: (np.string_, "STRING"),
}
def create_tensor_data(dtype, shape, min_value=-100, max_value=100):
"""Build tensor data spreading the range [min_value, max_value)."""
if dtype in TF_TYPE_INFO:
dtype = TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value-min_value)*np.random.random_sample(shape)+min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value+1, shape)
elif dtype == tf.bool:
value = np.random.choice([True, False], size=shape)
elif dtype == np.string_:
# Not the best strings, but they will do for some basic testing.
letters = list(string.ascii_uppercase)
return np.random.choice(letters, size=shape).astype(dtype)
return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype(
dtype)
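# Editor's note: e.g. create_tensor_data(tf.int32, [2, 2], 0, 5) returns a
# 2x2 np.int32 array with entries drawn uniformly from the inclusive range
# [0, 5]; float dtypes sample from the half-open range [min_value, max_value).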
def create_scalar_data(dtype, min_value=-100, max_value=100):
"""Build scalar tensor data range from min_value to max_value exclusively."""
if dtype in TF_TYPE_INFO:
dtype = TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value - min_value) * np.random.random() + min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value + 1)
return np.array(value, dtype=dtype)
def freeze_graph(session, outputs):
"""Freeze the current graph.
Args:
    session: TensorFlow session containing the graph.
outputs: List of output tensors
Returns:
The frozen graph_def.
"""
return tf_graph_util.convert_variables_to_constants(
session, session.graph.as_graph_def(), [x.op.name for x in outputs])
@register_make_test_function()
def make_control_dep_tests(options):
"""Make a set of tests that use control dependencies."""
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)
assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1)
with tf.control_dependencies([assert_op]):
out = tf.nn.conv2d(input_tensor, filter_value,
strides=(1, 1, 1, 1), padding="SAME")
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
extra_toco_options = ExtraTocoOptions()
extra_toco_options.drop_control_dependency = True
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
extra_toco_options,
expected_tf_failures=3)
def get_input_shapes_map(input_tensors):
"""Gets a map of input names to shapes.
Args:
input_tensors: List of input tensor tuples `(name, shape, type)`.
Returns:
{string : list of integers}.
"""
input_arrays = [tensor[0] for tensor in input_tensors]
input_shapes_list = []
for _, shape, _ in input_tensors:
dims = None
if shape:
dims = [dim.value for dim in shape.dims]
input_shapes_list.append(dims)
input_shapes = {
name: shape
for name, shape in zip(input_arrays, input_shapes_list)
if shape
}
return input_shapes
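# Editor's note: e.g. for input_tensors
# [("input", tf.TensorShape([1, 2]), tf.float32)] this returns
# {"input": [1, 2]}; tensors with empty shapes are omitted from the map.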
def normalize_output_name(output_name):
"""Remove :0 suffix from tensor names."""
return output_name.split(":")[0] if output_name.endswith(
":0") else output_name
# How many test cases we may have in a zip file. Too many test cases will
# slow down the test data generation process.
_MAX_TESTS_PER_ZIP = 500
def make_zip_of_tests(options,
test_parameters,
make_graph,
make_test_inputs,
extra_toco_options=ExtraTocoOptions(),
use_frozen_graph=False,
expected_tf_failures=0):
"""Helper to make a zip file of a bunch of TensorFlow models.
  This does a cartesian product of the dictionary of test_parameters and
  calls make_graph() for each item in the cartesian product set.
If the graph is built successfully, then make_test_inputs() is called to
build expected input/output value pairs. The model is then converted to tflite
with toco, and the examples are serialized with the tflite model into a zip
file (2 files per item in the cartesian product set).
Args:
options: An Options instance.
test_parameters: Dictionary mapping to lists for each parameter.
e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
make_graph: function that takes current parameters and returns tuple
`[input1, input2, ...], [output1, output2, ...]`
make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
`output_tensors` and returns tuple `(input_values, output_values)`.
extra_toco_options: Additional toco options.
    use_frozen_graph: Whether or not to freeze the graph before passing it to
      the toco converter.
    expected_tf_failures: Number of times tensorflow is expected to fail in
      executing the input graphs. In some cases it is OK for TensorFlow to
      fail because one or more combinations of parameters are invalid.
Raises:
RuntimeError: if there are converter errors that can't be ignored.
"""
zip_path = os.path.join(options.output_path, options.zip_to_output)
parameter_count = 0
for parameters in test_parameters:
parameter_count += functools.reduce(
operator.mul, [len(values) for values in parameters.values()])
if parameter_count > _MAX_TESTS_PER_ZIP:
raise RuntimeError(
"Too many parameter combinations for generating '%s'.\n"
"There are %d combinations while the upper limit is %d.\n"
"Having too many combinations will slow down the tests.\n"
"Please consider splitting the test into multiple functions.\n"
% (zip_path, parameter_count, _MAX_TESTS_PER_ZIP))
# TODO(aselle): Make this allow multiple inputs outputs.
archive = zipfile.PyZipFile(zip_path, "w")
zip_manifest = []
convert_report = []
toco_errors = 0
processed_labels = set()
if options.make_edgetpu_tests:
extra_toco_options.inference_input_type = tf.lite.constants.QUANTIZED_UINT8
extra_toco_options.inference_output_type = tf.lite.constants.QUANTIZED_UINT8
for parameters in test_parameters:
keys = parameters.keys()
for curr in itertools.product(*parameters.values()):
label = zip_path.replace(".zip", "_") + (",".join(
"%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
if label[0] == "/":
label = label[1:]
if label in processed_labels:
# Do not populate data for the same label more than once. It will cause
# errors when unzipping.
continue
processed_labels.add(label)
param_dict = dict(zip(keys, curr))
if options.make_edgetpu_tests and not param_dict.get(
"fully_quantize", False):
continue
def build_tflite_inputs(tflite_model_binary):
# Build input values and output values of the given tflite model.
interpreter = tf.lite.Interpreter(model_content=tflite_model_binary)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
input_values = []
for input_detail in input_details:
# TODO(yunluli): Set proper min max value according to dtype.
input_value = create_tensor_data(
input_detail["dtype"],
input_detail["shape"],
min_value=0,
max_value=255)
interpreter.set_tensor(input_detail["index"], input_value)
input_values.append(input_value)
interpreter.invoke()
output_details = interpreter.get_output_details()
output_values = []
for output_detail in output_details:
output_values.append(interpreter.get_tensor(output_detail["index"]))
return input_values, output_values
def build_example(label, param_dict_real):
"""Build the model with parameter values set in param_dict_real.
Args:
label: Label of the model (i.e. the filename in the zip).
param_dict_real: Parameter dictionary (arguments to the factories
make_graph and make_test_inputs)
Returns:
(tflite_model_binary, report) where tflite_model_binary is the
serialized flatbuffer as a string and report is a dictionary with
keys `toco_log` (log of toco conversion), `tf_log` (log of tf
        conversion), `toco` (success status of the conversion as a string),
        and `tf` (success status of the TensorFlow run as a string).
"""
np.random.seed(RANDOM_SEED)
report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}
# Build graph
report["tf_log"] = ""
report["toco_log"] = ""
tf.reset_default_graph()
with tf.device("/cpu:0"):
try:
inputs, outputs = make_graph(param_dict_real)
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
sess = tf.compat.v1.Session()
try:
baseline_inputs, baseline_outputs = (make_test_inputs(
param_dict_real, sess, inputs, outputs))
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
report["toco"] = report_lib.FAILED
report["tf"] = report_lib.SUCCESS
# Convert graph to toco
input_tensors = [(input_tensor.name.split(":")[0], input_tensor.shape,
input_tensor.dtype) for input_tensor in inputs]
output_tensors = [normalize_output_name(out.name) for out in outputs]
graph_def = freeze_graph(
sess,
tf.global_variables() + inputs +
outputs) if use_frozen_graph else sess.graph_def
if "split_tflite_lstm_inputs" in param_dict_real:
extra_toco_options.split_tflite_lstm_inputs = param_dict_real[
"split_tflite_lstm_inputs"]
tflite_model_binary, toco_log = options.tflite_convert_function(
options,
graph_def,
input_tensors,
output_tensors,
extra_toco_options=extra_toco_options,
test_params=param_dict_real)
report["toco"] = (report_lib.SUCCESS if tflite_model_binary is not None
else report_lib.FAILED)
report["toco_log"] = toco_log
if options.save_graphdefs:
archive.writestr(label + ".pbtxt",
text_format.MessageToString(graph_def),
zipfile.ZIP_DEFLATED)
if tflite_model_binary:
if options.make_edgetpu_tests:
baseline_inputs, baseline_outputs = build_tflite_inputs(
tflite_model_binary)
archive.writestr(label + ".bin", tflite_model_binary,
zipfile.ZIP_DEFLATED)
example = {"inputs": baseline_inputs, "outputs": baseline_outputs}
example_fp = StringIO()
write_examples(example_fp, [example])
archive.writestr(label + ".inputs",
example_fp.getvalue(), zipfile.ZIP_DEFLATED)
example_fp2 = StringIO()
write_test_cases(example_fp2, label + ".bin", [example])
archive.writestr(label + "_tests.txt",
example_fp2.getvalue(), zipfile.ZIP_DEFLATED)
zip_manifest.append(label + "\n")
return tflite_model_binary, report
_, report = build_example(label, param_dict)
if report["toco"] == report_lib.FAILED:
ignore_error = False
if not options.known_bugs_are_errors:
for pattern, bug_number in options.known_bugs.items():
if re.search(pattern, label):
print("Ignored converter error due to bug %s" % bug_number)
ignore_error = True
if not ignore_error:
toco_errors += 1
print("-----------------\nconverter error!\n%s\n-----------------\n" %
report["toco_log"])
convert_report.append((param_dict, report))
report_io = StringIO()
report_lib.make_report_table(report_io, zip_path, convert_report)
archive.writestr("report.html", report_io.getvalue())
archive.writestr("manifest.txt", "".join(zip_manifest), zipfile.ZIP_DEFLATED)
# Log statistics of what succeeded
total_conversions = len(convert_report)
tf_success = sum(1 for x in convert_report
if x[1]["tf"] == report_lib.SUCCESS)
toco_success = sum(1 for x in convert_report
if x[1]["toco"] == report_lib.SUCCESS)
percent = 0
if tf_success > 0:
percent = float(toco_success) / float(tf_success) * 100.
tf.logging.info(("Archive %s Considered %d graphs, %d TF evaluated graphs "
" and %d TOCO converted graphs (%.1f%%"), zip_path,
total_conversions, tf_success, toco_success, percent)
tf_failures = parameter_count - tf_success
if tf_failures / parameter_count > 0.8:
raise RuntimeError(("Test for '%s' is not very useful. "
"TensorFlow fails in %d percent of the cases.") %
(zip_path, int(100 * tf_failures / parameter_count)))
if not options.make_edgetpu_tests and tf_failures != expected_tf_failures:
raise RuntimeError(("Expected TF to fail %d times while generating '%s', "
"but that happened %d times") % (expected_tf_failures,
zip_path, tf_failures))
if not options.ignore_converter_errors and toco_errors > 0:
raise RuntimeError(
"Found %d errors while generating toco models" % toco_errors)
def make_pool_tests(pool_op_in):
"""Make a set of tests to do average pooling.
Args:
    pool_op_in: TensorFlow pooling operation to test, e.g. `tf.nn.avg_pool2d`.
  Returns:
    A function representing the true generator (with `pool_op_in` curried).
"""
pool_op = pool_op_in
def f(options, expected_tf_failures=0):
"""Actual function that generates examples.
Args:
options: An Options instance.
expected_tf_failures: number of expected tensorflow failures.
"""
    # Choose a set of parameters.
test_parameters = [{
"ksize": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
"strides": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
# TODO(aselle): should add in a degenerate shape (e.g. [1, 0, 1, 1]).
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = pool_op(
input_tensor,
ksize=parameters["ksize"],
strides=parameters["strides"],
data_format=parameters["data_format"],
padding=parameters["padding"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
return f
@register_make_test_function()
def make_l2_pool_tests(options):
make_pool_tests(make_l2_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_avg_pool_tests(options):
make_pool_tests(tf.nn.avg_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_max_pool_tests(options):
make_pool_tests(tf.nn.max_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_abs_tests(options):
"""Make a set of tests to do relu."""
# Chose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.abs(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-10, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_elu_tests(options):
"""Make a set of tests to do (float) tf.nn.elu."""
test_parameters = [
{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.elu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_hardswish_tests(options):
"""Make a set of tests to do hardswish."""
  # Choose a set of parameters.
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
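  # Editor's note: the graph below computes the hard-swish activation,
  # hardswish(x) = x * relu6(x + 3) / 6.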
def build_graph(parameters):
inp = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = inp * tf.nn.relu6(inp + np.float32(3)) * np.float32(1. / 6.)
return [inp], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-10, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
# Add additional validation if we are using toco.
  # Flex and MLIR don't yet support this. TODO(b/139193008): Fix.
if not options.run_with_flex:
options.tflite_convert_function = functools.partial(
_tflite_convert_verify_num_ops,
options.tflite_convert_function,
num_ops=2)
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def _tflite_convert_verify_num_ops(tflite_convert_function, *args, **kwargs):
"""Verifies that the result of the conversion is a single op."""
num_ops = kwargs.pop("num_ops", 2)
result = tflite_convert_function(*args, **kwargs)
tflite_model_binary = result[0]
if not result[0]:
tf.logging.error(result[1]) # stderr from running tflite_convert.
raise RuntimeError("Failed to bulid model: \n\n" + result[1])
interpreter = tf.lite.Interpreter(model_content=tflite_model_binary)
interpreter.allocate_tensors()
if len(interpreter.get_tensor_details()) != num_ops:
raise RuntimeError("Expected to generate two node graph got %r " %
interpreter.get_tensor_details())
return result
@register_make_test_function()
def make_uint8_hardswish_tests(options):
"""Make a set of tests to do hardswish."""
  # Choose a set of parameters.
test_parameters = [{
"input_shape": [[2, 3]],
"fully_quantize": [True],
}]
def build_graph(parameters):
"""Builds tensorflow graph."""
inp = tf.placeholder(dtype=tf.float32, name="input",
shape=parameters["input_shape"])
    # Note: the inputs need to be in the range [-1, 1], or else the
    # quantization ranges would need to be fixed up.
qinp = array_ops.fake_quant_with_min_max_args(
inp, min=-1, max=1, num_bits=8)
relu6 = tf.nn.relu6(qinp + np.float32(3)) * np.float32(1. / 6.)
out = qinp * relu6
quantize_graph.experimental_create_eval_graph(
inp.graph, weight_bits=8, activation_bits=8)
return [qinp], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-1, max_value=1)
output_values = sess.run(outputs,
feed_dict=dict(zip(inputs, [input_values])))
return [input_values], output_values
# Add additional validation if we are using toco.
  # Flex doesn't yet support this. TODO(b/139193008): Remove this constraint.
if not options.run_with_flex:
# Expect 2 quantize operators and one hard swish resulting in 4 tensors.
options.tflite_convert_function = functools.partial(
_tflite_convert_verify_num_ops,
options.tflite_convert_function,
num_ops=4)
extra_toco_options = ExtraTocoOptions()
extra_toco_options.inference_input_type = tf.lite.constants.QUANTIZED_UINT8
extra_toco_options.inference_output_type = tf.lite.constants.QUANTIZED_UINT8
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
extra_toco_options=extra_toco_options,
use_frozen_graph=True)
@register_make_test_function()
def make_identity_tests(options):
"""Make a set of tests to do identity."""
  # Choose a set of parameters.
test_parameters = [{
"input_shape": [[], [1], [3, 3]],
"op_to_use": ["identity", "identity_n", "snapshot"],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
    # We add the Multiply before Identity just as a workaround to make the
    # test pass when input_shape is scalar.
    # During graph transformation, TOCO will replace the Identity op with
    # Reshape when the input has a shape. However, currently TOCO can't
    # distinguish between a missing shape and a scalar shape. As a result,
    # when the input has a scalar shape, this conversion still fails.
    # TODO(b/129197312): remove the workaround code once the bug is fixed.
input_doubled = input_tensor * 2.0
if parameters["op_to_use"] == "identity":
identity_output = tf.identity(input_doubled)
elif parameters["op_to_use"] == "identity_n":
# Testing `IdentityN` with a single tensor.
identity_output = tf.identity_n([input_doubled])[0]
elif parameters["op_to_use"] == "snapshot":
identity_output = array_ops.snapshot(input_doubled)
return [input_tensor], [identity_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu_tests(options):
"""Make a set of tests to do relu."""
  # Choose a set of parameters.
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.relu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu1_tests(options):
"""Make a set of tests to do relu1."""
  # Choose a set of parameters.
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
# Note that the following is not supported:
# out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))
out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu6_tests(options):
"""Make a set of tests to do relu6."""
  # Choose a set of parameters.
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.nn.relu6(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_prelu_tests(options):
"""Make a set of tests to do PReLU."""
test_parameters = [
{
# The canonical case for image processing is having a 4D `input`
            # (NHWC) and `shared_axes`=[1, 2], so the alpha parameter is per
# channel.
"input_shape": [[1, 10, 10, 3], [3, 3, 3, 3]],
"shared_axes": [[1, 2], [1]],
},
{
# 2D-3D example. Share the 2nd axis.
"input_shape": [[20, 20], [20, 20, 20]],
"shared_axes": [[1]],
}
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
prelu = tf.keras.layers.PReLU(shared_axes=parameters["shared_axes"])
out = prelu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_shape = parameters["input_shape"]
input_values = create_tensor_data(
np.float32, input_shape, min_value=-10, max_value=10)
shared_axes = parameters["shared_axes"]
alpha_shape = []
for dim in range(1, len(input_shape)):
alpha_shape.append(1 if dim in shared_axes else input_shape[dim])
alpha_values = create_tensor_data(np.float32, alpha_shape)
# There should be only 1 trainable variable tensor.
variables = tf.all_variables()
assert len(variables) == 1
sess.run(variables[0].assign(alpha_values))
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function()
def make_leaky_relu_tests(options):
"""Make a set of tests to do LeakyRelu."""
test_parameters = [
{
"input_shape": [[], [1], [5], [1, 10, 10, 3], [3, 3, 3, 3]],
"alpha": [0.1, 1.0, 2.0, -0.1, -1.0, -2.0],
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.leaky_relu(input_tensor, alpha=parameters["alpha"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# This function tests various TensorFlow functions that generate Const ops,
# including `tf.ones`, `tf.zeros` and random functions.
@register_make_test_function()
def make_constant_tests(options):
"""Make a set of tests to do constant ops."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[], [1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],
"constant_is_also_output": [True, False],
# This is a regression test for a bug where Toco rejects models with
# unread inputs.
"has_unread_input": [True, False],
}]
def build_graph(parameters):
dummy_input = tf.placeholder(
dtype=parameters["dtype"],
name="input1",
shape=parameters["input_shape"])
constant = tf.constant(
create_tensor_data(parameters["dtype"], parameters["input_shape"]))
outputs = [tf.maximum(dummy_input, constant)]
if parameters["constant_is_also_output"]:
outputs.append(constant)
inputs = [dummy_input]
if parameters["has_unread_input"]:
unread_input = tf.placeholder(
dtype=parameters["dtype"],
name="unread_input",
shape=parameters["input_shape"])
inputs.append(unread_input)
return inputs, outputs
def build_inputs(parameters, sess, inputs, outputs):
dummy_input = np.zeros(
parameters["input_shape"], dtype=TF_TYPE_INFO[parameters["dtype"]][0])
return [dummy_input], sess.run(outputs, feed_dict={inputs[0]: dummy_input})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def make_binary_op_tests(options, binary_operator, expected_tf_failures=0):
"""Make a set of tests to do binary ops with and without broadcast."""
test_parameters = [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
"activation": [False, True],
},
{
"dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
"activation": [True, False],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True, False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[]],
"input_shape_2": [[]],
"activation": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[0]],
"input_shape_2": [[1]],
"activation": [False],
}
]
def build_graph(parameters):
"""Builds the graph given the current parameters."""
input1 = tf.placeholder(
dtype=parameters["dtype"],
name="input1",
shape=parameters["input_shape_1"])
input2 = tf.placeholder(
dtype=parameters["dtype"],
name="input2",
shape=parameters["input_shape_2"])
out = binary_operator(input1, input2)
if parameters["activation"]:
out = tf.nn.relu(out)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Builds operand inputs for op."""
input1 = create_tensor_data(parameters["dtype"],
parameters["input_shape_1"])
input2 = create_tensor_data(parameters["dtype"],
parameters["input_shape_2"])
return [input1, input2], sess.run(
outputs, feed_dict={
inputs[0]: input1,
inputs[1]: input2
})
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
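# Editor's note: a hypothetical registered wrapper would invoke the helper
# above as, e.g.:
#   @register_make_test_function()
#   def make_add_tests(options):
#     make_binary_op_tests(options, tf.add)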
def make_reduce_tests(reduce_op,
min_value=-10,
max_value=10,
boolean_tensor_only=False):
"""Make a set of tests to do reduce operation.
Args:
    reduce_op: TensorFlow reduce operation to test, e.g. `tf.reduce_mean`.
    min_value: min value for created tensor data.
    max_value: max value for created tensor data.
    boolean_tensor_only: If True, will only generate tensors with boolean
      values.
  Returns:
    a function representing the true generator with `reduce_op` curried.
"""
def f(options):
"""Actual function that generates examples."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[3, 3, 2, 4]],
"axis": [
0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
[2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1],
[-1, 0], [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
],
"const_axis": [True, False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[1, 8, 8, 3]],
"axis": [
0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2,
3], [3, 2, 1, 0],
[3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2, -3, -4,
[0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],
[2, 2, 3], [-3, -3, -4], [-3, 2, 1]
],
"const_axis": [True, False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
"axis": [[]], # shape is: [0]
"const_axis": [False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
"axis": [None], # shape is: []
"const_axis": [True],
"keepdims": [True, False],
}
]
def build_graph(parameters):
"""Build the mean op testing graph."""
dtype = parameters["input_dtype"]
if boolean_tensor_only:
dtype = tf.bool
input_tensor = tf.placeholder(
dtype=dtype, name="input", shape=parameters["input_shape"])
# Get axis as either a placeholder or constants.
if parameters["const_axis"]:
axis = parameters["axis"]
input_tensors = [input_tensor]
else:
if isinstance(parameters["axis"], list):
shape = [len(parameters["axis"])]
else:
shape = [] # shape for None or integers.
axis = tf.placeholder(dtype=tf.int32, name="axis", shape=shape)
input_tensors = [input_tensor, axis]
out = reduce_op(
input_tensor, axis=axis, keepdims=parameters["keepdims"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
dtype = parameters["input_dtype"]
if boolean_tensor_only:
dtype = tf.bool
values = [
create_tensor_data(
dtype,
parameters["input_shape"],
min_value=min_value,
max_value=max_value)
]
if not parameters["const_axis"]:
values.append(np.array(parameters["axis"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
return f
@register_make_test_function()
def make_mean_tests(options):
"""Make a set of tests to do mean."""
return make_reduce_tests(tf.reduce_mean)(options)
@register_make_test_function()
def make_sum_tests(options):
"""Make a set of tests to do sum."""
return make_reduce_tests(tf.reduce_sum)(options)
@register_make_test_function()
def make_reduce_prod_tests(options):
"""Make a set of tests to do prod."""
  # Set the min/max values to -2 and 2 to avoid overflow.
return make_reduce_tests(tf.reduce_prod, -2, 2)(options)
@register_make_test_function()
def make_reduce_max_tests(options):
"""Make a set of tests to do max."""
return make_reduce_tests(tf.reduce_max)(options)
@register_make_test_function()
def make_reduce_min_tests(options):
"""Make a set of tests to do min."""
return make_reduce_tests(tf.reduce_min)(options)
@register_make_test_function()
def make_reduce_any_tests(options):
"""Make a set of tests to do any."""
return make_reduce_tests(tf.reduce_any, boolean_tensor_only=True)(options)
@register_make_test_function()
def make_exp_tests(options):
"""Make a set of tests to do exp."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the exp op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.exp(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"], parameters["input_shape"],
min_value=-100, max_value=9)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_cos_tests(options):
"""Make a set of tests to do cos."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the cos op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.cos(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"], parameters["input_shape"],
min_value=-np.pi, max_value=np.pi)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_log_softmax_tests(options):
"""Make a set of tests to do log_softmax."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[1, 100], [4, 2], [5, 224]],
}]
def build_graph(parameters):
"""Build the log_softmax op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.nn.log_softmax(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(
parameters["input_dtype"],
parameters["input_shape"],
min_value=-100,
max_value=9)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_maximum_tests(options):
"""Make a set of tests to do maximum."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
"input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the maximum op testing graph."""
input_tensor_1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_1",
shape=parameters["input_shape_1"])
input_tensor_2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_2",
shape=parameters["input_shape_2"])
out = tf.maximum(input_tensor_1, input_tensor_2)
return [input_tensor_1, input_tensor_2], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_1"]),
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_2"])
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=8)
@register_make_test_function()
def make_minimum_tests(options):
"""Make a set of tests to do minimum."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
"input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the minimum op testing graph."""
input_tensor_1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_1",
shape=parameters["input_shape_1"])
input_tensor_2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_2",
shape=parameters["input_shape_2"])
out = tf.minimum(input_tensor_1, input_tensor_2)
return [input_tensor_1, input_tensor_2], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_1"]),
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_2"])
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=8)
def make_binary_op_tests_func(binary_operator):
"""Return a function that does a test on a binary operator."""
return lambda options: make_binary_op_tests(options, binary_operator)
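# For example, make_binary_op_tests_func(tf.add) returns a callable taking only
# `options`, matching the signature of the registered generators below.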
@register_make_test_function()
def make_add_tests(options):
make_binary_op_tests(options, tf.add)
@register_make_test_function()
def make_add_n_tests(options):
"""Make a set of tests for AddN op."""
test_parameters = [
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[2, 5, 3, 1]],
"num_inputs": [2, 3, 4, 5],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[5]],
"num_inputs": [2, 3, 4, 5],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[]],
"num_inputs": [2, 3, 4, 5],
},
]
def build_graph(parameters):
"""Builds the graph given the current parameters."""
input_tensors = []
for i in range(parameters["num_inputs"]):
input_tensors.append(
tf.placeholder(
dtype=parameters["dtype"],
name="input_{}".format(i),
shape=parameters["input_shape"]))
out = tf.add_n(input_tensors)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Builds operand inputs for op."""
input_data = []
for i in range(parameters["num_inputs"]):
input_data.append(
create_tensor_data(parameters["dtype"], parameters["input_shape"]))
return input_data, sess.run(
outputs, feed_dict={i: d for i, d in zip(inputs, input_data)})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_div_tests(options):
make_binary_op_tests(options, tf.div)
@register_make_test_function()
def make_sub_tests(options):
make_binary_op_tests(options, tf.subtract)
@register_make_test_function()
def make_mul_tests(options):
make_binary_op_tests(options, tf.multiply)
@register_make_test_function()
def make_pow_tests(options):
make_binary_op_tests(options, tf.pow, expected_tf_failures=7)
@register_make_test_function()
def make_floor_div_tests(options):
make_binary_op_tests(options, tf.floor_div)
@register_make_test_function()
def make_floor_mod_tests(options):
make_binary_op_tests(options, tf.floormod)
@register_make_test_function()
def make_squared_difference_tests(options):
make_binary_op_tests(options, tf.squared_difference)
@register_make_test_function()
def make_gather_tests(options):
"""Make a set of tests to do gather."""
test_parameters = [
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[10], [1, 2, 20]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[3], [5]],
"axis": [-1, 0, 1],
},
{
# TODO(b/123895910): add Nd support for strings.
"params_dtype": [tf.string],
"params_shape": [[8]],
"indices_dtype": [tf.int32],
"indices_shape": [[3]],
"axis": [0],
}
]
def build_graph(parameters):
"""Build the gather op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
indices = tf.placeholder(
dtype=parameters["indices_dtype"],
name="indices",
shape=parameters["indices_shape"])
axis = min(len(parameters["params_shape"]), parameters["axis"])
out = tf.gather(params, indices, axis=axis)
return [params, indices], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
indices = create_tensor_data(parameters["indices_dtype"],
parameters["indices_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, indices], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, indices])))
  # Note that TF can't execute with axis=1 when params_shape=[10].
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
@register_make_test_function()
def make_gather_nd_tests(options):
"""Make a set of tests to do gather_nd."""
test_parameters = [
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 1]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[1, 1]],
},
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 5]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[2, 1], [2, 2]],
},
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 5, 10]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[3, 1], [2, 2], [2, 3], [2, 1, 3]],
},
]
def build_graph(parameters):
"""Build the gather_nd op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
indices = tf.placeholder(
dtype=parameters["indices_dtype"],
name="indices",
shape=parameters["indices_shape"])
out = tf.gather_nd(params, indices)
return [params, indices], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
indices = create_tensor_data(parameters["indices_dtype"],
parameters["indices_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, indices], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, indices])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_gather_with_constant_tests(options):
"""Make a set of test which feed a constant to gather toco."""
test_parameters = [{
"input_shape": [[3]],
"reference_shape": [[2]],
}, {
"input_shape": [[2, 3]],
"reference_shape": [[2, 3]],
}]
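  # Both inputs to the Gather op are constants; the `reference` placeholder
  # keeps the graph alive and lets the gathered result be checked via tf.equal.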
def build_graph(parameters):
"""Build a graph where the inputs to Gather are constants."""
reference = tf.placeholder(
dtype=tf.int32, shape=parameters["reference_shape"])
gather_input = tf.constant(
create_tensor_data(tf.int32, parameters["input_shape"]))
gather_indices = tf.constant([0, 1], tf.int32)
out = tf.equal(reference, tf.gather(gather_input, gather_indices))
return [reference], [out]
def build_inputs(parameters, sess, inputs, outputs):
reference_values = np.zeros(parameters["reference_shape"], dtype=np.int32)
return [reference_values], sess.run(
outputs, feed_dict={inputs[0]: reference_values})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_embedding_lookup_tests(options):
"""Make a set of tests to do gather."""
test_parameters = [
{
"params_dtype": [tf.float32],
"params_shape": [[10], [10, 10]],
"ids_dtype": [tf.int32],
"ids_shape": [[3], [5]],
},
]
def build_graph(parameters):
"""Build the gather op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
ids = tf.placeholder(
dtype=parameters["ids_dtype"],
name="ids",
shape=parameters["ids_shape"])
out = tf.nn.embedding_lookup(params, ids)
return [params, ids], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
ids = create_tensor_data(parameters["ids_dtype"],
parameters["ids_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, ids], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, ids])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs)
@register_make_test_function()
def make_global_batch_norm_tests(options):
"""Make a set of tests to do batch_norm_with_global_normalization."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
"epsilon": [0.1, 0.0001],
"scale_after": [True, False],
}]
def build_graph(parameters):
"""Build the global batch norm testing graph."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
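    # The scale/offset/mean/variance operands (and the extra tensor `x` below)
    # are generated once and baked into the graph as constants; only `input`
    # is fed at run time.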
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
x_norm = tf.nn.batch_norm_with_global_normalization(
x, mean, variance, scale, offset,
parameters["epsilon"], parameters["scale_after"])
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_fused_batch_norm_tests(options):
"""Make a set of tests to do fused_batch_norm."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2]],
"epsilon": [0.001, 0.1],
}]
def build_graph(parameters):
"""Build the testing graph for fused batch normalization."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
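    # As in the global batch norm test above, every operand except `input` is
    # baked into the graph as a constant.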
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
[x_norm, _, _] = tf.nn.fused_batch_norm(
x, scale, offset, mean, variance,
parameters["epsilon"], data_format="NHWC", is_training=False)
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_conv_tests(options):
"""Make a set of tests to do convolution."""
test_parameters = [
{
"input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
"filter_shape": [[1, 1], [2, 3], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
"constant_filter": [True, False],
"channel_multiplier": [1, 2],
"fully_quantize": [False],
},
# TODO(b/134702301): The fully_quantize param is just ignored by the MLIR
# testing path now, resulting in duplicate tests. Either ignore these
# tests or handle it properly in the mlir_convert() function.
{
"input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
"filter_shape": [[1, 1], [2, 3], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
"constant_filter": [True],
"channel_multiplier": [1, 2],
"fully_quantize": [True],
}
]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
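    # tf.nn.conv2d expects filters of shape
    # [filter_height, filter_width, in_channels, out_channels]; these tests
    # reuse "channel_multiplier" as the out_channels count.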
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
# Get filter input either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
filter_input = create_tensor_data(
np.float32, filter_shape, min_value=-10, max_value=10)
input_tensors = [input_tensor]
else:
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
input_tensors = [input_tensor, filter_input]
out = tf.nn.conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
    # Build a list of input values containing either 1 tensor (input) or 2
    # tensors (input, filter), based on whether the filter is constant.
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [
create_tensor_data(np.float32, input_shape, min_value=-1, max_value=1)
]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, filter_shape))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=60)
# Note: This is a regression test for a bug (b/122651451) in which Toco
# incorrectly erases the reduction indices array while it is shared with
# other ops.
@register_make_test_function()
def make_l2norm_shared_epsilon_tests(options):
"""Regression test for a bug (b/122651451)."""
  # Choose a set of parameters.
test_parameters = [{
"input_shape": [[5, 7]],
"dim": [1],
"epsilon": [1e-8],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
epsilon = tf.constant(parameters["epsilon"])
out1 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon)
out2 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon)
out = out1 + out2
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Note: This is a regression test for a bug (b/112436267) in which Toco
# incorrectly fuses weights when multiple Conv2D/FULLY_CONNECTED ops share
# the same constant weight tensor.
@register_make_test_function()
def make_conv_with_shared_weights_tests(options):
"""Make a test where 2 Conv ops shared the same constant weight tensor."""
test_parameters = [{
"input_shape": [[1, 10, 10, 3]],
"filter_shape": [[3, 3]],
"strides": [[1, 1, 1, 1]],
"dilations": [[1, 1, 1, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [1],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
input_tensors = [input_tensor]
    # Construct a constant weights tensor to be used by both Conv2D ops.
filter_tensor = tf.constant(
create_tensor_data(np.float32, filter_shape), dtype=tf.float32)
# Ensure that FuseBinaryIntoFollowingAffine works with an input which
# is shared by multiple affine ops.
conv_input = input_tensor + 0.1
# Construct 2 Conv2D operations which use exactly the same input and
# weights.
result1 = tf.nn.conv2d(
conv_input,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
result2 = tf.nn.conv2d(
conv_input,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
# Add MUL ops after Conv2D ops. These MUL ops should be fused into the
# weights of Conv2D.
result1 = result1 * 2
result2 = result2 * 3
# Add the 2 results up.
out = result1 + result2
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
    # The filter is baked into the graph as a constant, so only the input
    # tensor is fed.
input_shape, unused_filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Note: This is a regression test for a bug (b/112303004) in which Toco
# incorrectly transforms Conv into DepthwiseConv when two Conv ops share the
# same constant weight tensor.
@register_make_test_function()
def make_conv_to_depthwiseconv_with_shared_weights_tests(options):
"""Make a test where 2 Conv ops shared the same constant weight tensor."""
test_parameters = [{
"input_shape": [[1, 10, 10, 1]],
"filter_shape": [[3, 3]],
"strides": [[1, 1, 1, 1]],
"dilations": [[1, 1, 1, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [3],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
    # Construct a constant weights tensor to be used by both Conv2D ops.
filter_tensor = tf.constant(
create_tensor_data(np.float32, filter_shape), dtype=tf.float32)
input_tensors = [input_tensor]
# Construct 2 Conv2D operations which use exactly the same input and
# weights.
result1 = tf.nn.conv2d(
input_tensor,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
result2 = tf.nn.conv2d(
input_tensor,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
# Add the 2 results up.
out = result1 + result2
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
    # The filter is baked into the graph as a constant, so only the input
    # tensor is fed.
input_shape, unused_filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_depthwiseconv_tests(options):
"""Make a set of tests to do convolution."""
# Tensorflow only supports equal strides
test_parameters = [
{
"input_shape": [[1, 3, 4, 3], [1, 10, 10, 3]],
"filter_size": [[1, 1], [1, 2], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"channel_multiplier": [1, 2],
"rate": [[1, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"],
"constant_filter": [True, False],
},
{
"input_shape": [[1, 3, 4, 3]],
"filter_size": [[1, 1]],
"strides": [[1, 1, 2, 1]], # TF needs [1, x, x, 1]
"dilations": [[1, 1, 1, 1], [1, 2, 2, 1]],
"channel_multiplier": [2],
"rate": [[2, 2]], # Only [1, 1] is supported
"padding": ["SAME"],
"data_format": ["NHWC"],
"constant_filter": [True, False],
}
]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_size"]
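    # tf.nn.depthwise_conv2d expects filters of shape
    # [filter_height, filter_width, in_channels, channel_multiplier] and
    # produces in_channels * channel_multiplier output channels.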
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a depthwise conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
# Get filter input either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
filter_input = create_tensor_data(np.float32, filter_shape)
input_tensors = [input_tensor]
else:
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
input_tensors = [input_tensor, filter_input]
out = tf.nn.depthwise_conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
rate=parameters["rate"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
    # Build a list of input values containing either 1 tensor (input) or 2
    # tensors (input, filter), based on whether the filter is constant.
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, filter_shape))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=4)
@register_make_test_function()
def make_split_tests(options):
"""Make a set of tests to do tf.split."""
test_parameters = [{
"input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
"num_or_size_splits": [1, 2, 3, 4, 5],
"axis": [0, 1, 2, 3, -4, -3, -2, -1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.split(
input_tensor, parameters["num_or_size_splits"], parameters["axis"])
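    # tf.split returns a list of tensors; only the first piece is used as the
    # test output.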
return [input_tensor], [out[0]]
def build_inputs(parameters, sess, inputs, outputs):
values = [create_tensor_data(np.float32, parameters["input_shape"])]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=112)
@register_make_test_function()
def make_splitv_tests(options):
"""Make a set of tests to do tf.split_v."""
test_parameters = [{
"input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
"size_splits": [[2, 2], [1, 3], [4, 2], [5, 3],
[-1, 1], [-1, 2], [-1, 4]],
"axis": [0, 1, 2, 3, -4, -3, -2, -1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.split(input_tensor, parameters["size_splits"], parameters["axis"])
return [input_tensor], [out[0]]
def build_inputs(parameters, sess, inputs, outputs):
values = [create_tensor_data(np.float32, parameters["input_shape"])]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=158)
@register_make_test_function()
def make_concat_tests(options):
"""Make a set of tests to do concatenation."""
test_parameters = [{
"base_shape": [[1, 3, 4, 3], [3, 4]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3, -3, -2, -1],
"type": [tf.float32, tf.uint8, tf.int32, tf.int64],
}]
def get_shape(parameters, delta):
"""Return a tweaked version of 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < 0:
axis += len(shape)
if axis < len(shape):
shape[axis] += delta
return shape
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.placeholder(dtype=parameters["type"],
name=("input%d" % n),
shape=get_shape(parameters, n))
all_tensors.append(input_tensor)
out = tf.concat(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for n in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(
parameters["type"], get_shape(parameters, n))
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=60)
@register_make_test_function()
def make_fully_connected_tests(options):
"""Make a set of tests to do fully_connected."""
test_parameters = [{
"shape1": [[3, 3]],
"shape2": [[3, 3]],
"transpose_a": [True, False],
"transpose_b": [True, False],
"constant_filter": [True, False],
}, {
"shape1": [[4, 4], [1, 4], [4]],
"shape2": [[4, 4], [4, 1], [4]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
}, {
"shape1": [[40, 37]],
"shape2": [[37, 40]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
}, {
"shape1": [[40, 37]],
"shape2": [[40, 37]],
"transpose_a": [False],
"transpose_b": [True],
"constant_filter": [True, False],
}, {
"shape1": [[5, 3]],
"shape2": [[5, 3]],
"transpose_a": [True],
"transpose_b": [False],
"constant_filter": [True, False],
}]
def build_graph(parameters):
"""Build a matmul graph given `parameters`."""
input_tensor1 = tf.placeholder(dtype=tf.float32, name="input1",
shape=parameters["shape1"])
# Get input_tensor2 either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
input_tensor2 = create_tensor_data(np.float32, parameters["shape2"])
input_tensors = [input_tensor1]
else:
input_tensor2 = tf.placeholder(
dtype=tf.float32, name="input2", shape=parameters["shape2"])
input_tensors = [input_tensor1, input_tensor2]
out = tf.matmul(input_tensor1, input_tensor2,
transpose_a=parameters["transpose_a"],
transpose_b=parameters["transpose_b"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
    # Build a list of input values containing either 1 tensor (input_values1)
    # or 2 tensors (input_values1, input_values2), based on whether the second
    # input is constant.
values = [create_tensor_data(np.float32, shape=parameters["shape1"])]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, parameters["shape2"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=10)
@register_make_test_function()
def make_l2norm_tests(options):
"""Make a set of tests to do l2norm."""
  # Choose a set of parameters.
test_parameters = [{
"input_shape": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
"dim": [0, 1, 2, 3, [2, 3], -2],
"epsilon": [None, 1e-12, 1e-3],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
if parameters["epsilon"]:
out = tf.nn.l2_normalize(
input_tensor, parameters["dim"], epsilon=parameters["epsilon"])
else:
out = tf.nn.l2_normalize(input_tensor, parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=9)
@register_make_test_function()
def make_local_response_norm_tests(options):
"""Make a set of tests to do local_response_norm."""
  # Choose a set of parameters.
test_parameters = [{
"input_shape": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],
"depth_radius": [None, 0, 1, 3, 5],
"bias": [None, 0.3, -0.1],
"alpha": [None, 2, -3],
"beta": [None, 0.25, 2],
}]
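  # A None value for depth_radius/bias/alpha/beta exercises the op's default
  # for that argument.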
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.local_response_normalization(
input_tensor, depth_radius=parameters["depth_radius"],
bias=parameters["bias"], alpha=parameters["alpha"],
beta=parameters["beta"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_pad_tests(options):
"""Make a set of tests to do pad."""
# TODO(nupurgarg): Add test for tf.uint8.
test_parameters = [
# 4D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
[0, 0], [2, 3]]],
"constant_paddings": [True, False],
},
# 2D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2]],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True, False],
},
# 1D:
{
"dtype": [tf.int32],
"input_shape": [[1]],
"paddings": [[[1, 2]]],
"constant_paddings": [False],
},
]
def build_graph(parameters):
"""Build a pad graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
# Get paddings as either a placeholder or constants.
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
input_tensors = [input_tensor, paddings]
out = tf.pad(input_tensor, paddings=paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_padv2_tests(options):
"""Make a set of tests to do padv2."""
# TODO(nupurgarg): Add test for tf.uint8.
test_parameters = [
# 4D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
[0, 0], [2, 3]]],
"constant_paddings": [True, False],
"constant_values": [0, 2],
},
# 2D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2]],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True, False],
"constant_values": [0, 2],
},
# 1D:
{
"dtype": [tf.int32],
"input_shape": [[1]],
"paddings": [[[0, 1]]],
"constant_paddings": [False],
"constant_values": [0, 2],
},
]
def build_graph(parameters):
"""Build a pad graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
# Get paddings as either a placeholder or constants.
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
input_tensors = [input_tensor, paddings]
out = tf.pad(input_tensor, paddings=paddings,
constant_values=parameters["constant_values"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reshape_tests(options):
"""Make a set of tests to do reshape."""
# All shapes below are suitable for tensors with 420 elements.
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]],
"output_shape": [[15, 28], [420], [1, -1, 5, 7], [-1]],
"constant_shape": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1]],
"output_shape": [[]],
"constant_shape": [True, False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
# Get shape as either a placeholder or constants.
if parameters["constant_shape"]:
output_shape = parameters["output_shape"]
input_tensors = [input_tensor]
else:
# The shape of the shape tensor.
shape_tensor_shape = [len(parameters["output_shape"])]
output_shape = tf.placeholder(
dtype=tf.int32, name="output_shape", shape=shape_tensor_shape)
input_tensors = [input_tensor, output_shape]
out = tf.reshape(input_tensor, shape=output_shape)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_shape"]:
values.append(np.array(parameters["output_shape"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_shape_tests(options):
"""Make a set of tests to do shape."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
"out_type": [tf.int32, tf.int64],
}]
def build_graph(parameters):
"""Build the shape op testing graph."""
# Note that we intentionally leave out the shape from the input placeholder
# to prevent the Shape operation from being optimized out during conversion.
input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
out = tf.shape(input_value, out_type=parameters["out_type"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_rank_tests(options):
"""Make a set of tests to do rank."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
}]
def build_graph(parameters):
"""Build the rank op testing graph."""
input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
out = tf.rank(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_one_hot_tests(options):
"""Make a set of tests to do one_hot."""
test_parameters = [{
"indices_type": [tf.int32, tf.int64],
"indices_shape": [[3], [4, 4], [1, 5], [5, 1]],
"axis": [0, 1],
"dtype": [tf.int32, tf.int64, tf.float32],
"provide_optional_inputs": [True, False],
}]
def build_graph(parameters):
indices = tf.placeholder(
dtype=parameters["indices_type"],
name="indices",
shape=parameters["indices_shape"])
depth = tf.placeholder(dtype=tf.int32, name="depth", shape=())
if not parameters["provide_optional_inputs"]:
out = tf.one_hot(indices=indices, depth=depth)
return [indices, depth], [out]
on_value = tf.placeholder(
dtype=parameters["dtype"], name="on_value", shape=())
off_value = tf.placeholder(
dtype=parameters["dtype"], name="off_value", shape=())
out = tf.one_hot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
axis=parameters["axis"],
dtype=parameters["dtype"])
return [indices, depth, on_value, off_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = [
create_tensor_data(
parameters["indices_type"],
shape=parameters["indices_shape"],
min_value=-1,
max_value=10),
create_tensor_data(tf.int32, shape=None, min_value=1, max_value=10),
]
if parameters["provide_optional_inputs"]:
input_values.append(
create_tensor_data(
parameters["dtype"], shape=None, min_value=1, max_value=10))
input_values.append(
create_tensor_data(
parameters["dtype"], shape=None, min_value=-1, max_value=0))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_resize_bilinear_tests(options):
"""Make a set of tests to do resize_bilinear."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [None, True, False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.image.resize_bilinear(input_tensor, size=parameters["size"],
align_corners=parameters["align_corners"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_resize_nearest_neighbor_tests(options):
"""Make a set of tests to do resize_nearest_neighbor."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.image.resize_nearest_neighbor(
input_tensor,
size=parameters["size"],
align_corners=parameters["align_corners"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_sigmoid_tests(options):
"""Make a set of tests to do sigmoid."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.sigmoid(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_softmax_tests(options):
"""Make a set of tests to do softmax."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [2, 3]],
"dim": [-1, 0],
}, {
"dtype": [tf.float32],
"input_shape": [[4, 7]],
"dim": [-1, 1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.nn.softmax(input_tensor, dim=parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_space_to_depth_tests(options):
"""Make a set of tests to do space_to_depth."""
test_parameters = [{
"dtype": [tf.float32, tf.int32, tf.uint8, tf.int64],
"input_shape": [[2, 12, 24, 1]],
"block_size": [2, 3, 4],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.space_to_depth(input_tensor, block_size=parameters["block_size"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_depth_to_space_tests(options):
"""Make a set of tests to do depth_to_space."""
test_parameters = [{
"dtype": [tf.float32, tf.int32, tf.uint8, tf.int64],
"input_shape": [[2, 3, 4, 16]],
"block_size": [2, 4],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.depth_to_space(input_tensor, block_size=parameters["block_size"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_space_to_batch_nd_tests(options):
"""Make a set of tests to do space_to_batch_nd."""
# TODO(nupurgarg): Add test for uint8.
test_parameters = [
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2, 2, 3], [2, 2, 4, 1]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
{
"dtype": [tf.float32],
"input_shape": [[2, 3, 7, 3]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
      # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
{
"dtype": [tf.float32],
"input_shape": [[1, 4, 4, 4, 1, 1]],
"block_shape": [[2, 2, 2]],
"paddings": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
]
def build_graph(parameters):
"""Build a space_to_batch graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get paddings either as a const or as a placeholder (tensor).
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="paddings", shape=shape)
input_tensors.append(paddings)
out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=56)
@register_make_test_function()
def make_batch_to_space_nd_tests(options):
"""Make a set of tests to do batch_to_space_nd."""
test_parameters = [
{
"dtype": [tf.float32, tf.int64, tf.int32],
"input_shape": [[12, 3, 3, 1]],
"block_shape": [[1, 4], [2, 2], [3, 4]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
},
# Single batch (no-op)
{
"dtype": [tf.float32],
"input_shape": [[1, 3, 3, 1]],
"block_shape": [[1, 1]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True],
"constant_crops": [True],
},
# Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
{
"dtype": [tf.float32],
"input_shape": [[8, 2, 2, 2, 1, 1]],
"block_shape": [[2, 2, 2]],
"crops": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
},
]
def build_graph(parameters):
"""Build a batch_to_space graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get crops either as a const or as a placeholder (tensor).
if parameters["constant_crops"]:
crops = parameters["crops"]
else:
shape = [len(parameters["crops"]), 2]
crops = tf.placeholder(dtype=tf.int32, name="crops", shape=shape)
input_tensors.append(crops)
out = tf.batch_to_space_nd(input_tensor, block_shape, crops)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_crops"]:
values.append(np.array(parameters["crops"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_transpose_tests(options):
"""Make a set of tests to do transpose."""
# TODO(nupurgarg): Add test for uint8.
test_parameters = [{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[2, 2, 3]],
"perm": [[0, 1, 2], [0, 2, 1]],
"constant_perm": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1, 2, 3, 4]],
"perm": [[0, 1, 2, 3], [3, 0, 1, 2]],
"constant_perm": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1, 2, 3, 4, 5]],
"perm": [[4, 3, 2, 1, 0]],
"constant_perm": [True, False],
}]
def build_graph(parameters):
"""Build a transpose graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["constant_perm"]:
perm = parameters["perm"]
input_tensors = [input_tensor]
else:
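      # NOTE: this placeholder shape of [len(perm), 2] looks inherited from the
      # pad tests; the non-constant perm cases fail in TF and are counted in
      # expected_tf_failures.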
shape = [len(parameters["perm"]), 2]
perm = tf.placeholder(dtype=tf.int32, name="perm", shape=shape)
input_tensors = [input_tensor, perm]
out = tf.transpose(input_tensor, perm=perm)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_perm"]:
values.append(np.array(parameters["perm"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=9)
@register_make_test_function()
def make_squeeze_tests(options):
"""Make a set of tests to do squeeze."""
test_parameters = [{
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 2, 1, 3, 1, 4, 1, 1]],
"axis": [
None, [], [0, 2], [4, 7], [-1, 0, 2, 0, 7, -6], [1], [2, 3, 2],
[-1, -2, -4, -6, -8], [0, 2, 4, 6, 7], [7, 6, 4, 2, 0], [6, 6],
[0, 1, 2, 3, 4, 5, 6, 7], [-2, -3, 1, 0, 7, -5]
],
}, {
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1]],
"axis": [None, [], [0], [-1]],
}, {
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 1, 1, 1, 1]],
"axis": [None, [], [0], [3, 0], [-2, 0, 3, 2]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.squeeze(input_tensor, axis=parameters["axis"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
@register_make_test_function()
def make_squeeze_transpose_tests(options):
"""Make a set of tests to do squeeze followed by transpose."""
test_parameters = [{
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 4, 10, 1]],
"axis": [[-1], [3]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.squeeze(input_tensor, axis=parameters["axis"])
out = tf.transpose(out, perm=[1, 2])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=0)
def _make_strided_slice_tests(options, test_parameters,
expected_tf_failures=0):
"""Utility function to make strided_slice_tests based on parameters."""
def build_graph(parameters):
"""Build graph for stride_slice test."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["constant_indices"]:
begin = parameters["begin"]
end = parameters["end"]
strides = parameters["strides"]
tensors = [input_tensor]
else:
begin = tf.placeholder(
dtype=parameters["index_type"],
name="begin",
shape=[len(parameters["input_shape"])])
end = tf.placeholder(
dtype=parameters["index_type"],
name="end",
shape=[len(parameters["input_shape"])])
strides = (
tf.placeholder(
dtype=parameters["index_type"],
name="strides",
shape=[len(parameters["input_shape"])])
if parameters["strides"] is not None else None)
tensors = [input_tensor, begin, end]
if strides is not None:
tensors.append(strides)
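    # NOTE: parameters["shrink_axis_mask"] is listed in the test parameters but
    # is not forwarded to tf.strided_slice here.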
out = tf.strided_slice(
input_tensor,
begin,
end,
strides,
begin_mask=parameters["begin_mask"],
end_mask=parameters["end_mask"])
return tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build inputs for stride_slice test."""
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
index_type = TF_TYPE_INFO[parameters["index_type"]][0]
values = [input_values]
if not parameters["constant_indices"]:
begin_values = np.array(parameters["begin"]).astype(index_type)
end_values = np.array(parameters["end"]).astype(index_type)
stride_values = (
np.array(parameters["strides"]).astype(index_type)
if parameters["strides"] is not None else None)
values.append(begin_values)
values.append(end_values)
if stride_values is not None:
values.append(stride_values)
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
@register_make_test_function()
def make_strided_slice_tests(options):
"""Make a set of tests to do strided_slice."""
# TODO(soroosh): add test/support for uint8.
test_parameters = [
# 4-D (basic cases with const/non-const indices).
{
"dtype": [tf.float32, tf.int32, tf.int64],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"strides": [None, [2, 1, 3, 1]],
"begin": [[0, 0, 0, 0]],
"end": [[12, 2, 2, 5]],
"begin_mask": [None],
"end_mask": [None],
"shrink_axis_mask": [None],
"constant_indices": [False, True],
},
# 4-D with non-trivial begin & end.
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
"end": [[8, 2, 2, 3], [12, 2, 2, 5]],
"strides": [None, [2, 1, 3, 1]],
"begin_mask": [None, 8],
"end_mask": [None, 3],
"shrink_axis_mask": [None, 15, -1],
"constant_indices": [True],
},
      # Begin, end, strides dims differ from the input shape.
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0]],
"end": [[1]],
"strides": [None, [1]],
"begin_mask": [0],
"end_mask": [0],
"shrink_axis_mask": [1],
"constant_indices": [True],
},
# 2-D
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[2, 3]],
"begin": [[0, 0]],
"end": [[2, 2]],
"strides": [None, [2, 2]],
"begin_mask": [None, 1, 2],
"end_mask": [None, 1, 2],
"shrink_axis_mask": [None, 1, 2, 3, -1],
"constant_indices": [False, True],
},
# Negative strides
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[2, 3]],
"begin": [[0, -1]],
"end": [[2, -3]],
"strides": [[1, -1]],
"begin_mask": [None, 1, 2],
"end_mask": [None, 1, 2],
"shrink_axis_mask": [None, 1, 2, 3, -1],
"constant_indices": [False],
},
]
_make_strided_slice_tests(options, test_parameters, expected_tf_failures=2)
@register_make_test_function()
def make_strided_slice_1d_exhaustive_tests(options):
"""Make a set of exhaustive tests for 1D strided_slice."""
test_parameters = [
# 1-D Exhaustive
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[3]],
"begin": [[-2], [-1], [0], [1], [2]],
"end": [[-2], [-1], [0], [1], [2]],
"strides": [[-2], [-1], [1], [2]],
"begin_mask": [0, 1],
"end_mask": [0, 1],
"shrink_axis_mask": [0],
"constant_indices": [False],
},
]
_make_strided_slice_tests(options, test_parameters)
# TODO(b/137615945): Expand the test coverage of this one and remove the old
# ones.
@register_make_test_function()
def make_strided_slice_np_style_tests(options):
"""Make a set of tests to test strided_slice in np style."""
test_parameters = [
{
"dtype": [tf.float32],
"shape": [[12, 7], [33, 1]],
"spec": [[slice(3, 7, 2), slice(None)],
[tf.newaxis,
slice(3, 7, 1), tf.newaxis,
slice(None)], [slice(1, 5, 1), slice(None)]],
},
# 1-D case
{
"dtype": [tf.float32],
"shape": [[44]],
"spec": [[slice(3, 7, 2)], [tf.newaxis, slice(None)]],
},
# Shrink mask.
{
"dtype": [tf.float32],
"shape": [[21, 15, 7]],
"spec": [[slice(3, 7, 2), slice(None), 2]],
},
# Ellipsis.
{
"dtype": [tf.float32],
"shape": [[21, 15, 7]],
"spec": [[slice(3, 7, 2), Ellipsis]],
},
# All combinations.
{
"dtype": [tf.float32],
"shape": [[21, 15, 7]],
"spec": [[tf.newaxis,
slice(3, 7, 2),
slice(None), Ellipsis]],
},
]
def build_strided_slice_spec(parameters):
"""Build strided_slice spec.
Args:
parameters: Test configurations.
Returns:
strided_slice spec, e.g., [2:3, :] or [tf.newaxis, :, tf.newaxis].
"""
def build_graph(parameters):
"""Build a simple graph with np style strided_slice."""
input_value = tf.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"])
out = input_value.__getitem__(parameters["spec"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"], parameters["shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# For verifying https://github.com/tensorflow/tensorflow/issues/23599
# TODO(chaomei): refactor the test to cover more cases, like negative stride,
# negative array index etc.
@register_make_test_function()
def make_resolve_constant_strided_slice_tests(options):
"""Make a set of tests to show strided_slice yields incorrect results."""
test_parameters = [{
"unused_iteration_counter": [1],
}]
def build_graph(parameters):
"""Build the strided_slice op testing graph."""
del parameters
input_values = tf.placeholder(dtype=tf.float32, shape=[4, 2])
data = tf.constant([[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]], tf.float32)
return [input_values], [input_values + data[:, :2]]
def build_inputs(parameters, sess, inputs, outputs):
del parameters
input_values = np.zeros([4, 2], dtype=np.float32)
return [input_values], sess.run(
outputs, feed_dict={inputs[0]: input_values})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_lstm_tests(options):
"""Make a set of tests to do basic Lstm cell."""
test_parameters = [
{
"dtype": [tf.float32],
"num_batchs": [1],
"time_step_size": [1],
"input_vec_size": [3],
"num_cells": [4],
"split_tflite_lstm_inputs": [False],
},
]
def build_graph(parameters):
"""Build a simple graph with BasicLSTMCell."""
num_batchs = parameters["num_batchs"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
num_cells = parameters["num_cells"]
inputs_after_split = []
for i in xrange(time_step_size):
one_timestamp_input = tf.placeholder(
dtype=parameters["dtype"],
name="split_{}".format(i),
shape=[num_batchs, input_vec_size])
inputs_after_split.append(one_timestamp_input)
    # Currently the LSTM identifier has a few limitations: it only supports
    # forget_bias == 0 and inner state activation == tanh.
# TODO(zhixianyan): Add another test with forget_bias == 1.
# TODO(zhixianyan): Add another test with relu as activation.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(
num_cells, forget_bias=0.0, state_is_tuple=True)
cell_outputs, _ = rnn.static_rnn(
lstm_cell, inputs_after_split, dtype=tf.float32)
out = cell_outputs[-1]
return inputs_after_split, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Feed inputs, assign variables, and freeze graph."""
with tf.variable_scope("", reuse=True):
kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
bias = tf.get_variable("rnn/basic_lstm_cell/bias")
kernel_values = create_tensor_data(
parameters["dtype"], [kernel.shape[0], kernel.shape[1]], -1, 1)
bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0,
1)
sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))
num_batchs = parameters["num_batchs"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
input_values = []
for _ in xrange(time_step_size):
tensor_data = create_tensor_data(parameters["dtype"],
[num_batchs, input_vec_size], 0, 1)
input_values.append(tensor_data)
out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
return input_values, out
# TODO(zhixianyan): Automatically generate rnn_states for lstm cell.
extra_toco_options = ExtraTocoOptions()
extra_toco_options.rnn_states = (
"{state_array:rnn/BasicLSTMCellZeroState/zeros,"
"back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
"{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
"back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}")
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
extra_toco_options,
use_frozen_graph=True)
def make_l2_pool(input_tensor, ksize, strides, padding, data_format):
"""Given an input perform a sequence of TensorFlow ops to produce l2pool."""
return tf.sqrt(tf.nn.avg_pool(
tf.square(input_tensor), ksize=ksize, strides=strides,
padding=padding, data_format=data_format))
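# Illustrative note (not part of the original library): l2_pool computes
# sqrt(mean(x**2)) over each pooling window, e.g. a 1x2 window over
# [3.0, 4.0] yields sqrt((9 + 16) / 2) ~= 3.54.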
@register_make_test_function()
def make_topk_tests(options):
"""Make a set of tests to do topk."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[10], [5, 20]],
"input_k": [None, 1, 3],
}]
def build_graph(parameters):
"""Build the topk op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["input_k"] is not None:
k = tf.placeholder(dtype=tf.int32, name="input_k", shape=[])
inputs = [input_value, k]
else:
k = tf.constant(3, name="k")
inputs = [input_value]
out = tf.nn.top_k(input_value, k)
return inputs, [out[1]]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
if parameters["input_k"] is not None:
k = np.array(parameters["input_k"], dtype=np.int32)
return [input_value, k], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value, k])))
else:
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_arg_min_max_tests(options):
"""Make a set of tests to do arg_max."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]],
"output_type": [tf.int32, tf.int64],
"is_arg_max": [True],
}]
def build_graph(parameters):
"""Build the topk op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
axis = random.randint(0, max(len(parameters["input_shape"]) - 1, 0))
if parameters["is_arg_max"]:
out = tf.arg_max(input_value, axis, output_type=parameters["output_type"])
else:
out = tf.arg_min(input_value, axis, output_type=parameters["output_type"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=4)
@register_make_test_function()
def make_equal_tests(options):
"""Make a set of tests to do equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([], []),
([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_not_equal_tests(options):
"""Make a set of tests to do not equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the not euqal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.not_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_greater_tests(options):
"""Make a set of tests to do greater."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the greater op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.greater(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_greater_equal_tests(options):
"""Make a set of tests to do greater_equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the greater_equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.greater_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_less_tests(options):
"""Make a set of tests to do less."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the less op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.less(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_less_equal_tests(options):
"""Make a set of tests to do less_equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the less_equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.less_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_floor_tests(options):
"""Make a set of tests to do floor."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the floor op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.floor(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_ceil_tests(options):
"""Make a set of tests to do ceil."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the ceil op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.ceil(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_round_tests(options):
"""Build the round op testing graph."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the round op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.round(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_neg_tests(options):
"""Make a set of tests to do neg."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [5], []],
}]
def build_graph(parameters):
"""Build the neg op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.negative(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_zeros_like_tests(options):
"""Make a set of tests to do zeros_like."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the zeros_like op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
zeros = tf.zeros_like(input_tensor)
    # This maximum node is here so that toco can perform constants-propagation
    # through the zeros_like above, which it can't do if the output of the
    # zeros_like is an output of the whole graph (graph outputs can't be
    # constants). If toco does not perform such constants-propagation, the
    # resulting tflite graph retains the zeros_like as a Fill op, which is
    # unsupported by TFLite, even as a custom op.
out = tf.maximum(zeros, input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_cast_tests(options):
"""Generate examples for cast."""
test_parameters = [{
"input_dtype": [tf.int32],
"output_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the cast testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.cast(input_value, parameters["output_dtype"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def _make_elementwise_tests(op):
"""Make a set of tests to do element-wise operations."""
def f(options):
"""Actual function that generates examples."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the unary op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = op(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
return f
@register_make_test_function()
def make_sin_tests(options):
"""Make a set of tests to do sin."""
return _make_elementwise_tests(tf.sin)(options)
@register_make_test_function()
def make_log_tests(options):
"""Make a set of tests to do log."""
return _make_elementwise_tests(tf.log)(options)
@register_make_test_function()
def make_sqrt_tests(options):
"""Make a set of tests to do sqrt."""
return _make_elementwise_tests(tf.sqrt)(options)
@register_make_test_function()
def make_rsqrt_tests(options):
"""Make a set of tests to do 1/sqrt."""
return _make_elementwise_tests(tf.rsqrt)(options)
@register_make_test_function()
def make_square_tests(options):
"""Make a set of tests to do square."""
return _make_elementwise_tests(tf.square)(options)
@register_make_test_function()
def make_where_tests(options):
"""Make a set of tests to do where."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32],
"input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 4]),],
"use_where_v2": [False, True],
},
{
"input_dtype": [tf.float32, tf.int32],
"input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 1]),],
"use_where_v2": [True],
},
]
def build_graph(parameters):
"""Build the where op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_set"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input3",
shape=parameters["input_shape_set"][1])
less = tf.less(input_value1, input_value2)
where = tf.where_v2 if parameters["use_where_v2"] else tf.where
out = where(less, input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_set"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_set"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_slice_tests(options):
"""Make a set of tests to do slice."""
# TODO(renjieliu): add test/support for uint8.
test_parameters = [
# 4-D
{
"dtype": [tf.float32, tf.int32, tf.int64, tf.string],
"index_type": [tf.int32, tf.int64],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
"size": [[8, 2, 2, 3], [11, 2, 1, 5]],
},
# 2-D
{
"dtype": [tf.float32, tf.int32, tf.int64, tf.string],
"index_type": [tf.int32, tf.int64],
"input_shape": [[2, 3]],
"begin": [[0, 0], [1, 0]],
"size": [[2, 3], [2, 2]],
},
# 4-D with size -1
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[4, 4, 4, 4]],
"begin": [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
[0, 0, 0, 1]],
"size": [[-1, 1, 1, 1], [1, -1, 1, 1], [1, 1, -1, 1], [1, 1, 1, -1]],
},
      # Last dimension index out of bounds.
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[4, 4, 4]],
"begin": [[3, 3, 4]],
"size": [[-1, -1, -1]],
},
]
def build_graph(parameters):
"""Build graph for slice test."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
begin = tf.placeholder(
dtype=parameters["index_type"],
name="begin",
shape=[len(parameters["input_shape"])])
size = tf.placeholder(
dtype=parameters["index_type"],
name="size",
shape=[len(parameters["input_shape"])])
tensors = [input_tensor, begin, size]
out = tf.slice(input_tensor, begin, size)
return tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build inputs for slice test."""
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
index_type = TF_TYPE_INFO[parameters["index_type"]][0]
begin_values = np.array(parameters["begin"]).astype(index_type)
size_values = np.array(parameters["size"]).astype(index_type)
values = [input_values, begin_values, size_values]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=24)
@register_make_test_function()
def make_conv2d_transpose_tests(options):
"""Make a set of tests to do transpose_conv."""
test_parameters = [{
"input_shape": [[1, 50, 54, 3]],
"filter_shape": [[1, 1, 8, 3], [1, 2, 8, 3], [1, 3, 8, 3], [1, 4, 8, 3]],
"output_shape": [[1, 100, 108, 8]],
"dynamic_output_shape": [True, False],
}, {
"input_shape": [[1, 16, 1, 512]],
"filter_shape": [[4, 1, 512, 512]],
"output_shape": [[1, 32, 1, 512]],
"dynamic_output_shape": [True, False],
}, {
"input_shape": [[1, 128, 128, 1]],
"filter_shape": [[4, 4, 1, 1]],
"output_shape": [[1, 256, 256, 1]],
"dynamic_output_shape": [True, False],
}]
def build_graph(parameters):
"""Build a transpose_conv graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
filter_tensor = tf.placeholder(
dtype=tf.float32, name="filter", shape=parameters["filter_shape"])
input_tensors = [input_tensor, filter_tensor]
if parameters["dynamic_output_shape"]:
output_shape = tf.placeholder(dtype=tf.int32, shape=[4])
input_tensors.append(output_shape)
else:
output_shape = parameters["output_shape"]
out = tf.nn.conv2d_transpose(
input_tensor,
filter_tensor,
output_shape=output_shape,
padding="SAME",
strides=(1, 2, 2, 1))
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(np.float32, parameters["input_shape"]),
create_tensor_data(np.float32, parameters["filter_shape"])
]
if parameters["dynamic_output_shape"]:
values.append(np.array(parameters["output_shape"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Since computing the output_shape for tf.nn.conv2d_transpose's input_sizes
# argument is fairly complicated, we first perform a "conv2d" operation to
# get the output, then feed that output into tf.nn.conv2d_backprop_input.
# This test therefore depends on the correctness of the "conv2d" operation.
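# Illustrative shape example (an assumption for clarity, not from the source):
# with input_shape [1, 3, 4, 1], filter_size [3, 3], channel_multiplier 1,
# strides [1, 3, 3, 1] and padding "SAME", conv2d yields [1, 1, 2, 1], and
# conv2d_backprop_input maps that back to the original [1, 3, 4, 1].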
@register_make_test_function()
def make_transpose_conv_tests(options):
"""Make a set of tests to do transpose_conv."""
  # TensorFlow only supports equal strides.
test_parameters = [{
"input_shape": [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]],
"filter_size": [[1, 1], [1, 2], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"],
"channel_multiplier": [1, 2],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_size"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a transpose_conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
conv_outputs = tf.nn.conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
out = tf.nn.conv2d_backprop_input(
input_shape,
filter_input,
conv_outputs,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
input_tensors = [input_tensor, filter_input]
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [
create_tensor_data(np.float32, input_shape),
create_tensor_data(np.float32, filter_shape)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_tile_tests(options):
"""Make a set of tests to do tile."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.bool],
"input_shape": [[3, 2, 1], [2, 2, 2]],
"multiplier_dtype": [tf.int32, tf.int64],
"multiplier_shape": [[3]]
}]
def build_graph(parameters):
"""Build the tile op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
shape=parameters["input_shape"],
name="input")
multiplier_value = tf.placeholder(
dtype=parameters["multiplier_dtype"],
shape=parameters["multiplier_shape"],
name="multiplier")
out = tf.tile(input_value, multiplier_value)
return [input_value, multiplier_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
multipliers_value = create_tensor_data(
parameters["multiplier_dtype"],
parameters["multiplier_shape"],
min_value=0)
return [input_value, multipliers_value], sess.run(
outputs,
feed_dict={
inputs[0]: input_value,
inputs[1]: multipliers_value
})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_expand_dims_tests(options):
"""Make a set of tests to do expand_dims."""
test_parameters = [{
"input_type": [tf.float32, tf.int32],
"input_shape": [[5, 4]],
"axis_value": [0, 1, 2, -1, -2, -3],
"constant_axis": [True, False],
}]
def build_graph(parameters):
"""Build the where op testing graph."""
inputs = []
input_value = tf.placeholder(
dtype=parameters["input_type"],
name="input",
shape=parameters["input_shape"])
inputs.append(input_value)
if parameters["constant_axis"]:
axis_value = tf.constant(
parameters["axis_value"], dtype=tf.int32, shape=[1])
else:
axis_value = tf.placeholder(dtype=tf.int32, name="axis", shape=[1])
inputs.append(axis_value)
out = tf.expand_dims(input_value, axis=axis_value)
return inputs, [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
input_values.append(
create_tensor_data(parameters["input_type"], parameters["input_shape"]))
if not parameters["constant_axis"]:
input_values.append(np.array([parameters["axis_value"]], dtype=np.int32))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_sparse_to_dense_tests(options):
"""Make a set of tests to do sparse to dense."""
test_parameters = [{
"value_dtype": [tf.float32, tf.int32, tf.int64],
"index_dtype": [tf.int32, tf.int64],
"value_count": [1, 3, 6, 8],
"dense_shape": [[15], [3, 10], [4, 4, 4, 4], [7, 10, 9]],
"default_value": [0, -1],
"value_is_scalar": [True, False],
}]
# Return a single value for 1-D dense shape, but a tuple for other shapes.
def generate_index(dense_shape):
if len(dense_shape) == 1:
return np.random.randint(dense_shape[0])
else:
index = []
for shape in dense_shape:
index.append(np.random.randint(shape))
return tuple(index)
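  # For example (illustrative): generate_index([3, 10]) might return (2, 7),
  # while generate_index([15]) returns a plain int such as 4.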
def build_graph(parameters):
"""Build the sparse_to_dense op testing graph."""
dense_shape = parameters["dense_shape"]
    # Special handling for the value_is_scalar case: value_count must be 1.
if parameters["value_is_scalar"] and parameters["value_count"] == 1:
value = tf.placeholder(
name="value", dtype=parameters["value_dtype"], shape=())
else:
value = tf.placeholder(
name="value",
dtype=parameters["value_dtype"],
shape=[parameters["value_count"]])
indices = set()
while len(indices) < parameters["value_count"]:
indices.add(generate_index(dense_shape))
indices = tf.constant(tuple(indices), dtype=parameters["index_dtype"])
# TODO(renjieliu): Add test for validate_indices case.
out = tf.sparse_to_dense(
indices,
dense_shape,
value,
parameters["default_value"],
validate_indices=False)
return [value], [out]
def build_inputs(parameters, sess, inputs, outputs):
if parameters["value_is_scalar"] and parameters["value_count"] == 1:
input_value = create_scalar_data(parameters["value_dtype"])
else:
input_value = create_tensor_data(parameters["value_dtype"],
[parameters["value_count"]])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_pack_tests(options):
"""Make a set of tests to do stack."""
test_parameters = [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
},
{
"dtype": [tf.int32],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [6],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
},
{
"dtype": [tf.int64],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [5],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
}
]
def get_shape(parameters):
"""Return a tweaked version of 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < len(shape):
shape[axis] += parameters["additional_shape"]
return shape
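  # For example (illustrative): base_shape [3, 4] with axis 1 and
  # additional_shape 2 yields [3, 6]; an axis beyond the rank leaves the
  # shape unchanged.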
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name=("input%d" % n),
shape=get_shape(parameters))
all_tensors.append(input_tensor)
out = tf.stack(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for _ in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(np.float32, get_shape(parameters))
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=72)
@register_make_test_function()
def make_unpack_tests(options):
"""Make a set of tests to do unstack."""
test_parameters = [{
"base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
"axis": [0, 1, 2, 3],
}]
def get_valid_axis(parameters):
"""Return a tweaked version of 'axis'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
while axis > len(shape) - 1:
axis -= 1
return axis
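  # For example (illustrative): with base_shape [3, 4] and axis 3, the loop
  # clamps the axis down to 1, the last valid dimension.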
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
outs = tf.unstack(input_tensor, axis=get_valid_axis(parameters))
return [input_tensor], [outs[0]]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_range_tests(options):
"""Make a set of tests to do range."""
test_parameters = [{
"dtype": [tf.int32, tf.float32],
"offset": [10, 100, 1000],
"delta": [1, 2, 3, 4, -1, -2, -3, -4],
}]
def build_graph(parameters):
"""Build the range op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"], name=("start"), shape=[])
if parameters["delta"] < 0:
offset = parameters["offset"] * -1
else:
offset = parameters["offset"]
delta = parameters["delta"]
limit_tensor = input_tensor + offset
delta_tensor = tf.constant(delta, dtype=parameters["dtype"])
out = tf.range(input_tensor, limit_tensor, delta_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_scalar_data(parameters["dtype"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_fill_tests(options):
"""Make a set of tests to do fill."""
test_parameters = [{
"dims_dtype": [tf.int32, tf.int64],
"dims_shape": [[], [1], [3], [3, 3]],
"value_dtype": [tf.int32, tf.int64, tf.float32],
}]
def build_graph(parameters):
"""Build the fill op testing graph."""
input1 = tf.placeholder(
dtype=parameters["dims_dtype"],
name="dims",
shape=parameters["dims_shape"])
input2 = tf.placeholder(
dtype=parameters["value_dtype"], name="value", shape=[])
out = tf.fill(input1, input2)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input1 = create_tensor_data(parameters["dims_dtype"],
parameters["dims_shape"], 1)
input2 = create_scalar_data(parameters["value_dtype"])
return [input1, input2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input1, input2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
def _make_logical_tests(op):
"""Make a set of tests to do logical operations."""
def logical(options, expected_tf_failures=0):
"""Generate examples."""
test_parameters = [{
"input_shape_pair": [([], []), ([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the logical testing graph."""
input_value1 = tf.placeholder(
dtype=tf.bool, name="input1", shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=tf.bool, name="input2", shape=parameters["input_shape_pair"][1])
out = op(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(tf.bool,
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(tf.bool,
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
return logical
@register_make_test_function()
def make_logical_or_tests(options):
"""Make a set of tests to do logical_or."""
return _make_logical_tests(tf.logical_or)(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_and_tests(options):
"""Make a set of tests to do logical_and."""
return _make_logical_tests(tf.logical_and)(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_xor_tests(options):
"""Make a set of tests to do logical_xor.
Test logical_not as well.
"""
return _make_logical_tests(tf.logical_xor)(options, expected_tf_failures=1)
@register_make_test_function()
def make_mirror_pad_tests(options):
"""Make a set of tests to do mirror_pad."""
test_parameters = [
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [1, 1]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["REFLECT"],
"type": ["placeholder"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["SYMMETRIC"],
"type": ["const"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[3, 2, 4, 5]],
"padding_matrix": [[[1, 1], [2, 2], [1, 1], [1, 1]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.int32, name="input", shape=parameters["input_shape"])
if parameters["type"] != "const":
padding_matrix = tf.placeholder(
dtype=tf.int32,
name="padding",
shape=[len(parameters["input_shape"]), 2])
input_tensors = [input_tensor, padding_matrix]
else:
padding_matrix = tf.constant(np.array(parameters["padding_matrix"]))
input_tensors = [input_tensor]
output = tf.pad(
input_tensor, paddings=padding_matrix, mode=parameters["mode"])
return input_tensors, [output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
if parameters["type"] != "const":
input_values.append(np.array(parameters["padding_matrix"]))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_unroll_batch_matmul_tests(options):
"""Make a set of tests to test unroll_batch_matmul."""
  # The test cases below require broadcasting support (BatchMatMulV2
  # semantics), which isn't supported as of this change.
broadcast_shape_params = [
# Simple broadcast.
[(1, 2, 3), (3, 5), False, False],
# Empty batch broadcast.
[(2, 5, 3), (3, 7), False, False],
# Single batch with non-empty batch broadcast.
[(1, 5, 3), (4, 3, 7), False, False],
# Broadcast both operands
[(3, 1, 5, 3), (1, 4, 3, 7), False, False],
]
test_parameters = [{
"dtype": [tf.float32],
"shape": [
[(2, 2, 3), (2, 3, 2), False, False],
[(2, 2, 3), (2, 3, 2), True, True],
[(2, 2, 3), (2, 2, 3), False, True],
[(2, 2, 3), (2, 2, 3), True, False],
[(4, 2, 2, 3), (4, 2, 3, 2), False, False],
[(4, 2, 2, 3), (4, 2, 3, 2), True, True],
[(4, 2, 2, 3), (4, 2, 2, 3), False, True],
[(4, 2, 2, 3), (4, 2, 2, 3), True, False]
] + broadcast_shape_params,
# TODO(b/130887442): Improve the forward compatibility tests for every
# ops.
"forward_compatibility_test": [False, True],
}]
def build_graph(parameters):
"""Build the batch_matmul op testing graph."""
def _build_graph():
input_tensor1 = tf.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"][0])
input_tensor2 = tf.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"][1])
# Should be unrolled and replaced with fully_connected ops in the end.
out = tf.matmul(
input_tensor1,
input_tensor2,
transpose_a=parameters["shape"][2],
transpose_b=parameters["shape"][3])
return [input_tensor1, input_tensor2], [out]
if parameters["forward_compatibility_test"]:
# This is hardcoded to the date after MatMulV2 is activated.
# TODO(b/130887442): Improve the forward compatibility tests for every
# ops, and remove the hardcoded date.
with tf.compat.forward_compatibility_horizon(2019, 4, 26):
return _build_graph()
else:
return _build_graph()
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(
parameters["dtype"], shape=parameters["shape"][0])
input_value2 = create_tensor_data(
parameters["dtype"], shape=parameters["shape"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_placeholder_with_default_tests(options):
"""Make a set of tests to test placeholder_with_default."""
test_parameters = [{
"dtype": [tf.float32, tf.int32, tf.int64],
}]
def build_graph(parameters):
"""Build the placeholder_with_default testing graph."""
const_node = tf.constant(
[1, 2, 2, 0], shape=[2, 2], dtype=parameters["dtype"])
input_tensor = tf.placeholder_with_default(
const_node, shape=[2, 2], name="input")
out = tf.equal(input_tensor, const_node, name="output")
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
numpy_type = TF_TYPE_INFO[parameters["dtype"]][0]
input_value = np.array([[1, 0], [2, 1]], numpy_type)
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_unique_tests(options):
"""Make a set of tests for Unique op."""
test_parameters = [
{
"input_shape": [[1]],
"index_type": [tf.int32, tf.int64, None],
"input_values": [3]
},
{
"input_shape": [[5]],
"index_type": [tf.int32, tf.int64],
"input_values": [[3, 2, 1, 2, 3]]
},
{
"input_shape": [[7]],
"index_type": [tf.int32, tf.int64],
"input_values": [[1, 1, 1, 1, 1, 1, 1]]
},
{
"input_shape": [[5]],
"index_type": [tf.int32, tf.int64],
"input_values": [[3, 2, 1, 0, -1]]
}]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.int32, name="input", shape=parameters["input_shape"])
if parameters["index_type"] is None:
output = tf.unique(input_tensor)
else:
output = tf.unique(input_tensor, parameters["index_type"])
return [input_tensor], output
def build_inputs(parameters, sess, inputs, outputs):
input_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reverse_v2_tests(options):
"""Make a set of tests to do reverse_v2."""
test_parameters = [{
"base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
"axis": [0, 1, 2, 3],
}]
def get_valid_axis(parameters):
"""Return a tweaked version of 'axis'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
while axis > len(shape) - 1:
axis -= 1
return axis
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
outs = tf.reverse(input_tensor, axis=[get_valid_axis(parameters)])
return [input_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reverse_sequence_tests(options):
"""Make a set of tests to do reverse_sequence."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[8, 4, 5, 5, 6], [4, 4, 3, 5]],
"seq_lengths": [[2, 2, 2, 2], [2, 1, 1, 0]],
"seq_axis": [0, 3],
"batch_axis": [1]
},
{
"input_dtype": [tf.float32],
"input_shape": [[2, 4, 5, 5, 6]],
"seq_lengths": [[2, 1]],
"seq_axis": [2],
"batch_axis": [0]
},
{
"input_dtype": [tf.float32],
"input_shape": [[4, 2]],
"seq_lengths": [[3, 1]],
"seq_axis": [0],
"batch_axis": [1]
}]
def build_graph(parameters):
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
outs = tf.reverse_sequence(
input_value,
seq_lengths=parameters["seq_lengths"],
batch_axis=parameters["batch_axis"],
seq_axis=parameters["seq_axis"])
return [input_value], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_matrix_diag_tests(options):
"""Make a set of tests for tf.linalg.diag op."""
test_parameters = [
{
"input_shape": [[3], [2, 3], [3, 4, 5], [2, 4, 6, 8]],
"input_dtype": [tf.int32, tf.float32],
},
]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
outs = tf.matrix_diag(input_tensor)
return [input_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_matrix_set_diag_tests(options):
"""Make a set of tests for tf.linalg.set_diag op."""
test_parameters = [
{
"input_diag_shapes": [([3, 3], [3]), ([2, 3], [2]), ([2, 4, 4],
[2, 4]),
([3, 4, 5, 6], [3, 4, 5])],
"input_dtype": [tf.int32, tf.float32, tf.uint8],
},
]
def build_graph(parameters):
input_shape = parameters["input_diag_shapes"][0]
diag_shape = parameters["input_diag_shapes"][1]
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"], name="input", shape=input_shape)
diag_tensor = tf.placeholder(
dtype=parameters["input_dtype"], name="diagonal", shape=diag_shape)
outs = tf.matrix_set_diag(input_tensor, diag_tensor)
return [input_tensor, diag_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_shape = parameters["input_diag_shapes"][0]
diag_shape = parameters["input_diag_shapes"][1]
input_values = create_tensor_data(parameters["input_dtype"], input_shape)
diag_values = create_tensor_data(parameters["input_dtype"], diag_shape)
return [input_values, diag_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values, diag_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_eye_tests(options):
"""Make a set of tests for tf.eye op."""
test_parameters = [{
"num_rows_shape": [[]],
"num_cols_shape": [[]],
"batch_shape": [[3], [2, 4], [4, 5, 6], None],
"use_num_cols": [True, False],
"dtype": [tf.float32, tf.int32],
}]
def build_graph(parameters):
input_tensor0 = tf.placeholder(
dtype=tf.int32, name="num_rows", shape=parameters["num_rows_shape"])
input_tensor1 = tf.placeholder(
dtype=tf.int32, name="num_columns", shape=parameters["num_cols_shape"])
if parameters["use_num_cols"]:
outs = tf.eye(
num_rows=input_tensor0,
num_columns=input_tensor1,
batch_shape=parameters["batch_shape"],
dtype=parameters["dtype"])
return [input_tensor0, input_tensor1], [outs]
else:
outs = tf.eye(num_rows=input_tensor0, dtype=parameters["dtype"])
return [input_tensor0], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value0 = create_scalar_data(dtype=np.int32, min_value=1)
input_value1 = create_scalar_data(dtype=np.int32, min_value=1)
if parameters["use_num_cols"]:
return [input_value0, input_value1], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value0, input_value1])))
else:
return [input_value0], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value0])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_lstm_tests(options):
"""Make a set of tests to do unidirectional_sequence_lstm."""
test_parameters = [{
"batch_size": [2, 4, 6],
"seq_length": [1, 3],
"units": [4, 5],
"use_peepholes": [False, True],
"is_dynamic_rnn": [False, True]
}]
def build_graph(parameters):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape)
input_values.append(input_value)
lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
parameters["units"],
use_peepholes=parameters["use_peepholes"])
outs, _ = tf.lite.experimental.nn.dynamic_rnn(
lstm_cell, input_value, dtype=tf.float32, time_major=True)
outs = tf.unstack(outs, axis=1)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = tf.placeholder(
dtype=tf.float32, name=("input_%d" % i), shape=shape)
input_values.append(input_value)
lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
parameters["units"], use_peepholes=parameters["use_peepholes"])
outs, _ = tf.nn.static_rnn(lstm_cell, input_values, dtype=tf.float32)
real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
return input_values, [real_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
init = tf.global_variables_initializer()
sess.run(init)
    # The TFLite fused kernel takes input as [time, batch, input].
    # For the static unidirectional sequence LSTM, the input is a list of
    # length `time` whose elements are packed together; however, for
    # time = 1 the input is not packed.
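    # For example (illustrative): with batch_size=2 and units=4, the single
    # [2, 4] input below is reshaped to [1, 2, 4] before feeding TFLite.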
tflite_input_values = input_values
if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
tflite_input_values = [
input_values[0].reshape((1, parameters["batch_size"],
parameters["units"]))
]
return tflite_input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function(name="make_unidirectional_sequence_rnn_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_rnn_tests(options):
"""Make a set of tests to do unidirectional_sequence_rnn."""
test_parameters = [{
"batch_size": [2, 4, 6],
"seq_length": [1, 3],
"units": [4, 5],
"is_dynamic_rnn": [False, True]
}]
def build_graph(parameters):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape)
input_values.append(input_value)
rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"])
outs, _ = tf.lite.experimental.nn.dynamic_rnn(
rnn_cell, input_value, dtype=tf.float32, time_major=True)
outs = tf.unstack(outs, axis=1)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = tf.placeholder(
dtype=tf.float32, name=("input_%d" % i), shape=shape)
input_values.append(input_value)
rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"])
outs, _ = tf.nn.static_rnn(rnn_cell, input_values, dtype=tf.float32)
real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
return input_values, [real_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
init = tf.global_variables_initializer()
sess.run(init)
    # The TFLite fused kernel takes input as [time, batch, input].
    # For the static unidirectional sequence RNN, the input is a list of
    # length `time` whose elements are packed together; however, for
    # time = 1 the input is not packed.
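    # For example (illustrative): with batch_size=2 and units=4, the single
    # [2, 4] input below is reshaped to [1, 2, 4] before feeding TFLite.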
tflite_input_values = input_values
if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
tflite_input_values = [
input_values[0].reshape((1, parameters["batch_size"],
parameters["units"]))
]
return tflite_input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function()
def make_unfused_gru_tests(options):
"""Make a set of tests for unfused gru op."""
test_parameters = [{
"units": [2, 5],
"batch_size": [1, 2],
"time": [3],
}]
def build_graph(parameters):
inputs = [
tf.placeholder(tf.float32,
[parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
cell_fw = tf.nn.rnn_cell.GRUCell(parameters["units"])
cell_bw = tf.nn.rnn_cell.GRUCell(parameters["units"])
outputs, _, _ = tf.nn.static_bidirectional_rnn(
cell_fw, cell_bw, inputs, dtype=tf.float32)
return inputs, outputs
def build_inputs(parameters, sess, inputs, outputs):
input_values = [
create_tensor_data(tf.float32,
[parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
init = tf.global_variables_initializer()
sess.run(init)
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
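# Note (sketch): tf.nn.static_bidirectional_rnn concatenates the forward and
# backward GRU outputs along the feature axis, so each of the `time` outputs
# returned by build_graph above has shape [batch_size, 2 * units].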
@register_make_test_function()
def make_rfft2d_tests(options):
"""Make a set of tests to do rfft2d."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[8, 8], [3, 8, 8]],
"fft_length": [
None, [4, 4], [4, 8], [8, 4], [8, 8], [8, 16], [16, 8], [16, 16]
]
}]
def build_graph(parameters):
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
with spectral_ops_test_util.fft_kernel_label_map():
outs = tf.signal.rfft2d(input_value, fft_length=parameters["fft_length"])
return [input_value], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
extra_toco_options = ExtraTocoOptions()
extra_toco_options.allow_custom_ops = True
make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
extra_toco_options)
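# Note (sketch): tf.signal.rfft2d returns a complex64 tensor whose innermost
# dimension is fft_length[-1] // 2 + 1; e.g., fft_length=[8, 8] on an [8, 8]
# input yields an [8, 5] complex output.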
def generate_examples(options):
def mkdir_if_not_exist(x):
if not os.path.isdir(x):
os.mkdir(x)
if not os.path.isdir(x):
raise RuntimeError("Failed to create dir %r" % x)
opstest_path = os.path.join(options.output_path)
mkdir_if_not_exist(opstest_path)
out = options.zip_to_output
# Some zip filenames contain a postfix identifying the conversion mode. The
# list of valid conversion modes is defined in
# generated_test_conversion_modes() in build_def.bzl.
# Remove suffixes to extract the test name from the output name.
test_name = re.sub(r"(_(|toco-flex|forward-compat))?\.zip$", "", out, count=1)
test_function_name = "make_%s_tests" % test_name
if test_function_name not in _MAKE_TEST_FUNCTIONS_MAP:
raise RuntimeError("Can't find a test function to create %r. Tried %r" %
(out, test_function_name))
test_function = _MAKE_TEST_FUNCTIONS_MAP[test_function_name]
if options.make_forward_compat_test:
future_date = datetime.date.today() + datetime.timedelta(days=30)
with tf.compat.forward_compatibility_horizon(future_date.year,
future_date.month,
future_date.day):
test_function(options)
else:
test_function(options)
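# Illustrative zip-name mapping (hypothetical test names, following the regex
# in generate_examples above):
#   "add.zip"                 -> make_add_tests
#   "add_toco-flex.zip"       -> make_add_tests
#   "add_forward-compat.zip"  -> make_add_tests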
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/testing/generate_examples_lib.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory>
bazel run //tensorflow/lite/testing:generate_examples
To more easily debug failures use (or override) the --save_graphdefs flag to
place text proto graphdefs into the generated zip files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import argparse
import os
import sys
from tensorflow.lite.testing import generate_examples_lib
from tensorflow.lite.testing import toco_convert
# TODO(aselle): Disable GPU for now
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
parser = argparse.ArgumentParser(description="Script to generate TFLite tests.")
parser.add_argument("output_path",
help="Directory where the outputs will be go.")
parser.add_argument(
"--zip_to_output",
type=str,
help="Particular zip to output.",
required=True)
parser.add_argument("--toco",
type=str,
help="Path to toco tool.",
required=True)
parser.add_argument(
"--known_bugs_are_errors",
action="store_true",
help=("If a particular model is affected by a known bug,"
" count it as a converter error."))
parser.add_argument(
"--ignore_converter_errors",
action="store_true",
help="Raise an exception if any converter error is encountered.")
parser.add_argument(
"--save_graphdefs",
action="store_true",
help="Include intermediate graphdefs in the output zip files.")
parser.add_argument(
"--run_with_flex",
action="store_true",
help="Whether the TFLite Flex converter is being used.")
parser.add_argument(
"--make_edgetpu_tests",
action="store_true",
help="Whether to generate test cases for edgetpu.")
parser.add_argument(
"--make_forward_compat_test",
action="store_true",
help="Make tests by setting TF forward compatibility horizon to the future")
# Toco binary path provided by the generate rule.
bin_path = None
def main(unused_args):
options = generate_examples_lib.Options()
options.output_path = FLAGS.output_path
options.zip_to_output = FLAGS.zip_to_output
options.toco = FLAGS.toco
options.known_bugs_are_errors = FLAGS.known_bugs_are_errors
options.ignore_converter_errors = FLAGS.ignore_converter_errors
options.save_graphdefs = FLAGS.save_graphdefs
options.run_with_flex = FLAGS.run_with_flex
options.make_edgetpu_tests = FLAGS.make_edgetpu_tests
options.make_forward_compat_test = FLAGS.make_forward_compat_test
options.tflite_convert_function = toco_convert.toco_convert
generate_examples_lib.generate_examples(options)
if __name__ == "__main__":
FLAGS, unparsed = parser.parse_known_args()
if unparsed:
print("Usage: %s <path out> <zip file to generate>")
exit(1)
else:
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/testing/generate_examples.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to test TFLite models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six import PY3
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.lite.python import convert_saved_model as _convert_saved_model
from tensorflow.lite.python import lite as _lite
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python import util as _util
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.saved_model import load as _load
from tensorflow.python.saved_model import loader as _loader
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
def get_filepath(filename, base_dir=None):
"""Returns the full path of the filename.
Args:
filename: Subdirectory and name of the model file.
base_dir: Base directory containing model file.
Returns:
str.
"""
if base_dir is None:
base_dir = "learning/brain/mobile/tflite_compat_models"
return os.path.join(_resource_loader.get_root_dir_with_all_resources(),
base_dir, filename)
def get_image(size):
"""Returns an image loaded into an np.ndarray with dims [1, size, size, 3].
Args:
size: Size of image.
Returns:
np.ndarray.
"""
img_filename = _resource_loader.get_path_to_datafile(
"testdata/grace_hopper.jpg")
img = image.load_img(img_filename, target_size=(size, size))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
return img_array
def _convert(converter, **kwargs):
"""Converts the model.
Args:
converter: TFLiteConverter object.
**kwargs: Additional arguments to be passed into the converter. Supported
flags are {"target_ops", "post_training_quantize", "quantize_to_float16"}.
Returns:
The converted TFLite model in serialized format.
  """
if "target_ops" in kwargs:
converter.target_spec.supported_ops = kwargs["target_ops"]
if "post_training_quantize" in kwargs:
converter.optimizations = [_lite.Optimize.DEFAULT]
if kwargs.get("quantize_to_float16", False):
converter.target_spec.supported_types = [constants.FLOAT16]
return converter.convert()
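# Example usage of _convert (illustrative sketch; assumes a `converter` built
# by one of the test helpers below):
#   tflite_model = _convert(
#       converter,
#       target_ops=set([_lite.OpsSet.SELECT_TF_OPS]),
#       post_training_quantize=True,
#       quantize_to_float16=True)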
def _get_input_data_map(tflite_model, input_data):
"""Generates a map of input data based on the TFLite model.
Args:
tflite_model: Serialized TensorFlow Lite model.
input_data: List of np.ndarray.
Returns:
{str: [np.ndarray]}.
"""
interpreter = _lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
return {
input_tensor["name"]: data
for input_tensor, data in zip(input_details, input_data)
}
def _generate_random_input_data(tflite_model, seed=None):
"""Generates input data based on the input tensors in the TFLite model.
Args:
tflite_model: Serialized TensorFlow Lite model.
seed: Integer seed for the random generator. (default None)
Returns:
([np.ndarray], {str : [np.ndarray]}).
"""
interpreter = _lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
if seed:
np.random.seed(seed=seed)
input_data = [
np.array(
np.random.random_sample(input_tensor["shape"]),
dtype=input_tensor["dtype"]) for input_tensor in input_details
]
input_data_map = _get_input_data_map(tflite_model, input_data)
return input_data, input_data_map
def _evaluate_tflite_model(tflite_model, input_data):
"""Returns evaluation of input data on TFLite model.
Args:
tflite_model: Serialized TensorFlow Lite model.
input_data: List of np.ndarray.
Returns:
List of np.ndarray.
"""
interpreter = _lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
for input_tensor, tensor_data in zip(input_details, input_data):
interpreter.set_tensor(input_tensor["index"], tensor_data)
interpreter.invoke()
output_data = [
interpreter.get_tensor(output_tensor["index"])
for output_tensor in output_details
]
output_labels = [output_tensor["name"] for output_tensor in output_details]
return output_data, output_labels
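# Illustrative end-to-end use of the two helpers above (sketch; assumes
# `tflite_model` holds serialized model bytes):
#   input_data, _ = _generate_random_input_data(tflite_model, seed=0)
#   output_data, output_labels = _evaluate_tflite_model(tflite_model, input_data)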
def evaluate_frozen_graph(filename, input_arrays, output_arrays):
"""Returns a function that evaluates the frozen graph on input data.
Args:
filename: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
Returns:
Lambda function ([np.ndarray data] : [np.ndarray result]).
"""
with _file_io.FileIO(filename, "rb") as f:
file_content = f.read()
graph_def = _graph_pb2.GraphDef()
try:
graph_def.ParseFromString(file_content)
except (_text_format.ParseError, DecodeError):
if not isinstance(file_content, str):
if PY3:
file_content = file_content.decode("utf-8")
else:
file_content = file_content.encode("utf-8")
_text_format.Merge(file_content, graph_def)
graph = ops.Graph()
with graph.as_default():
_import_graph_def(graph_def, name="")
inputs = _util.get_tensors_from_tensor_names(graph, input_arrays)
outputs = _util.get_tensors_from_tensor_names(graph, output_arrays)
def run_session(input_data):
with _session.Session(graph=graph) as sess:
return sess.run(outputs, dict(zip(inputs, input_data)))
return run_session
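# Illustrative usage (sketch; the path and tensor names are assumptions):
#   eval_func = evaluate_frozen_graph("/tmp/model.pb", ["input"], ["output"])
#   results = eval_func([np.zeros((1, 16, 16, 3), dtype=np.float32)])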
def evaluate_saved_model(directory, tag_set, signature_key):
"""Returns a function that evaluates the SavedModel on input data.
Args:
directory: SavedModel directory to convert.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
Returns:
Lambda function ([np.ndarray data] : [np.ndarray result]).
"""
with _session.Session().as_default() as sess:
if tag_set is None:
tag_set = set([_tag_constants.SERVING])
if signature_key is None:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
meta_graph = _loader.load(sess, tag_set, directory)
signature_def = _convert_saved_model.get_signature_def(
meta_graph, signature_key)
inputs, outputs = _convert_saved_model.get_inputs_outputs(signature_def)
return lambda input_data: sess.run(outputs, dict(zip(inputs, input_data)))
def evaluate_keras_model(filename):
"""Returns a function that evaluates the tf.keras model on input data.
Args:
filename: Full filepath of HDF5 file containing the tf.keras model.
Returns:
Lambda function ([np.ndarray data] : [np.ndarray result]).
"""
keras_model = _keras.models.load_model(filename)
return lambda input_data: [keras_model.predict(input_data)]
def compare_models(tflite_model, tf_eval_func, input_data=None, tolerance=5):
"""Compares TensorFlow and TFLite models.
Unless the input data is provided, the models are compared with random data.
Args:
tflite_model: Serialized TensorFlow Lite model.
tf_eval_func: Lambda function that takes in input data and outputs the
results of the TensorFlow model ([np.ndarray data] : [np.ndarray result]).
input_data: np.ndarray to pass into models during inference. (default None)
tolerance: Decimal place to check accuracy to. (default 5)
"""
if input_data is None:
input_data, _ = _generate_random_input_data(tflite_model)
tf_results = tf_eval_func(input_data)
tflite_results, _ = _evaluate_tflite_model(tflite_model, input_data)
for tf_result, tflite_result in zip(tf_results, tflite_results):
np.testing.assert_almost_equal(tf_result, tflite_result, tolerance)
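# Note on `tolerance` (sketch): np.testing.assert_almost_equal checks
# abs(desired - actual) < 1.5 * 10**(-decimal), so the default tolerance of 5
# accepts elementwise differences up to roughly 1.5e-5.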
def compare_models_v2(tflite_model, tf_eval_func, input_data=None, tolerance=5):
"""Compares TensorFlow and TFLite models for TensorFlow 2.0.
Unless the input data is provided, the models are compared with random data.
Currently only 1 input and 1 output are supported by this function.
Args:
tflite_model: Serialized TensorFlow Lite model.
tf_eval_func: Function to evaluate TensorFlow model. Either a lambda
function that takes in input data and outputs the results or a TensorFlow
ConcreteFunction.
input_data: np.ndarray to pass into models during inference. (default None)
tolerance: Decimal place to check accuracy to. (default 5)
"""
# Convert the input data into a map.
if input_data is None:
input_data, input_data_map = _generate_random_input_data(tflite_model)
else:
input_data_map = _get_input_data_map(tflite_model, input_data)
input_data_func_map = {
input_name: constant_op.constant(input_data)
for input_name, input_data in input_data_map.items()
}
if len(input_data) > 1:
tf_results = tf_eval_func(**input_data_func_map)
else:
tf_results = tf_eval_func(constant_op.constant(input_data[0]))
tflite_results, tflite_labels = _evaluate_tflite_model(
tflite_model, input_data)
# Convert the output TensorFlow results into an ordered list.
if isinstance(tf_results, dict):
if len(tf_results) == 1:
      tf_results = [tf_results[list(tf_results.keys())[0]]]
else:
tf_results = [tf_results[tflite_label] for tflite_label in tflite_labels]
for tf_result, tflite_result in zip(tf_results, tflite_results):
np.testing.assert_almost_equal(tf_result, tflite_result, tolerance)
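# Illustrative usage (sketch; assumes a SavedModel loaded elsewhere):
#   concrete_func = model.signatures["serving_default"]
#   compare_models_v2(tflite_model, concrete_func)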
def test_frozen_graph_quant(filename,
input_arrays,
output_arrays,
input_shapes=None,
**kwargs):
"""Sanity check to validate post quantize flag alters the graph.
This test does not check correctness of the converted model. It converts the
TensorFlow frozen graph to TFLite with and without the post_training_quantized
flag. It ensures some tensors have different types between the float and
quantized models in the case of an all TFLite model or mix-and-match model.
It ensures tensor types do not change in the case of an all Flex model.
Args:
filename: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
**kwargs: Additional arguments to be passed into the converter.
Raises:
ValueError: post_training_quantize flag doesn't act as intended.
"""
# Convert and load the float model.
converter = _lite.TFLiteConverter.from_frozen_graph(
filename, input_arrays, output_arrays, input_shapes)
tflite_model_float = _convert(converter, **kwargs)
interpreter_float = _lite.Interpreter(model_content=tflite_model_float)
interpreter_float.allocate_tensors()
float_tensors = interpreter_float.get_tensor_details()
  # Convert and load the quantized model.
  converter = _lite.TFLiteConverter.from_frozen_graph(
      filename, input_arrays, output_arrays, input_shapes)
tflite_model_quant = _convert(
converter, post_training_quantize=True, **kwargs)
interpreter_quant = _lite.Interpreter(model_content=tflite_model_quant)
interpreter_quant.allocate_tensors()
quant_tensors = interpreter_quant.get_tensor_details()
quant_tensors_map = {
tensor_detail["name"]: tensor_detail for tensor_detail in quant_tensors
}
# Check if weights are of different types in the float and quantized models.
num_tensors_float = len(float_tensors)
num_tensors_same_dtypes = sum(
float_tensor["dtype"] == quant_tensors_map[float_tensor["name"]]["dtype"]
for float_tensor in float_tensors)
has_quant_tensor = num_tensors_float != num_tensors_same_dtypes
# For the "flex" case, post_training_quantize should not alter the graph,
# unless we are quantizing to float16.
if ("target_ops" in kwargs and
not kwargs.get("quantize_to_float16", False) and
set(kwargs["target_ops"]) == set([_lite.OpsSet.SELECT_TF_OPS])):
if has_quant_tensor:
raise ValueError("--post_training_quantize flag unexpectedly altered the "
"full Flex mode graph.")
elif not has_quant_tensor:
raise ValueError("--post_training_quantize flag was unable to quantize the "
"graph as expected in TFLite and mix-and-match mode.")
def test_frozen_graph(filename,
input_arrays,
output_arrays,
input_shapes=None,
input_data=None,
**kwargs):
"""Validates the TensorFlow frozen graph converts to a TFLite model.
Converts the TensorFlow frozen graph to TFLite and checks the accuracy of the
model on random data.
Args:
filename: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
input_data: np.ndarray to pass into models during inference. (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
converter = _lite.TFLiteConverter.from_frozen_graph(
filename, input_arrays, output_arrays, input_shapes)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_frozen_graph(filename, input_arrays, output_arrays)
compare_models(tflite_model, tf_eval_func, input_data=input_data)
def test_saved_model(directory,
input_shapes=None,
tag_set=None,
signature_key=None,
input_data=None,
**kwargs):
"""Validates the TensorFlow SavedModel converts to a TFLite model.
Converts the TensorFlow SavedModel to TFLite and checks the accuracy of the
model on random data.
Args:
directory: SavedModel directory to convert.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
input_data: np.ndarray to pass into models during inference. (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
converter = _lite.TFLiteConverter.from_saved_model(
directory,
input_shapes=input_shapes,
tag_set=tag_set,
signature_key=signature_key)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_saved_model(directory, tag_set, signature_key)
compare_models(tflite_model, tf_eval_func, input_data=input_data)
def test_saved_model_v2(directory,
tag_set=None,
signature_key=None,
input_data=None,
**kwargs):
"""Validates the TensorFlow SavedModel converts to a TFLite model.
Converts the TensorFlow SavedModel to TFLite and checks the accuracy of the
model on random data.
Args:
directory: SavedModel directory to convert.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
input_data: np.ndarray to pass into models during inference. (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
model = _load.load(directory, tags=tag_set)
if not signature_key:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
concrete_func = model.signatures[signature_key]
converter = _lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = _convert(converter, **kwargs)
compare_models_v2(tflite_model, concrete_func, input_data=input_data)
def test_saved_model_v2_quant_float16(directory, **kwargs):
"""Validates the TensorFlow SavedModel converts to a TFLite model."""
converter = _lite.TFLiteConverterV2.from_saved_model(directory)
tflite_model_float = _convert(converter, version=2, **kwargs)
interpreter_float = _lite.Interpreter(model_content=tflite_model_float)
interpreter_float.allocate_tensors()
float_tensors = interpreter_float.get_tensor_details()
tflite_model_quant = _convert(
converter,
version=2,
post_training_quantize=True,
quantize_to_float16=True,
**kwargs)
interpreter_quant = _lite.Interpreter(model_content=tflite_model_quant)
interpreter_quant.allocate_tensors()
quant_tensors = interpreter_quant.get_tensor_details()
quant_tensors_map = {
tensor_detail["name"]: tensor_detail for tensor_detail in quant_tensors
}
# Check if weights are of different types in the float and quantized models.
num_tensors_float = len(float_tensors)
num_tensors_same_dtypes = sum(
float_tensor["dtype"] == quant_tensors_map[float_tensor["name"]]["dtype"]
for float_tensor in float_tensors)
has_quant_tensor = num_tensors_float != num_tensors_same_dtypes
if not has_quant_tensor:
raise ValueError("--post_training_quantize flag was unable to quantize the "
"graph as expected.")
def test_keras_model(filename,
input_arrays=None,
input_shapes=None,
input_data=None,
**kwargs):
"""Validates the tf.keras model converts to a TFLite model.
Converts the tf.keras model to TFLite and checks the accuracy of the model on
random data.
Args:
filename: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
input_data: np.ndarray to pass into models during inference. (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
converter = _lite.TFLiteConverter.from_keras_model_file(
filename, input_arrays=input_arrays, input_shapes=input_shapes)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_keras_model(filename)
compare_models(tflite_model, tf_eval_func, input_data=input_data)
def test_keras_model_v2(filename, input_shapes=None, input_data=None, **kwargs):
"""Validates the tf.keras model converts to a TFLite model.
Converts the tf.keras model to TFLite and checks the accuracy of the model on
random data.
Args:
filename: Full filepath of HDF5 file containing the tf.keras model.
input_shapes: List of list of integers representing input shapes in the
order of the tf.keras model's .input attribute (e.g., [[1, 16, 16, 3]]).
(default None)
input_data: np.ndarray to pass into models during inference. (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
keras_model = _keras.models.load_model(filename)
if input_shapes:
for tensor, shape in zip(keras_model.inputs, input_shapes):
tensor.set_shape(shape)
converter = _lite.TFLiteConverterV2.from_keras_model(keras_model)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_keras_model(filename)
compare_models_v2(tflite_model, tf_eval_func, input_data=input_data)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/testing/model_coverage/model_coverage_lib.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_coverage_lib.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.lite.python import lite
from tensorflow.lite.testing.model_coverage import model_coverage_lib as model_coverage
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class EvaluateFrozenGraph(test.TestCase):
def _saveFrozenGraph(self, sess):
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
return graph_def_file
def testFloat(self):
with ops.Graph().as_default():
with session.Session().as_default() as sess:
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
filename = self._saveFrozenGraph(sess)
model_coverage.test_frozen_graph(filename, ['Placeholder'], ['add'])
def testMultipleOutputs(self):
with ops.Graph().as_default():
with session.Session().as_default() as sess:
in_tensor_1 = array_ops.placeholder(
shape=[1, 16], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16], dtype=dtypes.float32, name='inputB')
weight = constant_op.constant(-1.0, shape=[16, 16])
bias = constant_op.constant(-1.0, shape=[16])
layer = math_ops.matmul(in_tensor_1, weight) + bias
_ = math_ops.reduce_mean(math_ops.square(layer - in_tensor_2))
filename = self._saveFrozenGraph(sess)
model_coverage.test_frozen_graph(filename, ['inputA', 'inputB'],
['add', 'Mean'])
@test_util.run_in_graph_and_eager_modes
def testFunctions(self):
"""Tests functions."""
@def_function.function
def plus_placeholder(x, placeholder):
return x + placeholder
with ops.Graph().as_default():
placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[1], name='input')
variable_node = constant_op.constant(1.0, name='variable_node')
defun_node = plus_placeholder(variable_node, placeholder)
_ = math_ops.multiply(defun_node, 2.0, name='output_node')
# Initialize variables in the model.
sess = session.Session()
filename = self._saveFrozenGraph(sess)
model_coverage.test_frozen_graph(filename, ['input'], ['output_node'])
def _getQuantizedModel(self):
np.random.seed(0)
with ops.Graph().as_default():
with session.Session().as_default() as sess:
# The tensor needs to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
_ = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
filename = self._saveFrozenGraph(sess)
return filename
def testQuantized(self):
filename = self._getQuantizedModel()
model_coverage.test_frozen_graph_quant(filename, ['inputA'], ['output'])
def testQuantizedInputShapes(self):
filename = self._getQuantizedModel()
model_coverage.test_frozen_graph_quant(
filename, ['inputA'], ['output'], input_shapes={'inputA': [33, 33]})
def testQuantizedFlexAll(self):
filename = self._getQuantizedModel()
model_coverage.test_frozen_graph_quant(
filename, ['inputA'], ['output'],
target_ops=set([lite.OpsSet.SELECT_TF_OPS]))
class EvaluateSavedModel(test.TestCase):
def testFloat(self):
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with ops.Graph().as_default():
with session.Session().as_default() as sess:
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
out_tensor = in_tensor_1 + in_tensor_2
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
model_coverage.test_saved_model(saved_model_dir)
class EvaluateKerasModel(test.TestCase):
def _getSingleInputKerasModel(self):
"""Returns single input Sequential tf.keras model."""
keras.backend.clear_session()
xs = [-1, 0, 1, 2, 3, 4]
ys = [-3, -1, 1, 3, 5, 7]
model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.train_on_batch(xs, ys)
return model
def _saveKerasModel(self, model):
    fd, keras_file = tempfile.mkstemp('.h5')
    try:
      keras.models.save_model(model, keras_file)
    finally:
      os.close(fd)
return keras_file
def testFloat(self):
model = self._getSingleInputKerasModel()
keras_file = self._saveKerasModel(model)
model_coverage.test_keras_model(keras_file)
def testPostTrainingQuantize(self):
model = self._getSingleInputKerasModel()
keras_file = self._saveKerasModel(model)
model_coverage.test_keras_model(keras_file, post_training_quantize=True)
def testTargetOps(self):
model = self._getSingleInputKerasModel()
keras_file = self._saveKerasModel(model)
model_coverage.test_keras_model(
keras_file,
target_ops=set([lite.OpsSet.TFLITE_BUILTINS,
lite.OpsSet.SELECT_TF_OPS]))
if __name__ == '__main__':
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/testing/model_coverage/model_coverage_lib_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing for updating TensorFlow lite schema."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tempfile
from tensorflow.lite.schema import upgrade_schema as upgrade_schema_lib
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
EMPTY_TEST_SCHEMA_V1 = {
"version": 1,
"operator_codes": [],
"subgraphs": [],
}
EMPTY_TEST_SCHEMA_V3 = {
"version": 3,
"operator_codes": [],
"subgraphs": [],
"buffers": [{
"data": []
}]
}
TEST_SCHEMA_V0 = {
"operator_codes": [],
"tensors": [],
"inputs": [],
"outputs": [],
"operators": [],
"version": 0
}
TEST_SCHEMA_V3 = {
"operator_codes": [],
"buffers": [{
"data": []
}],
"subgraphs": [{
"tensors": [],
"inputs": [],
"outputs": [],
"operators": [],
}],
"version":
3
}
FULL_TEST_SCHEMA_V1 = {
"version":
1,
"operator_codes": [
{
"builtin_code": "CONVOLUTION"
},
{
"builtin_code": "DEPTHWISE_CONVOLUTION"
},
{
"builtin_code": "AVERAGE_POOL"
},
{
"builtin_code": "MAX_POOL"
},
{
"builtin_code": "L2_POOL"
},
{
"builtin_code": "SIGMOID"
},
{
"builtin_code": "L2NORM"
},
{
"builtin_code": "LOCAL_RESPONSE_NORM"
},
{
"builtin_code": "ADD"
},
{
"builtin_code": "Basic_RNN"
},
],
"subgraphs": [{
"operators": [
{
"builtin_options_type": "PoolOptions"
},
{
"builtin_options_type": "DepthwiseConvolutionOptions"
},
{
"builtin_options_type": "ConvolutionOptions"
},
{
"builtin_options_type": "LocalResponseNormOptions"
},
{
"builtin_options_type": "BasicRNNOptions"
},
],
}],
"description":
"",
}
FULL_TEST_SCHEMA_V3 = {
"version":
3,
"operator_codes": [
{
"builtin_code": "CONV_2D"
},
{
"builtin_code": "DEPTHWISE_CONV_2D"
},
{
"builtin_code": "AVERAGE_POOL_2D"
},
{
"builtin_code": "MAX_POOL_2D"
},
{
"builtin_code": "L2_POOL_2D"
},
{
"builtin_code": "LOGISTIC"
},
{
"builtin_code": "L2_NORMALIZATION"
},
{
"builtin_code": "LOCAL_RESPONSE_NORMALIZATION"
},
{
"builtin_code": "ADD"
},
{
"builtin_code": "RNN"
},
],
"subgraphs": [{
"operators": [
{
"builtin_options_type": "Pool2DOptions"
},
{
"builtin_options_type": "DepthwiseConv2DOptions"
},
{
"builtin_options_type": "Conv2DOptions"
},
{
"builtin_options_type": "LocalResponseNormalizationOptions"
},
{
"builtin_options_type": "RNNOptions"
},
],
}],
"description":
"",
"buffers": [{
"data": []
}]
}
BUFFER_TEST_V2 = {
"operator_codes": [],
"buffers": [],
"subgraphs": [{
"tensors": [
{
"data_buffer": [1, 2, 3, 4]
},
{
"data_buffer": [1, 2, 3, 4, 5, 6, 7, 8]
},
{
"data_buffer": []
},
],
"inputs": [],
"outputs": [],
"operators": [],
}],
"version":
2
}
BUFFER_TEST_V3 = {
"operator_codes": [],
"subgraphs": [{
"tensors": [
{
"buffer": 1
},
{
"buffer": 2
},
{
"buffer": 0
},
],
"inputs": [],
"outputs": [],
"operators": [],
}],
"buffers": [
{
"data": []
},
{
"data": [1, 2, 3, 4]
},
{
"data": [1, 2, 3, 4, 5, 6, 7, 8]
},
],
"version":
3
}
def JsonDumpAndFlush(data, fp):
"""Write the dictionary `data` to a JSON file `fp` (and flush).
Args:
    data: A dictionary that is JSON serializable.
    fp: File-like object to write to.
"""
json.dump(data, fp)
fp.flush()
class TestSchemaUpgrade(test_util.TensorFlowTestCase):
def testNonExistentFile(self):
converter = upgrade_schema_lib.Converter()
fd, non_existent = tempfile.mkstemp(suffix=".json")
os.close(fd)
with self.assertRaisesRegexp(IOError, "No such file or directory"):
converter.Convert(non_existent, non_existent)
def testInvalidExtension(self):
converter = upgrade_schema_lib.Converter()
fd, invalid_extension = tempfile.mkstemp(suffix=".foo")
os.close(fd)
with self.assertRaisesRegexp(ValueError, "Invalid extension on input"):
converter.Convert(invalid_extension, invalid_extension)
with tempfile.NamedTemporaryFile(suffix=".json", mode="w+") as in_json:
JsonDumpAndFlush(EMPTY_TEST_SCHEMA_V1, in_json)
with self.assertRaisesRegexp(ValueError, "Invalid extension on output"):
converter.Convert(in_json.name, invalid_extension)
def CheckConversion(self, data_old, data_expected):
"""Given a data dictionary, test upgrading to current version.
Args:
data_old: TFLite model as a dictionary (arbitrary version).
data_expected: TFLite model as a dictionary (upgraded).
"""
converter = upgrade_schema_lib.Converter()
with tempfile.NamedTemporaryFile(suffix=".json", mode="w+") as in_json, \
tempfile.NamedTemporaryFile(
suffix=".json", mode="w+") as out_json, \
tempfile.NamedTemporaryFile(
suffix=".bin", mode="w+b") as out_bin, \
tempfile.NamedTemporaryFile(
suffix=".tflite", mode="w+b") as out_tflite:
JsonDumpAndFlush(data_old, in_json)
# Test JSON output
converter.Convert(in_json.name, out_json.name)
# Test binary output
# Convert to .tflite and then to .bin and check if binary is equal
converter.Convert(in_json.name, out_tflite.name)
converter.Convert(out_tflite.name, out_bin.name)
self.assertEqual(
open(out_bin.name, "rb").read(),
open(out_tflite.name, "rb").read())
# Test that conversion actually produced successful new json.
converted_schema = json.load(out_json)
self.assertEqual(converted_schema, data_expected)
def testAlreadyUpgraded(self):
"""A file already at version 3 should stay at version 3."""
self.CheckConversion(EMPTY_TEST_SCHEMA_V3, EMPTY_TEST_SCHEMA_V3)
self.CheckConversion(TEST_SCHEMA_V3, TEST_SCHEMA_V3)
self.CheckConversion(BUFFER_TEST_V3, BUFFER_TEST_V3)
# Disable this while we have incorrectly versioned structures around.
# def testV0Upgrade_IntroducesSubgraphs(self):
# """V0 did not have subgraphs; check to make sure they get introduced."""
# self.CheckConversion(TEST_SCHEMA_V0, TEST_SCHEMA_V3)
def testV1Upgrade_RenameOps(self):
"""V1 had many different names for ops; check to make sure they rename."""
self.CheckConversion(EMPTY_TEST_SCHEMA_V1, EMPTY_TEST_SCHEMA_V3)
self.CheckConversion(FULL_TEST_SCHEMA_V1, FULL_TEST_SCHEMA_V3)
def testV2Upgrade_CreateBuffers(self):
"""V2 did not have buffers; check to make sure they are created."""
self.CheckConversion(BUFFER_TEST_V2, BUFFER_TEST_V3)
if __name__ == "__main__":
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/schema/upgrade_schema_test.py
|
# ==============================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upgrade script to move from pre-release schema to new schema.
Usage examples:
bazel run tensorflow/lite/schema/upgrade_schema -- in.json out.json
bazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.bin
bazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.json
bazel run tensorflow/lite/schema/upgrade_schema -- in.json out.bin
bazel run tensorflow/lite/schema/upgrade_schema -- in.tflite out.tflite
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import tensorflow as tf
from tensorflow.python.platform import resource_loader
parser = argparse.ArgumentParser(
description="Script to move TFLite models from pre-release schema to "
"new schema.")
parser.add_argument(
"input",
type=str,
help="Input TensorFlow lite file in `.json`, `.bin` or `.tflite` format.")
parser.add_argument(
"output",
type=str,
help="Output json or bin TensorFlow lite model compliant with "
"the new schema. Extension must be `.json`, `.bin` or `.tflite`.")
# RAII Temporary Directory, because flatc doesn't allow direct use of tempfiles.
@contextlib.contextmanager
def TemporaryDirectoryResource():
temporary = tempfile.mkdtemp()
try:
yield temporary
finally:
shutil.rmtree(temporary)
class Converter(object):
"""Converts TensorFlow flatbuffer models from old to new version of schema.
This can convert between any version to the latest version. It uses
an incremental upgrade strategy to go from version to version.
Usage:
converter = Converter()
converter.Convert("a.tflite", "a.json")
converter.Convert("b.json", "b.tflite")
"""
def __init__(self):
# TODO(aselle): make this work in the open source version with better
# path.
paths_to_try = [
"../../../../flatbuffers/flatc", # not bazel
"../../../../external/flatbuffers/flatc" # bazel
]
for p in paths_to_try:
self._flatc_path = resource_loader.get_path_to_datafile(p)
if os.path.exists(self._flatc_path): break
def FindSchema(base_name):
return resource_loader.get_path_to_datafile("%s" % base_name)
# Supported schemas for upgrade.
self._schemas = [
(0, FindSchema("schema_v0.fbs"), True, self._Upgrade0To1),
(1, FindSchema("schema_v1.fbs"), True, self._Upgrade1To2),
(2, FindSchema("schema_v2.fbs"), True, self._Upgrade2To3),
(3, FindSchema("schema_v3.fbs"), False, None) # Non-callable by design.
]
# Ensure schemas are sorted, and extract latest version and upgrade
# dispatch function table.
self._schemas.sort()
self._new_version, self._new_schema = self._schemas[-1][:2]
self._upgrade_dispatch = {
version: dispatch
for version, unused1, unused2, dispatch in self._schemas}
def _Read(self, input_file, schema, raw_binary=False):
"""Read a tflite model assuming the given flatbuffer schema.
If `input_file` is in bin, then we must use flatc to convert the schema
from binary to json.
Args:
input_file: a binary (flatbuffer) or json file to read from. Extension
must be `.tflite`, `.bin`, or `.json` for FlatBuffer Binary or
FlatBuffer JSON.
      schema: which schema to use for reading.
      raw_binary: whether to assume a raw binary file without a
        file_identifier; schema versions prior to v3 lacked one and need this.
Raises:
RuntimeError: 1. When flatc cannot be invoked.
        2. When the json file does not exist.
ValueError: When the extension is not json or bin.
Returns:
A dictionary representing the read tflite model.
"""
raw_binary = ["--raw-binary"] if raw_binary else []
with TemporaryDirectoryResource() as tempdir:
basename = os.path.basename(input_file)
basename_no_extension, extension = os.path.splitext(basename)
if extension in [".bin", ".tflite"]:
# Convert to json using flatc
returncode = subprocess.call([
self._flatc_path,
"-t",
"--strict-json",
"--defaults-json",
] + raw_binary + ["-o", tempdir, schema, "--", input_file])
if returncode != 0:
raise RuntimeError("flatc failed to convert from binary to json.")
json_file = os.path.join(tempdir, basename_no_extension + ".json")
if not os.path.exists(json_file):
raise RuntimeError("Could not find %r" % json_file)
elif extension == ".json":
json_file = input_file
else:
raise ValueError("Invalid extension on input file %r" % input_file)
return json.load(open(json_file))
def _Write(self, data, output_file):
"""Output a json or bin version of the flatbuffer model.
Args:
data: Dict representing the TensorFlow Lite model to write.
output_file: filename to write the converted flatbuffer to. (json,
tflite, or bin extension is required).
Raises:
ValueError: When the extension is not json or bin
RuntimeError: When flatc fails to convert json data to binary.
"""
_, extension = os.path.splitext(output_file)
with TemporaryDirectoryResource() as tempdir:
if extension == ".json":
json.dump(data, open(output_file, "w"), sort_keys=True, indent=2)
elif extension in [".tflite", ".bin"]:
input_json = os.path.join(tempdir, "temp.json")
with open(input_json, "w") as fp:
json.dump(data, fp, sort_keys=True, indent=2)
returncode = subprocess.call([
self._flatc_path, "-b", "--defaults-json", "--strict-json", "-o",
tempdir, self._new_schema, input_json
])
if returncode != 0:
raise RuntimeError("flatc failed to convert upgraded json to binary.")
shutil.copy(os.path.join(tempdir, "temp.tflite"), output_file)
else:
raise ValueError("Invalid extension on output file %r" % output_file)
def _Upgrade0To1(self, data):
"""Upgrade data from Version 0 to Version 1.
    Changes: Added subgraphs (which contain a subset of the formerly global
    entries).
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
"""
subgraph = {}
for key_to_promote in ["tensors", "operators", "inputs", "outputs"]:
subgraph[key_to_promote] = data[key_to_promote]
del data[key_to_promote]
data["subgraphs"] = [subgraph]
def _Upgrade1To2(self, data):
"""Upgrade data from Version 1 to Version 2.
    Changes: Renamed operators to conform to the NN API.
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
Raises:
ValueError: Throws when model builtins are numeric rather than symbols.
"""
def RemapOperator(opcode_name):
"""Go from old schema op name to new schema op name.
Args:
opcode_name: String representing the ops (see :schema.fbs).
Returns:
Converted opcode_name from V1 to V2.
"""
old_name_to_new_name = {
"CONVOLUTION": "CONV_2D",
"DEPTHWISE_CONVOLUTION": "DEPTHWISE_CONV_2D",
"AVERAGE_POOL": "AVERAGE_POOL_2D",
"MAX_POOL": "MAX_POOL_2D",
"L2_POOL": "L2_POOL_2D",
"SIGMOID": "LOGISTIC",
"L2NORM": "L2_NORMALIZATION",
"LOCAL_RESPONSE_NORM": "LOCAL_RESPONSE_NORMALIZATION",
"Basic_RNN": "RNN",
}
return (old_name_to_new_name[opcode_name]
if opcode_name in old_name_to_new_name else opcode_name)
def RemapOperatorType(operator_type):
"""Remap operator structs from old names to new names.
Args:
operator_type: String representing the builtin operator data type
string.
(see :schema.fbs).
Raises:
ValueError: When the model has consistency problems.
Returns:
Upgraded builtin operator data type as a string.
"""
old_to_new = {
"PoolOptions": "Pool2DOptions",
"DepthwiseConvolutionOptions": "DepthwiseConv2DOptions",
"ConvolutionOptions": "Conv2DOptions",
"LocalResponseNormOptions": "LocalResponseNormalizationOptions",
"BasicRNNOptions": "RNNOptions",
}
return (old_to_new[operator_type]
if operator_type in old_to_new else operator_type)
for subgraph in data["subgraphs"]:
for ops in subgraph["operators"]:
ops["builtin_options_type"] = RemapOperatorType(
ops["builtin_options_type"])
# Upgrade the operator codes
for operator_code in data["operator_codes"]:
# Check if builtin_code is the appropriate string type
# use type("") instead of str or unicode. for py2and3
if not isinstance(operator_code["builtin_code"], type(u"")):
raise ValueError("builtin_code %r is non-string. this usually means "
"your model has consistency problems." %
(operator_code["builtin_code"]))
operator_code["builtin_code"] = (RemapOperator(
operator_code["builtin_code"]))
def _Upgrade2To3(self, data):
"""Upgrade data from Version 2 to Version 3.
Changed actual read-only tensor data to be in a buffers table instead
of inline with the tensor.
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
"""
buffers = [{"data": []}] # Start with 1 empty buffer
for subgraph in data["subgraphs"]:
if "tensors" not in subgraph:
continue
for tensor in subgraph["tensors"]:
if "data_buffer" not in tensor:
tensor["buffer"] = 0
else:
if tensor["data_buffer"]:
tensor[u"buffer"] = len(buffers)
buffers.append({"data": tensor["data_buffer"]})
else:
tensor["buffer"] = 0
del tensor["data_buffer"]
data["buffers"] = buffers
def _PerformUpgrade(self, data):
"""Manipulate the `data` (parsed JSON) based on changes in format.
    This will incrementally upgrade `data` from version to version.
Args:
data: Dictionary representing the TensorFlow data. This will be upgraded
in place.
"""
while data["version"] < self._new_version:
self._upgrade_dispatch[data["version"]](data)
data["version"] += 1
def Convert(self, input_file, output_file):
"""Perform schema conversion from input_file to output_file.
Args:
      input_file: Filename of TensorFlow Lite data to convert from. Must
        have a `.json`, `.bin`, or `.tflite` extension for the JSON or binary
        forms of the TensorFlow FlatBuffer schema.
      output_file: Filename to write to. The extension also must be `.json`,
        `.bin`, or `.tflite`.
Raises:
      RuntimeError: Generated when none of the schemas supported by the
        upgrader matches the `input_file` data.
"""
# Read data in each schema (since they are incompatible). Version is
# always present. Use the read data that matches the version of the
# schema.
for version, schema, raw_binary, _ in self._schemas:
try:
data_candidate = self._Read(input_file, schema, raw_binary)
except RuntimeError:
continue # Skip and hope another schema works
if "version" not in data_candidate: # Assume version 1 if not present.
data_candidate["version"] = 1
elif data_candidate["version"] == 0: # Version 0 doesn't exist in wild.
data_candidate["version"] = 1
if data_candidate["version"] == version:
self._PerformUpgrade(data_candidate)
self._Write(data_candidate, output_file)
return
raise RuntimeError("No schema that the converter understands worked with "
"the data file you provided.")
def main(argv):
del argv
Converter().Convert(FLAGS.input, FLAGS.output)
if __name__ == "__main__":
FLAGS, unparsed = parser.parse_known_args()
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/schema/upgrade_schema.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""label_image for tflite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
from PIL import Image
from tensorflow.lite.python.interpreter import Interpreter
def load_labels(filename):
with open(filename, 'r') as f:
return [line.strip() for line in f.readlines()]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-i',
'--image',
default='/tmp/grace_hopper.bmp',
help='image to be classified')
parser.add_argument(
'-m',
'--model_file',
default='/tmp/mobilenet_v1_1.0_224_quant.tflite',
help='.tflite model to be executed')
parser.add_argument(
'-l',
'--label_file',
default='/tmp/labels.txt',
help='name of file containing labels')
  parser.add_argument(
      '--input_mean', type=float, default=127.5, help='input mean')
  parser.add_argument(
      '--input_std', type=float, default=127.5, help='input standard deviation')
args = parser.parse_args()
interpreter = Interpreter(model_path=args.model_file)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# check the type of the input tensor
floating_model = input_details[0]['dtype'] == np.float32
# NxHxWxC, H:1, W:2
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
img = Image.open(args.image).resize((width, height))
# add N dim
input_data = np.expand_dims(img, axis=0)
if floating_model:
input_data = (np.float32(input_data) - args.input_mean) / args.input_std
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
results = np.squeeze(output_data)
top_k = results.argsort()[-5:][::-1]
labels = load_labels(args.label_file)
for i in top_k:
if floating_model:
print('{:08.6f}: {}'.format(float(results[i]), labels[i]))
else:
print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i]))
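  # Note (sketch): for the default quantized MobileNet model the uint8 output
  # scores are mapped back to [0, 1] with an assumed scale of 1/255 before
  # printing; a float model's scores are printed unscaled.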
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/examples/python/label_image.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset interface to the MNIST dataset.
This is cloned from
https://github.com/tensorflow/models/blob/master/official/mnist/dataset.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import shutil
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
def read32(bytestream):
"""Read 4 bytes from bytestream as an unsigned 32-bit integer."""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
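# Example (sketch): the MNIST image-file header stores the magic number 2051
# big-endian, so read32 on a stream positioned at b'\x00\x00\x08\x03' returns
# 2051 (0x00000803).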
def check_image_file_header(filename):
"""Validate that filename corresponds to images for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_images, unused
rows = read32(f)
cols = read32(f)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
if rows != 28 or cols != 28:
raise ValueError(
'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %
(f.name, rows, cols))
def check_labels_file_header(filename):
"""Validate that filename corresponds to labels for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_items, unused
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
def download(directory, filename):
"""Download (and unzip) a file from the MNIST dataset if not already done."""
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
_, zipped_filepath = tempfile.mkstemp(suffix='.gz')
print('Downloading %s to %s' % (url, zipped_filepath))
urllib.request.urlretrieve(url, zipped_filepath)
with gzip.open(zipped_filepath, 'rb') as f_in, \
tf.gfile.Open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipped_filepath)
return filepath
def dataset(directory, images_file, labels_file):
"""Download and parse MNIST dataset."""
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
# Normalize from [0, 255] to [0.0, 1.0]
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [784])
return image / 255.0
def decode_label(label):
label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]
label = tf.reshape(label, []) # label is a scalar
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(
labels_file, 1, header_bytes=8).map(decode_label)
return tf.data.Dataset.zip((images, labels))
def train(directory):
"""tf.data.Dataset object for MNIST training data."""
return dataset(directory, 'train-images-idx3-ubyte',
'train-labels-idx1-ubyte')
def test(directory):
"""tf.data.Dataset object for MNIST test data."""
return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/tutorials/dataset.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to evaluate accuracy of TFLite flatbuffer model on mnist dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf # pylint: disable=g-bad-import-order
from tensorflow.lite.tutorials import dataset
flags = tf.app.flags
flags.DEFINE_string('data_dir', '/tmp/data_dir',
'Directory where data is stored.')
flags.DEFINE_string('model_file', '',
'The path to the TFLite flatbuffer model file.')
flags = flags.FLAGS
def test_image_generator():
  # Yields (image, label) example pairs from the MNIST test set.
with tf.compat.v1.Session() as sess:
input_data = tf.compat.v1.data.make_one_shot_iterator(dataset.test(
flags.data_dir)).get_next()
try:
while True:
yield sess.run(input_data)
except tf.errors.OutOfRangeError:
pass
def run_eval(interpreter, input_image):
"""Performs evaluation for input image over specified model.
Args:
interpreter: TFLite interpreter initialized with model to execute.
input_image: Image input to the model.
Returns:
    output: The output tensor produced by the executed model.
"""
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test model on the input images.
input_image = np.reshape(input_image, input_details[0]['shape'])
interpreter.set_tensor(input_details[0]['index'], input_image)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
output = np.squeeze(output_data)
return output
def main(_):
interpreter = tf.lite.Interpreter(model_path=flags.model_file)
interpreter.allocate_tensors()
num_correct, total = 0, 0
for input_data in test_image_generator():
output = run_eval(interpreter, input_data[0])
total += 1
if output == input_data[1]:
num_correct += 1
if total % 500 == 0:
print('Accuracy after %i images: %f' %
(total, float(num_correct) / float(total)))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.compat.v1.app.run(main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/tutorials/mnist_tflite.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub to make toco convert accessible."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(aselle): Remove once no clients internally depend on this.
# pylint: disable=unused-import
from tensorflow.python.pywrap_tensorflow import TocoConvert
# pylint: enable=unused-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/toco/python/tensorflow_wrap_toco.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import tensorflow as tf
from tensorflow.lite.toco import model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2
from tensorflow.lite.toco import types_pb2
from tensorflow.python.platform import googletest
from tensorflow.python.platform import resource_loader
def TensorName(x):
"""Get the canonical (non foo:0 name)."""
return x.name.split(":")[0]
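# For example (illustrative): a placeholder named "img:0" yields
# TensorName(...) == "img", matching the array names that the TOCO model
# flags below expect.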
class TocoFromProtosTest(googletest.TestCase):
def _run(self, sess, in_tensor, out_tensor, should_succeed):
"""Use toco binary to check conversion from graphdef to tflite.
Args:
sess: Active TensorFlow session containing graph.
in_tensor: TensorFlow tensor to use as input.
out_tensor: TensorFlow tensor to use as output.
should_succeed: Whether this is a valid conversion.
"""
# Build all protos and extract graphdef
graph_def = sess.graph_def
toco_flags = toco_flags_pb2.TocoFlags()
toco_flags.input_format = toco_flags_pb2.TENSORFLOW_GRAPHDEF
toco_flags.output_format = toco_flags_pb2.TFLITE
toco_flags.inference_input_type = types_pb2.FLOAT
toco_flags.inference_type = types_pb2.FLOAT
toco_flags.allow_custom_ops = True
model_flags = model_flags_pb2.ModelFlags()
input_array = model_flags.input_arrays.add()
input_array.name = TensorName(in_tensor)
input_array.shape.dims.extend(map(int, in_tensor.shape))
model_flags.output_arrays.append(TensorName(out_tensor))
# Shell out to run toco (in case it crashes)
with tempfile.NamedTemporaryFile() as fp_toco, \
tempfile.NamedTemporaryFile() as fp_model, \
tempfile.NamedTemporaryFile() as fp_input, \
tempfile.NamedTemporaryFile() as fp_output:
fp_model.write(model_flags.SerializeToString())
fp_toco.write(toco_flags.SerializeToString())
fp_input.write(graph_def.SerializeToString())
fp_model.flush()
fp_toco.flush()
fp_input.flush()
tflite_bin = resource_loader.get_path_to_datafile("toco_from_protos.par")
cmdline = " ".join([
tflite_bin, fp_model.name, fp_toco.name, fp_input.name, fp_output.name
])
exitcode = os.system(cmdline)
if exitcode == 0:
stuff = fp_output.read()
self.assertEqual(stuff is not None, should_succeed)
else:
self.assertFalse(should_succeed)
def test_toco(self):
"""Run a couple of TensorFlow graphs against TOCO through the python bin."""
with tf.compat.v1.Session() as sess:
img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
out = tf.identity(val, name="out")
out2 = tf.sin(val, name="out2")
      # This is a valid model.
self._run(sess, img, out, True)
      # This uses tf.sin, which TOCO does not natively support; conversion
      # still succeeds because allow_custom_ops is enabled in _run().
# TODO(aselle): Check to make sure a warning is included.
self._run(sess, img, out2, True)
# This is an identity graph, which doesn't work
self._run(sess, img, img, False)
if __name__ == "__main__":
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/toco/python/toco_from_protos_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python console command to invoke TOCO from serialized protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import app
FLAGS = None
def execute(unused_args):
"""Runs the converter."""
with open(FLAGS.model_proto_file, "rb") as model_file:
model_str = model_file.read()
with open(FLAGS.toco_proto_file, "rb") as toco_file:
toco_str = toco_file.read()
with open(FLAGS.model_input_file, "rb") as input_file:
input_str = input_file.read()
debug_info_str = ""
if FLAGS.debug_proto_file:
with open(FLAGS.debug_proto_file, "rb") as debug_info_file:
debug_info_str = debug_info_file.read()
enable_mlir_converter = FLAGS.enable_mlir_converter
output_str = pywrap_tensorflow.TocoConvert(
model_str,
toco_str,
input_str,
False, # extended_return
debug_info_str,
enable_mlir_converter)
  with open(FLAGS.model_output_file, "wb") as output_file:
    output_file.write(output_str)
sys.exit(0)
def main():
global FLAGS
parser = argparse.ArgumentParser(
description="Invoke toco using protos as input.")
parser.add_argument(
"model_proto_file",
type=str,
help="File containing serialized proto that describes the model.")
parser.add_argument(
"toco_proto_file",
type=str,
help="File containing serialized proto describing how TOCO should run.")
parser.add_argument(
"model_input_file", type=str, help="Input model is read from this file.")
parser.add_argument(
"model_output_file",
type=str,
help="Result of applying TOCO conversion is written here.")
parser.add_argument(
"--debug_proto_file",
type=str,
default="",
help=("File containing serialized `GraphDebugInfo` proto that describes "
"logging information."))
parser.add_argument(
"--enable_mlir_converter",
action="store_true",
help=("Boolean indiciating whether to enable the MLIR converter instead "
"of TOCO converter. (default False)"))
FLAGS, unparsed = parser.parse_known_args()
app.run(main=execute, argv=[sys.argv[0]] + unparsed)
if __name__ == "__main__":
main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/lite/toco/python/toco_from_protos.py
|
# pylint: disable=g-import-not-at-top
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contrib module containing volatile or experimental code.
Warning: The `tf.contrib` module will not be included in TensorFlow 2.0. Many
of its submodules have been integrated into TensorFlow core, or spun-off into
other projects like [`tensorflow_io`](https://github.com/tensorflow/io), or
[`tensorflow_addons`](https://github.com/tensorflow/addons). For instructions
on how to upgrade see the
[Migration guide](https://www.tensorflow.org/beta/guide/migration_guide).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import platform
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import autograph
from tensorflow.contrib import batching
from tensorflow.contrib import bayesflow
from tensorflow.contrib import checkpoint
from tensorflow.contrib import cluster_resolver
from tensorflow.contrib import compiler
from tensorflow.contrib import constrained_optimization
from tensorflow.contrib import copy_graph
from tensorflow.contrib import crf
from tensorflow.contrib import cudnn_rnn
from tensorflow.contrib import data
from tensorflow.contrib import deprecated
from tensorflow.contrib import distribute
from tensorflow.contrib import distributions
from tensorflow.contrib import estimator
from tensorflow.contrib import factorization
from tensorflow.contrib import feature_column
from tensorflow.contrib import framework
from tensorflow.contrib import graph_editor
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import image
from tensorflow.contrib import input_pipeline
from tensorflow.contrib import integrate
from tensorflow.contrib import keras
from tensorflow.contrib import kernel_methods
from tensorflow.contrib import labeled_tensor
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import memory_stats
from tensorflow.contrib import metrics
from tensorflow.contrib import mixed_precision
from tensorflow.contrib import model_pruning
from tensorflow.contrib import nn
from tensorflow.contrib import opt
from tensorflow.contrib import periodic_resample
from tensorflow.contrib import predictor
from tensorflow.contrib import proto
from tensorflow.contrib import quantization
from tensorflow.contrib import quantize
from tensorflow.contrib import reduce_slice_ops
from tensorflow.contrib import resampler
from tensorflow.contrib import rnn
from tensorflow.contrib import rpc
from tensorflow.contrib import saved_model
from tensorflow.contrib import seq2seq
from tensorflow.contrib import signal
from tensorflow.contrib import slim
from tensorflow.contrib import solvers
from tensorflow.contrib import sparsemax
from tensorflow.contrib import staging
from tensorflow.contrib import stat_summarizer
from tensorflow.contrib import stateless
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
from tensorflow.contrib import testing
from tensorflow.contrib import tfprof
from tensorflow.contrib import timeseries
from tensorflow.contrib import tpu
from tensorflow.contrib import training
from tensorflow.contrib import util
from tensorflow.contrib.eager.python import tfe as eager
from tensorflow.contrib.optimizer_v2 import optimizer_v2_symbols as optimizer_v2
from tensorflow.contrib.receptive_field import receptive_field_api as receptive_field
from tensorflow.contrib.recurrent.python import recurrent_api as recurrent
from tensorflow.contrib.remote_fused_graph import pylib as remote_fused_graph
from tensorflow.contrib.specs import python as specs
from tensorflow.contrib.summary import summary
if os.name != "nt" and platform.machine() != "s390x":
try:
from tensorflow.contrib import cloud
except ImportError:
pass
from tensorflow.python.util.lazy_loader import LazyLoader
ffmpeg = LazyLoader("ffmpeg", globals(),
"tensorflow.contrib.ffmpeg")
del os
del platform
del LazyLoader
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This is the legacy module for AutoGraph, kept for backward compatibility.
New users should instead use `tensorflow.python.autograph`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph import * # pylint:disable=wildcard-import
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/autograph/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common benchmarking code.
See https://www.tensorflow.org/community/benchmarks for usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
class ReportingBenchmark(tf.test.Benchmark):
"""Base class for a benchmark that reports general performance metrics.
  Subclasses only need to call `time_execution`, which reports results via
  `report_benchmark`.
"""
def time_execution(self, name, target, iters, warm_up_iters=5):
for _ in range(warm_up_iters):
target()
all_times = []
for _ in range(iters):
iter_time = time.time()
target()
all_times.append(time.time() - iter_time)
avg_time = np.average(all_times)
extras = {}
extras['all_times'] = all_times
if isinstance(name, tuple):
extras['name'] = name
name = '_'.join(str(piece) for piece in name)
self.report_benchmark(
iters=iters, wall_time=avg_time, name=name, extras=extras)
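# A minimal usage sketch (illustrative; the class and method names below are
# hypothetical): any zero-argument callable can be timed.
class _ExampleMatmulBenchmark(ReportingBenchmark):
  def benchmark_matmul(self):
    with tf.Graph().as_default():
      x = tf.random.normal((256, 256))
      product = tf.matmul(x, x)
      with tf.compat.v1.Session() as sess:
        self.time_execution('matmul_256', lambda: sess.run(product), iters=10)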
if __name__ == '__main__':
tf.test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/autograph/examples/benchmarks/benchmark_base.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A basic RL cartpole benchmark.
The RL model uses the OpenAI Gym environment to train a simple network using
the policy gradients method. The training scales the gradients for each step
by the episode's cumulative discounted reward and averages these gradients over
a fixed number of games before applying the optimization step.
For benchmarking purposes, we replace the OpenAI Gym environment with a fake
one that returns random observations and a fixed reward and never ends the
episode. This way the benchmarks compare the same amount of computation at
each step.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
import tensorflow as tf
from tensorflow.contrib import eager
from tensorflow.contrib.autograph.examples.benchmarks import benchmark_base
from tensorflow.python import autograph as ag
from tensorflow.python.eager import context
#
# AutoGraph implementation
#
@ag.convert()
def graph_append_discounted_rewards(destination, rewards, discount_rate):
"""Discounts episode rewards and appends them to destination."""
ag.set_element_type(rewards, tf.float32)
cdr = 0.0
reverse_discounted = []
ag.set_element_type(reverse_discounted, tf.float32)
for i in range(len(rewards) - 1, -1, -1):
cdr = cdr * discount_rate + rewards[i]
cdr.set_shape(())
reverse_discounted.append(cdr)
retval = destination
# Note: AutoGraph doesn't yet support reversed() so we use a loop instead.
for i in range(len(reverse_discounted) - 1, -1, -1):
retval.append(reverse_discounted[i])
return retval
class GraphPolicyNetwork(tf.keras.Model):
"""Policy network for the cart-pole reinforcement learning problem.
The forward path of the network takes an observation from the cart-pole
environment (length-4 vector) and outputs an action.
"""
def __init__(self, hidden_size):
super(GraphPolicyNetwork, self).__init__()
self._hidden_layer = tf.keras.layers.Dense(
hidden_size, activation=tf.nn.elu)
self._output_layer = tf.keras.layers.Dense(1)
def call(self, inputs):
"""Calculates logits and action.
Args:
inputs: Observations from a step in the cart-pole environment, of shape
`(batch_size, input_size)`
Returns:
logits: the logits output by the output layer. This can be viewed as the
        likelihood values of choosing the left (0) action. Shape:
`(batch_size, 1)`.
actions: randomly selected actions ({0, 1}) based on the logits. Shape:
`(batch_size, 1)`.
"""
hidden = self._hidden_layer(inputs)
logits = self._output_layer(hidden)
left_prob = tf.nn.sigmoid(logits)
action_probs = tf.concat([left_prob, 1.0 - left_prob], 1)
actions = tf.multinomial(tf.log(action_probs), 1)
return logits, actions
# TODO(mdan): Move this method out of the class.
@ag.convert()
def train(self, cart_pole_env, optimizer, discount_rate, num_games,
max_steps_per_game):
var_list = tf.trainable_variables()
grad_list = [
tf.TensorArray(tf.float32, 0, dynamic_size=True) for _ in var_list
]
step_counts = []
discounted_rewards = []
ag.set_element_type(discounted_rewards, tf.float32)
ag.set_element_type(step_counts, tf.int32)
# Note: we use a shared object, cart_pole_env here. Because calls to the
# object's method are made through py_func, TensorFlow cannot detect its
# data dependencies. Hence we must manually synchronize access to it
# and ensure the control dependencies are set in such a way that
# calls to reset(), take_one_step, etc. are made in the correct order.
sync_counter = tf.constant(0)
for _ in tf.range(num_games):
with tf.control_dependencies([sync_counter]):
obs = cart_pole_env.reset()
with tf.control_dependencies([obs]):
sync_counter += 1
game_rewards = []
ag.set_element_type(game_rewards, tf.float32)
for step in tf.range(max_steps_per_game):
logits, actions = self(obs) # pylint:disable=not-callable
logits = tf.reshape(logits, ())
actions = tf.reshape(actions, ())
labels = 1.0 - tf.cast(actions, tf.float32)
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits)
grads = tf.gradients(loss, var_list)
for i in range(len(grads)):
grad_list[i].append(grads[i])
with tf.control_dependencies([sync_counter]):
obs, reward, done = cart_pole_env.step(actions)
with tf.control_dependencies([obs]):
sync_counter += 1
obs = tf.reshape(obs, (1, 4))
game_rewards.append(reward)
if reward < 0.1 or done:
step_counts.append(step + 1)
break
discounted_rewards = graph_append_discounted_rewards(
discounted_rewards, game_rewards, discount_rate)
discounted_rewards = ag.stack(discounted_rewards)
discounted_rewards.set_shape((None,))
mean, variance = tf.nn.moments(discounted_rewards, [0])
normalized_rewards = (discounted_rewards - mean) / tf.sqrt(variance)
for i in range(len(grad_list)):
g = ag.stack(grad_list[i])
# This block just adjusts the shapes to match for multiplication.
r = normalized_rewards
if r.shape.ndims < g.shape.ndims:
r = tf.expand_dims(r, -1)
if r.shape.ndims < g.shape.ndims:
r = tf.expand_dims(r, -1)
grad_list[i] = tf.reduce_mean(g * r, axis=0)
optimizer.apply_gradients(
zip(grad_list, var_list), global_step=tf.train.get_global_step())
return ag.stack(step_counts)
@ag.convert()
def graph_train_model(policy_network, cart_pole_env, optimizer, iterations):
"""Trains the policy network for a given number of iterations."""
i = tf.constant(0)
mean_steps_per_iteration = []
ag.set_element_type(mean_steps_per_iteration, tf.int32)
while i < iterations:
steps_per_game = policy_network.train(
cart_pole_env,
optimizer,
discount_rate=0.95,
num_games=20,
max_steps_per_game=200)
mean_steps_per_iteration.append(tf.reduce_mean(steps_per_game))
i += 1
return ag.stack(mean_steps_per_iteration)
class GraphGymCartpoleEnv(object):
"""An env backed by OpenAI Gym's CartPole environment.
  Used only to confirm that the model is functional.
"""
def __init__(self):
cart_pole_env = gym.make('CartPole-v1')
cart_pole_env.seed(0)
cart_pole_env.reset()
self.env = cart_pole_env
def reset(self):
obs = ag.utils.wrap_py_func(self.env.reset, tf.float64, ())
obs = tf.reshape(obs, (1, 4))
obs = tf.cast(obs, tf.float32)
return obs
def step(self, actions):
def take_one_step(actions):
obs, reward, done, _ = self.env.step(actions)
obs = obs.astype(np.float32)
reward = np.float32(reward)
return obs, reward, done
return ag.utils.wrap_py_func(take_one_step,
(tf.float32, tf.float32, tf.bool), (actions,))
class GraphRandomCartpoleEnv(object):
"""An environment that returns random actions and never finishes.
Used during benchmarking, it will cause training to run a constant number of
steps.
"""
def reset(self):
return tf.random.normal((1, 4))
def step(self, actions):
with tf.control_dependencies([actions]):
random_obs = tf.random.normal((1, 4))
fixed_reward = tf.constant(0.001)
done = tf.constant(False)
return random_obs, fixed_reward, done
#
# Eager implementation
#
def eager_append_discounted_rewards(discounted_rewards, rewards, discount_rate):
cdr = 0.0
reverse_discounted = []
for i in range(len(rewards) - 1, -1, -1):
cdr = cdr * discount_rate + rewards[i]
reverse_discounted.append(cdr)
discounted_rewards.extend(reversed(reverse_discounted))
return discounted_rewards
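# Worked example (illustrative): rewards [1., 1., 1.] with discount_rate 0.9
# append [1 + 0.9 * 1.9, 1 + 0.9 * 1.0, 1.0] == [2.71, 1.9, 1.0].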
class EagerPolicyNetwork(tf.keras.Model):
"""Policy network for the cart-pole reinforcement learning problem.
The forward path of the network takes an observation from the cart-pole
environment (length-4 vector) and outputs an action.
"""
def __init__(self, hidden_size):
super(EagerPolicyNetwork, self).__init__()
self._hidden_layer = tf.keras.layers.Dense(
hidden_size, activation=tf.nn.elu)
self._output_layer = tf.keras.layers.Dense(1)
def call(self, inputs):
"""Calculates logits and action.
Args:
inputs: Observations from a step in the cart-pole environment, of shape
`(batch_size, input_size)`
Returns:
logits: the logits output by the output layer. This can be viewed as the
        likelihood values of choosing the left (0) action. Shape:
`(batch_size, 1)`.
actions: randomly selected actions ({0, 1}) based on the logits. Shape:
`(batch_size, 1)`.
"""
hidden = self._hidden_layer(inputs)
logits = self._output_layer(hidden)
left_prob = tf.nn.sigmoid(logits)
action_probs = tf.concat([left_prob, 1.0 - left_prob], 1)
self._grad_fn = eager.implicit_gradients(
self._get_cross_entropy_and_save_actions)
actions = tf.multinomial(tf.log(action_probs), 1)
return logits, actions
def _get_cross_entropy_and_save_actions(self, inputs):
logits, actions = self(inputs) # pylint:disable=not-callable
self._current_actions = actions
labels = 1.0 - tf.cast(actions, tf.float32)
return tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
def train(self, cart_pole_env, optimizer, discount_rate, num_games,
max_steps_per_game):
grad_list = None
step_counts = []
discounted_rewards = []
for _ in range(num_games):
obs = cart_pole_env.reset()
game_rewards = []
for step in range(max_steps_per_game):
grads_and_vars = self._grad_fn(tf.constant([obs], dtype=tf.float32))
grads, var_list = zip(*grads_and_vars)
actions = self._current_actions.numpy()[0][0]
if grad_list is None:
grad_list = [[g] for g in grads]
else:
for i in range(len(grads)):
grad_list[i].append(grads[i])
obs, reward, done = cart_pole_env.step(actions)
game_rewards.append(reward)
if reward < 0.1 or done:
step_counts.append(step + 1)
break
discounted_rewards = eager_append_discounted_rewards(
discounted_rewards, game_rewards, discount_rate)
discounted_rewards = tf.stack(discounted_rewards)
mean, variance = tf.nn.moments(discounted_rewards, [0])
normalized_rewards = (discounted_rewards - mean) / tf.sqrt(variance)
for i in range(len(grad_list)):
g = tf.stack(grad_list[i])
r = normalized_rewards
while r.shape.ndims < g.shape.ndims:
r = tf.expand_dims(r, -1)
grad_list[i] = tf.reduce_mean(g * r, axis=0)
optimizer.apply_gradients(
zip(grad_list, var_list), global_step=tf.train.get_global_step())
return tf.stack(step_counts)
def eager_train_model(policy_network, cart_pole_env, optimizer, iterations):
"""Trains the policy network for a given number of iterations."""
mean_steps_per_iteration = []
for _ in range(iterations):
steps_per_game = policy_network.train(
cart_pole_env,
optimizer,
discount_rate=0.95,
num_games=20,
max_steps_per_game=200)
mean_steps_per_iteration.append(tf.reduce_mean(steps_per_game))
return mean_steps_per_iteration
class EagerGymCartpoleEnv(object):
"""An env backed by OpenAI Gym's CartPole environment.
  Used only to confirm that the model is functional.
"""
def __init__(self):
cart_pole_env = gym.make('CartPole-v1')
cart_pole_env.seed(0)
cart_pole_env.reset()
self.env = cart_pole_env
def reset(self):
return self.env.reset()
def step(self, actions):
obs, reward, done, _ = self.env.step(actions)
return obs, reward, done
class EagerRandomCartpoleEnv(object):
"""An environment that returns random actions and never finishes.
Used during benchmarking, it will cause training to run a constant number of
steps.
"""
def reset(self):
return np.random.normal(size=(4,))
def step(self, actions):
with tf.control_dependencies([actions]):
random_obs = np.random.normal(size=(4,))
fixed_reward = 0.001
done = False
return random_obs, fixed_reward, done
def graph_demo_training():
"""Not used in the benchmark. Used to confirm a functional model."""
with tf.Graph().as_default():
tf.set_random_seed(0)
network = GraphPolicyNetwork(hidden_size=5)
network.build((1, 4))
env = GraphGymCartpoleEnv()
opt = tf.train.AdamOptimizer(0.05)
train_ops = graph_train_model(network, env, opt, iterations=5)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
steps_per_iteration = sess.run(train_ops)
for i, steps in enumerate(steps_per_iteration):
print('Step {} iterations: {}'.format(i, steps))
def eager_demo_training():
with context.eager_mode():
network = EagerPolicyNetwork(hidden_size=5)
network.build((1, 4))
env = EagerGymCartpoleEnv()
opt = tf.train.AdamOptimizer(0.05)
steps_per_iteration = eager_train_model(network, env, opt, iterations=5)
for i, steps in enumerate(steps_per_iteration):
print('Step {} iterations: {}'.format(i, steps))
class RLCartPoleBenchmark(benchmark_base.ReportingBenchmark):
"""Actual benchmark.
Trains the RL agent a fixed number of times, on random environments that
result in constant number of steps.
"""
def benchmark_cartpole(self):
def train_session(sess, ops):
return lambda: sess.run(ops)
def train_eager(network, env, opt):
return lambda: eager_train_model(network, env, opt, iterations=10)
for model_size in (10, 100, 1000):
with tf.Graph().as_default():
network = GraphPolicyNetwork(hidden_size=model_size)
network.build((1, 4))
env = GraphRandomCartpoleEnv()
opt = tf.train.AdamOptimizer(0.05)
train_ops = graph_train_model(network, env, opt, iterations=10)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
self.time_execution(('cartpole', 'autograph', model_size),
train_session(sess, train_ops), 20)
with context.eager_mode():
network = EagerPolicyNetwork(hidden_size=model_size)
network.build((1, 4))
env = EagerRandomCartpoleEnv()
opt = tf.train.AdamOptimizer(0.05)
self.time_execution(('cartpole', 'eager', model_size),
train_eager(network, env, opt), 20)
if __name__ == '__main__':
tf.test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/autograph/examples/benchmarks/cartpole_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kinesis Dataset.
@@KinesisDataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kinesis.python.ops.kinesis_dataset_ops import KinesisDataset
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"KinesisDataset",
]
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/kinesis/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for KinesisDataset.
NOTE: boto3 is needed and the test has to be invoked manually:
```
$ bazel test -s --verbose_failures --config=opt \
--action_env=AWS_ACCESS_KEY_ID=XXXXXX \
--action_env=AWS_SECRET_ACCESS_KEY=XXXXXX \
//tensorflow/contrib/kinesis:kinesis_test
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import boto3
from tensorflow.contrib.kinesis.python.ops import kinesis_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class KinesisDatasetTest(test.TestCase):
def testKinesisDatasetOneShard(self):
client = boto3.client('kinesis', region_name='us-east-1')
    # Set up the Kinesis stream with 1 shard.
stream_name = "tf_kinesis_test_1"
client.create_stream(StreamName=stream_name, ShardCount=1)
# Wait until stream exists, default is 10 * 18 seconds.
client.get_waiter('stream_exists').wait(StreamName=stream_name)
for i in range(10):
data = "D" + str(i)
client.put_record(
StreamName=stream_name, Data=data, PartitionKey="TensorFlow" + str(i))
stream = array_ops.placeholder(dtypes.string, shape=[])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = kinesis_dataset_ops.KinesisDataset(
stream, read_indefinitely=False).repeat(num_epochs)
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(
dataset_ops.get_legacy_output_types(batch_dataset))
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.cached_session() as sess:
# Basic test: read from shard 0 of stream 1.
sess.run(init_op, feed_dict={stream: stream_name, num_epochs: 1})
for i in range(10):
self.assertEqual("D" + str(i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
client.delete_stream(StreamName=stream_name)
# Wait until stream deleted, default is 10 * 18 seconds.
client.get_waiter('stream_not_exists').wait(StreamName=stream_name)
def testKinesisDatasetTwoShards(self):
client = boto3.client('kinesis', region_name='us-east-1')
    # Set up the Kinesis stream with 2 shards.
stream_name = "tf_kinesis_test_2"
client.create_stream(StreamName=stream_name, ShardCount=2)
# Wait until stream exists, default is 10 * 18 seconds.
client.get_waiter('stream_exists').wait(StreamName=stream_name)
for i in range(10):
data = "D" + str(i)
client.put_record(
StreamName=stream_name, Data=data, PartitionKey="TensorFlow" + str(i))
response = client.describe_stream(StreamName=stream_name)
shard_id_0 = response["StreamDescription"]["Shards"][0]["ShardId"]
shard_id_1 = response["StreamDescription"]["Shards"][1]["ShardId"]
stream = array_ops.placeholder(dtypes.string, shape=[])
shard = array_ops.placeholder(dtypes.string, shape=[])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = kinesis_dataset_ops.KinesisDataset(
stream, shard, read_indefinitely=False).repeat(num_epochs)
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(
dataset_ops.get_legacy_output_types(batch_dataset))
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
data = []
with self.cached_session() as sess:
# Basic test: read from shard 0 of stream 2.
sess.run(
init_op, feed_dict={
stream: stream_name, shard: shard_id_0, num_epochs: 1})
with self.assertRaises(errors.OutOfRangeError):
# Use range(11) to guarantee the OutOfRangeError.
for i in range(11):
data.append(sess.run(get_next))
# Basic test: read from shard 1 of stream 2.
sess.run(
init_op, feed_dict={
stream: stream_name, shard: shard_id_1, num_epochs: 1})
with self.assertRaises(errors.OutOfRangeError):
# Use range(11) to guarantee the OutOfRangeError.
for i in range(11):
data.append(sess.run(get_next))
data.sort()
self.assertEqual(data, ["D" + str(i) for i in range(10)])
client.delete_stream(StreamName=stream_name)
# Wait until stream deleted, default is 10 * 18 seconds.
client.get_waiter('stream_not_exists').wait(StreamName=stream_name)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/kinesis/python/kernel_tests/kinesis_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kinesis Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kinesis.python.ops import gen_dataset_ops
from tensorflow.contrib.kinesis.python.ops import kinesis_op_loader # pylint: disable=unused-import
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.util import deprecation
class KinesisDataset(dataset_ops.DatasetSource):
"""A Kinesis Dataset that consumes the message.
Kinesis is a managed service provided by AWS for data streaming.
This dataset reads messages from Kinesis with each message presented
as a `tf.string`.
For example, we can construct and use the KinesisDataset as follows:
```python
tf.compat.v1.enable_eager_execution()
dataset = tf.contrib.kinesis.KinesisDataset(
"kinesis_stream_name", read_indefinitely=False)
for element in dataset:
print(element)
```
Since Kinesis is a data streaming service, data may not be available
at the time it is being read. The argument `read_indefinitely` is
used to control the behavior in this situation. If `read_indefinitely`
is `True`, then `KinesisDataset` will keep retrying to retrieve data
from the stream. If `read_indefinitely` is `False`, an `OutOfRangeError`
is returned immediately instead.
"""
@deprecation.deprecated(
None,
"tf.contrib.kinesis will be removed in 2.0, the support for Kinesis "
"will continue to be provided through the tensorflow/io GitHub project.")
def __init__(self,
stream,
shard="",
read_indefinitely=True,
interval=100000):
"""Create a KinesisDataset.
Args:
stream: A `tf.string` tensor containing the name of the stream.
shard: A `tf.string` tensor containing the id of the shard.
      read_indefinitely: If `True`, the Kinesis dataset will keep retrying
        on `EOF` after the `interval` period. If `False`, then the dataset
        will stop on `EOF`. The default value is `True`.
      interval: The interval (in milliseconds) for the Kinesis client to
        wait before it tries to get records again.
"""
self._stream = ops.convert_to_tensor(
stream, dtype=dtypes.string, name="stream")
self._shard = ops.convert_to_tensor(
shard, dtype=dtypes.string, name="shard")
self._read_indefinitely = ops.convert_to_tensor(
read_indefinitely, dtype=dtypes.bool, name="read_indefinitely")
self._interval = ops.convert_to_tensor(
interval, dtype=dtypes.int64, name="interval")
super(KinesisDataset, self).__init__(self._as_variant_tensor())
def _as_variant_tensor(self):
return gen_dataset_ops.kinesis_dataset(
self._stream, self._shard, self._read_indefinitely, self._interval)
@property
def element_spec(self):
return tensor_spec.TensorSpec([], dtypes.string)
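# Usage sketch for reading a specific shard (illustrative; the stream and
# shard names below are hypothetical and valid AWS credentials are required):
#   dataset = KinesisDataset(
#       "my_stream", shard="shardId-000000000000", read_indefinitely=False)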
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/kinesis/python/ops/kinesis_dataset_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python helper for loading kinesis ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_dataset_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("../../_dataset_ops.so"))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/kinesis/python/ops/kinesis_op_loader.py
|
# -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the Keras API meant to be a high-level API for TensorFlow.
This module is an alias for `tf.keras`, kept for backwards compatibility.
Detailed documentation and user guides are also available at
[keras.io](https://keras.io).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.keras.api.keras import *
try:
from tensorflow.contrib.keras import python # pylint: disable=g-import-not-at-top
del python
except ImportError:
pass
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the Keras API meant to be a high-level API for TensorFlow.
Detailed documentation and user guides are available at
[keras.io](https://keras.io).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras import activations
from tensorflow.contrib.keras.api.keras import applications
from tensorflow.contrib.keras.api.keras import backend
from tensorflow.contrib.keras.api.keras import callbacks
from tensorflow.contrib.keras.api.keras import constraints
from tensorflow.contrib.keras.api.keras import datasets
from tensorflow.contrib.keras.api.keras import initializers
from tensorflow.contrib.keras.api.keras import layers
from tensorflow.contrib.keras.api.keras import losses
from tensorflow.contrib.keras.api.keras import metrics
from tensorflow.contrib.keras.api.keras import models
from tensorflow.contrib.keras.api.keras import optimizers
from tensorflow.contrib.keras.api.keras import preprocessing
from tensorflow.contrib.keras.api.keras import regularizers
from tensorflow.contrib.keras.api.keras import utils
from tensorflow.contrib.keras.api.keras import wrappers
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Activation functions.
from tensorflow.python.keras.activations import elu
from tensorflow.python.keras.activations import hard_sigmoid
from tensorflow.python.keras.activations import linear
from tensorflow.python.keras.activations import relu
from tensorflow.python.keras.activations import selu
from tensorflow.python.keras.activations import sigmoid
from tensorflow.python.keras.activations import softmax
from tensorflow.python.keras.activations import softplus
from tensorflow.python.keras.activations import softsign
from tensorflow.python.keras.activations import tanh
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.activations import deserialize
from tensorflow.python.keras.activations import serialize
from tensorflow.python.keras.activations import get
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/activations/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Metrics functions.
from tensorflow.python.keras.metrics import binary_accuracy
from tensorflow.python.keras.metrics import binary_crossentropy
from tensorflow.python.keras.metrics import categorical_accuracy
from tensorflow.python.keras.metrics import categorical_crossentropy
from tensorflow.python.keras.metrics import cosine_similarity
from tensorflow.python.keras.metrics import hinge
from tensorflow.python.keras.metrics import kullback_leibler_divergence
from tensorflow.python.keras.metrics import mean_absolute_error
from tensorflow.python.keras.metrics import mean_absolute_percentage_error
from tensorflow.python.keras.metrics import mean_squared_error
from tensorflow.python.keras.metrics import mean_squared_logarithmic_error
from tensorflow.python.keras.metrics import poisson
from tensorflow.python.keras.metrics import sparse_categorical_crossentropy
from tensorflow.python.keras.metrics import sparse_top_k_categorical_accuracy
from tensorflow.python.keras.metrics import squared_hinge
from tensorflow.python.keras.metrics import top_k_categorical_accuracy
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.metrics import deserialize
from tensorflow.python.keras.metrics import serialize
from tensorflow.python.keras.metrics import get
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/metrics/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Loss functions.
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import categorical_hinge
from tensorflow.python.keras.losses import cosine_similarity
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras.losses import logcosh
from tensorflow.python.keras.losses import mean_absolute_error
from tensorflow.python.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.losses import deserialize
from tensorflow.python.keras.losses import serialize
from tensorflow.python.keras.losses import get
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/losses/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for Keras models, providing compatibility with other frameworks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.wrappers import scikit_learn
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/wrappers/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras scikit-learn API wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.python.keras.wrappers.scikit_learn import KerasRegressor
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/wrappers/scikit_learn/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Generic layers.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
# Advanced activations.
from tensorflow.python.keras.layers.advanced_activations import LeakyReLU
from tensorflow.python.keras.layers.advanced_activations import PReLU
from tensorflow.python.keras.layers.advanced_activations import ELU
from tensorflow.python.keras.layers.advanced_activations import ThresholdedReLU
# Convolution layers.
from tensorflow.python.keras.layers.convolutional import Conv1D
from tensorflow.python.keras.layers.convolutional import Conv2D
from tensorflow.python.keras.layers.convolutional import Conv3D
from tensorflow.python.keras.layers.convolutional import Conv2DTranspose
from tensorflow.python.keras.layers.convolutional import Conv3DTranspose
from tensorflow.python.keras.layers.convolutional import SeparableConv2D
# Convolution layer aliases.
from tensorflow.python.keras.layers.convolutional import Convolution1D
from tensorflow.python.keras.layers.convolutional import Convolution2D
from tensorflow.python.keras.layers.convolutional import Convolution3D
from tensorflow.python.keras.layers.convolutional import Convolution2DTranspose
from tensorflow.python.keras.layers.convolutional import Convolution3DTranspose
from tensorflow.python.keras.layers.convolutional import SeparableConvolution2D
# Image processing layers.
from tensorflow.python.keras.layers.convolutional import UpSampling1D
from tensorflow.python.keras.layers.convolutional import UpSampling2D
from tensorflow.python.keras.layers.convolutional import UpSampling3D
from tensorflow.python.keras.layers.convolutional import ZeroPadding1D
from tensorflow.python.keras.layers.convolutional import ZeroPadding2D
from tensorflow.python.keras.layers.convolutional import ZeroPadding3D
from tensorflow.python.keras.layers.convolutional import Cropping1D
from tensorflow.python.keras.layers.convolutional import Cropping2D
from tensorflow.python.keras.layers.convolutional import Cropping3D
# Convolutional-recurrent layers.
from tensorflow.python.keras.layers.convolutional_recurrent import ConvLSTM2D
# Core layers.
from tensorflow.python.keras.layers.core import Masking
from tensorflow.python.keras.layers.core import Dropout
from tensorflow.python.keras.layers.core import SpatialDropout1D
from tensorflow.python.keras.layers.core import SpatialDropout2D
from tensorflow.python.keras.layers.core import SpatialDropout3D
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Reshape
from tensorflow.python.keras.layers.core import Permute
from tensorflow.python.keras.layers.core import Flatten
from tensorflow.python.keras.layers.core import RepeatVector
from tensorflow.python.keras.layers.core import Lambda
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.keras.layers.core import ActivityRegularization
# Embedding layers.
from tensorflow.python.keras.layers.embeddings import Embedding
# Locally-connected layers.
from tensorflow.python.keras.layers.local import LocallyConnected1D
from tensorflow.python.keras.layers.local import LocallyConnected2D
# Merge layers.
from tensorflow.python.keras.layers.merge import Add
from tensorflow.python.keras.layers.merge import Multiply
from tensorflow.python.keras.layers.merge import Average
from tensorflow.python.keras.layers.merge import Maximum
from tensorflow.python.keras.layers.merge import Concatenate
from tensorflow.python.keras.layers.merge import Dot
from tensorflow.python.keras.layers.merge import add
from tensorflow.python.keras.layers.merge import multiply
from tensorflow.python.keras.layers.merge import average
from tensorflow.python.keras.layers.merge import maximum
from tensorflow.python.keras.layers.merge import concatenate
from tensorflow.python.keras.layers.merge import dot
# Noise layers.
from tensorflow.python.keras.layers.noise import AlphaDropout
from tensorflow.python.keras.layers.noise import GaussianNoise
from tensorflow.python.keras.layers.noise import GaussianDropout
# Normalization layers.
from tensorflow.python.keras.layers.normalization import BatchNormalization
# Pooling layers.
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling1D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling2D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling3D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling1D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling2D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling3D
# Pooling layer aliases.
from tensorflow.python.keras.layers.pooling import MaxPool1D
from tensorflow.python.keras.layers.pooling import MaxPool2D
from tensorflow.python.keras.layers.pooling import MaxPool3D
from tensorflow.python.keras.layers.pooling import AvgPool1D
from tensorflow.python.keras.layers.pooling import AvgPool2D
from tensorflow.python.keras.layers.pooling import AvgPool3D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool1D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool2D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool3D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool1D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool2D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool3D
# Recurrent layers.
from tensorflow.python.keras.layers.recurrent import SimpleRNN
from tensorflow.python.keras.layers.recurrent import GRU
from tensorflow.python.keras.layers.recurrent import LSTM
# Wrapper layers.
from tensorflow.python.keras.layers.wrappers import Wrapper
from tensorflow.python.keras.layers.wrappers import Bidirectional
from tensorflow.python.keras.layers.wrappers import TimeDistributed
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/layers/__init__.py
|
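# --- Illustrative usage (not part of the original file) ---
# A hedged sketch composing a few of the layers re-exported above via the
# functional API; shapes and layer choices are illustrative assumptions.
from tensorflow.contrib.keras.api.keras.layers import Dense, Dropout, Input
from tensorflow.contrib.keras.api.keras.models import Model

inputs = Input(shape=(16,))            # symbolic input tensor
h = Dense(32, activation='relu')(inputs)
h = Dropout(0.5)(h)                    # active only in the training phase
outputs = Dense(10, activation='softmax')(h)

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.summary()                        # prints layer-by-layer output shapes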
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in constraints functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Constraints functions / callable classes.
from tensorflow.python.keras.constraints import Constraint
from tensorflow.python.keras.constraints import max_norm
from tensorflow.python.keras.constraints import MaxNorm
from tensorflow.python.keras.constraints import min_max_norm
from tensorflow.python.keras.constraints import MinMaxNorm
from tensorflow.python.keras.constraints import non_neg
from tensorflow.python.keras.constraints import NonNeg
from tensorflow.python.keras.constraints import unit_norm
from tensorflow.python.keras.constraints import UnitNorm
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.constraints import deserialize
from tensorflow.python.keras.constraints import serialize
from tensorflow.python.keras.constraints import get
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/constraints/__init__.py
|
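# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of the weight constraints above attached to a layer; the
# layer width and the 2.0 norm cap are illustrative assumptions.
from tensorflow.contrib.keras.api.keras.constraints import max_norm, non_neg
from tensorflow.contrib.keras.api.keras.layers import Dense

# After each update, kernel columns are rescaled so their L2 norm stays
# <= 2.0, and biases are clipped to remain non-negative.
layer = Dense(4, kernel_constraint=max_norm(2.0), bias_constraint=non_neg())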
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras callback classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.callbacks import BaseLogger
from tensorflow.python.keras.callbacks import Callback
from tensorflow.python.keras.callbacks import CSVLogger
from tensorflow.python.keras.callbacks import EarlyStopping
from tensorflow.python.keras.callbacks import History
from tensorflow.python.keras.callbacks import LambdaCallback
from tensorflow.python.keras.callbacks import LearningRateScheduler
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.callbacks import ProgbarLogger
from tensorflow.python.keras.callbacks import ReduceLROnPlateau
from tensorflow.python.keras.callbacks import RemoteMonitor
from tensorflow.python.keras.callbacks import TensorBoard
from tensorflow.python.keras.callbacks import TerminateOnNaN
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/callbacks/__init__.py
|
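# --- Illustrative usage (not part of the original file) ---
# A hedged sketch wiring two of the callbacks above into fit(); the model,
# synthetic data, and file name 'best.h5' are assumptions (HDF5 saving also
# requires h5py to be installed).
import numpy as np

from tensorflow.contrib.keras.api.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.contrib.keras.api.keras.layers import Dense
from tensorflow.contrib.keras.api.keras.models import Sequential

model = Sequential()
model.add(Dense(1, input_shape=(8,)))
model.compile(optimizer='sgd', loss='mse')

x = np.random.rand(64, 8)
y = np.random.rand(64, 1)
callbacks = [
    EarlyStopping(monitor='val_loss', patience=2),    # stop when stuck
    ModelCheckpoint('best.h5', save_best_only=True),  # keep best weights
]
model.fit(x, y, validation_split=0.25, epochs=5, callbacks=callbacks,
          verbose=0)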
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.datasets import boston_housing
from tensorflow.contrib.keras.api.keras.datasets import cifar10
from tensorflow.contrib.keras.api.keras.datasets import cifar100
from tensorflow.contrib.keras.api.keras.datasets import imdb
from tensorflow.contrib.keras.api.keras.datasets import mnist
from tensorflow.contrib.keras.api.keras.datasets import reuters
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/datasets/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST handwritten digits classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.mnist import load_data
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/datasets/mnist/__init__.py
|
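# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of the single function re-exported above; note that
# load_data() downloads mnist.npz to ~/.keras/datasets on first use.
from tensorflow.contrib.keras.api.keras.datasets.mnist import load_data

(x_train, y_train), (x_test, y_test) = load_data()
print(x_train.shape, y_train.shape)            # (60000, 28, 28) (60000,)
x_train = x_train.astype('float32') / 255.0    # common rescaling step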
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Boston housing price regression dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.boston_housing import load_data
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/datasets/boston_housing/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reuters newswire topic classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.reuters import get_word_index
from tensorflow.python.keras.datasets.reuters import load_data
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/datasets/reuters/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR100 small image classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.cifar100 import load_data
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/datasets/cifar100/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IMDB movie review sentiment classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.imdb import get_word_index
from tensorflow.python.keras.datasets.imdb import load_data
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/datasets/imdb/__init__.py
|
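# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of the imdb helpers above; num_words=10000 is an
# illustrative cap, and both calls download data on first use.
from tensorflow.contrib.keras.api.keras.datasets.imdb import get_word_index, load_data

(x_train, y_train), (x_test, y_test) = load_data(num_words=10000)
word_index = get_word_index()          # maps word -> integer id
inverted = {i: w for w, i in word_index.items()}
# By default the encoded reviews reserve the first 3 indices
# (padding/start/unknown), hence the offset when decoding.
print(' '.join(inverted.get(i - 3, '?') for i in x_train[0][:10]))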
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR10 small image classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.cifar10 import load_data
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/datasets/cifar10/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.data_utils import SequenceEnqueuer
from tensorflow.python.keras.utils.generic_utils import custom_object_scope
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import get_custom_objects
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.io_utils import HDF5Matrix
from tensorflow.python.keras.utils.layer_utils import convert_all_kernels_in_model
from tensorflow.python.keras.utils.np_utils import normalize
from tensorflow.python.keras.utils.np_utils import to_categorical
from tensorflow.python.keras.utils.vis_utils import plot_model
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/utils/__init__.py
|
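# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of two numpy helpers re-exported above; the label array
# and class count are illustrative assumptions.
import numpy as np

from tensorflow.contrib.keras.api.keras.utils import normalize, to_categorical

labels = np.array([0, 2, 1, 2])
one_hot = to_categorical(labels, num_classes=3)  # shape (4, 3) one-hot rows
unit_rows = normalize(np.random.rand(4, 3), axis=-1, order=2)  # L2-normalize
print(one_hot.shape, unit_rows.shape)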
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras backend API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=redefined-builtin
from tensorflow.python.keras.backend import abs
from tensorflow.python.keras.backend import all
from tensorflow.python.keras.backend import any
from tensorflow.python.keras.backend import arange
from tensorflow.python.keras.backend import argmax
from tensorflow.python.keras.backend import argmin
from tensorflow.python.keras.backend import backend
from tensorflow.python.keras.backend import batch_dot
from tensorflow.python.keras.backend import batch_flatten
from tensorflow.python.keras.backend import batch_get_value
from tensorflow.python.keras.backend import batch_normalization
from tensorflow.python.keras.backend import batch_set_value
from tensorflow.python.keras.backend import bias_add
from tensorflow.python.keras.backend import binary_crossentropy
from tensorflow.python.keras.backend import cast
from tensorflow.python.keras.backend import cast_to_floatx
from tensorflow.python.keras.backend import categorical_crossentropy
from tensorflow.python.keras.backend import clear_session
from tensorflow.python.keras.backend import clip
from tensorflow.python.keras.backend import concatenate
from tensorflow.python.keras.backend import constant
from tensorflow.python.keras.backend import conv1d
from tensorflow.python.keras.backend import conv2d
from tensorflow.python.keras.backend import conv2d_transpose
from tensorflow.python.keras.backend import conv3d
from tensorflow.python.keras.backend import cos
from tensorflow.python.keras.backend import count_params
from tensorflow.python.keras.backend import ctc_batch_cost
from tensorflow.python.keras.backend import ctc_decode
from tensorflow.python.keras.backend import ctc_label_dense_to_sparse
from tensorflow.python.keras.backend import dot
from tensorflow.python.keras.backend import dropout
from tensorflow.python.keras.backend import dtype
from tensorflow.python.keras.backend import elu
from tensorflow.python.keras.backend import epsilon
from tensorflow.python.keras.backend import equal
from tensorflow.python.keras.backend import eval
from tensorflow.python.keras.backend import exp
from tensorflow.python.keras.backend import expand_dims
from tensorflow.python.keras.backend import eye
from tensorflow.python.keras.backend import flatten
from tensorflow.python.keras.backend import floatx
from tensorflow.python.keras.backend import foldl
from tensorflow.python.keras.backend import foldr
from tensorflow.python.keras.backend import function
from tensorflow.python.keras.backend import gather
from tensorflow.python.keras.backend import get_session
from tensorflow.python.keras.backend import get_uid
from tensorflow.python.keras.backend import get_value
from tensorflow.python.keras.backend import gradients
from tensorflow.python.keras.backend import greater
from tensorflow.python.keras.backend import greater_equal
from tensorflow.python.keras.backend import hard_sigmoid
from tensorflow.python.keras.backend import image_data_format
from tensorflow.python.keras.backend import in_test_phase
from tensorflow.python.keras.backend import in_top_k
from tensorflow.python.keras.backend import in_train_phase
from tensorflow.python.keras.backend import int_shape
from tensorflow.python.keras.backend import is_sparse
from tensorflow.python.keras.backend import l2_normalize
from tensorflow.python.keras.backend import learning_phase
from tensorflow.python.keras.backend import less
from tensorflow.python.keras.backend import less_equal
from tensorflow.python.keras.backend import log
from tensorflow.python.keras.backend import manual_variable_initialization
from tensorflow.python.keras.backend import map_fn
from tensorflow.python.keras.backend import max
from tensorflow.python.keras.backend import maximum
from tensorflow.python.keras.backend import mean
from tensorflow.python.keras.backend import min
from tensorflow.python.keras.backend import minimum
from tensorflow.python.keras.backend import moving_average_update
from tensorflow.python.keras.backend import name_scope
from tensorflow.python.keras.backend import ndim
from tensorflow.python.keras.backend import normalize_batch_in_training
from tensorflow.python.keras.backend import not_equal
from tensorflow.python.keras.backend import one_hot
from tensorflow.python.keras.backend import ones
from tensorflow.python.keras.backend import ones_like
from tensorflow.python.keras.backend import permute_dimensions
from tensorflow.python.keras.backend import placeholder
from tensorflow.python.keras.backend import pool2d
from tensorflow.python.keras.backend import pool3d
from tensorflow.python.keras.backend import pow
from tensorflow.python.keras.backend import print_tensor
from tensorflow.python.keras.backend import prod
from tensorflow.python.keras.backend import random_binomial
from tensorflow.python.keras.backend import random_normal
from tensorflow.python.keras.backend import random_normal_variable
from tensorflow.python.keras.backend import random_uniform
from tensorflow.python.keras.backend import random_uniform_variable
from tensorflow.python.keras.backend import relu
from tensorflow.python.keras.backend import repeat
from tensorflow.python.keras.backend import repeat_elements
from tensorflow.python.keras.backend import reset_uids
from tensorflow.python.keras.backend import reshape
from tensorflow.python.keras.backend import resize_images
from tensorflow.python.keras.backend import resize_volumes
from tensorflow.python.keras.backend import reverse
from tensorflow.python.keras.backend import rnn
from tensorflow.python.keras.backend import round
from tensorflow.python.keras.backend import separable_conv2d
from tensorflow.python.keras.backend import set_epsilon
from tensorflow.python.keras.backend import set_floatx
from tensorflow.python.keras.backend import set_image_data_format
from tensorflow.python.keras.backend import set_learning_phase
from tensorflow.python.keras.backend import set_session
from tensorflow.python.keras.backend import set_value
from tensorflow.python.keras.backend import shape
from tensorflow.python.keras.backend import sigmoid
from tensorflow.python.keras.backend import sign
from tensorflow.python.keras.backend import sin
from tensorflow.python.keras.backend import softmax
from tensorflow.python.keras.backend import softplus
from tensorflow.python.keras.backend import softsign
from tensorflow.python.keras.backend import sparse_categorical_crossentropy
from tensorflow.python.keras.backend import spatial_2d_padding
from tensorflow.python.keras.backend import spatial_3d_padding
from tensorflow.python.keras.backend import sqrt
from tensorflow.python.keras.backend import square
from tensorflow.python.keras.backend import squeeze
from tensorflow.python.keras.backend import stack
from tensorflow.python.keras.backend import std
from tensorflow.python.keras.backend import stop_gradient
from tensorflow.python.keras.backend import sum
from tensorflow.python.keras.backend import switch
from tensorflow.python.keras.backend import tanh
from tensorflow.python.keras.backend import temporal_padding
from tensorflow.python.keras.backend import to_dense
from tensorflow.python.keras.backend import transpose
from tensorflow.python.keras.backend import truncated_normal
from tensorflow.python.keras.backend import update
from tensorflow.python.keras.backend import update_add
from tensorflow.python.keras.backend import update_sub
from tensorflow.python.keras.backend import var
from tensorflow.python.keras.backend import variable
from tensorflow.python.keras.backend import zeros
from tensorflow.python.keras.backend import zeros_like
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/backend/__init__.py
|
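# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of a few backend functions re-exported above, run in
# TF1-style graph mode; the tensor values are illustrative assumptions.
from tensorflow.contrib.keras.api.keras import backend as K

print(K.backend(), K.floatx(), K.epsilon())   # backend configuration
a = K.constant([[1., 2.], [3., 4.]])
b = K.ones((2, 2))
c = K.dot(a, b)                 # symbolic matmul
print(K.eval(c))                # evaluates via the backend session
K.clear_session()               # reset the default graph and session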
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras models API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.models import model_from_config
from tensorflow.python.keras.models import model_from_json
from tensorflow.python.keras.models import model_from_yaml
from tensorflow.python.keras.models import save_model
from tensorflow.python.keras.models import Sequential
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/models/__init__.py
|
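# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of a save/load round trip with the helpers above; the
# model and the file name 'model.h5' are assumptions, and HDF5 persistence
# requires h5py to be installed.
from tensorflow.contrib.keras.api.keras.layers import Dense
from tensorflow.contrib.keras.api.keras.models import Sequential, load_model, save_model

model = Sequential()
model.add(Dense(2, input_shape=(3,), activation='relu'))
model.compile(optimizer='adam', loss='mse')

save_model(model, 'model.h5')        # architecture + weights + optimizer
restored = load_model('model.h5')    # rebuilds an equivalent model
restored.summary()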
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Optimizer classes.
from tensorflow.python.keras.optimizers import Adadelta
from tensorflow.python.keras.optimizers import Adagrad
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.optimizers import Adamax
from tensorflow.python.keras.optimizers import Nadam
from tensorflow.python.keras.optimizers import Optimizer
from tensorflow.python.keras.optimizers import RMSprop
from tensorflow.python.keras.optimizers import SGD
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.optimizers import deserialize
from tensorflow.python.keras.optimizers import serialize
from tensorflow.python.keras.optimizers import get
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/optimizers/__init__.py
|
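# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of configuring an optimizer class from the module above;
# the hyperparameters and the tiny model are illustrative assumptions.
from tensorflow.contrib.keras.api.keras.layers import Dense
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.contrib.keras.api.keras.optimizers import SGD, serialize

sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)  # TF1-era arg name is `lr`
model = Sequential()
model.add(Dense(1, input_shape=(4,)))
model.compile(optimizer=sgd, loss='mse')
print(serialize(sgd))    # {'class_name': 'SGD', 'config': {...}}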
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Regularizer functions / callable classes.
from tensorflow.python.keras.regularizers import L1L2
from tensorflow.python.keras.regularizers import Regularizer
# Functional interface.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.regularizers import l1
from tensorflow.python.keras.regularizers import l2
from tensorflow.python.keras.regularizers import l1_l2
# Auxiliary utils.
from tensorflow.python.keras.regularizers import deserialize
from tensorflow.python.keras.regularizers import serialize
from tensorflow.python.keras.regularizers import get
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/regularizers/__init__.py
|
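# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of the functional regularizer interface above; the
# penalty coefficients are illustrative assumptions.
from tensorflow.contrib.keras.api.keras.layers import Dense
from tensorflow.contrib.keras.api.keras.regularizers import l1, l2

# l2 penalizes the kernel weights; l1 here penalizes the layer's activations.
layer = Dense(16,
              kernel_regularizer=l2(0.01),
              activity_regularizer=l1(1e-4))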
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Applications are canned architectures with pre-trained weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.applications import inception_v3
from tensorflow.contrib.keras.api.keras.applications import mobilenet
from tensorflow.contrib.keras.api.keras.applications import resnet50
from tensorflow.contrib.keras.api.keras.applications import vgg16
from tensorflow.contrib.keras.api.keras.applications import vgg19
from tensorflow.contrib.keras.api.keras.applications import xception
from tensorflow.contrib.keras.api.keras.applications.inception_v3 import InceptionV3
from tensorflow.contrib.keras.api.keras.applications.mobilenet import MobileNet
from tensorflow.contrib.keras.api.keras.applications.resnet50 import ResNet50
from tensorflow.contrib.keras.api.keras.applications.vgg16 import VGG16
from tensorflow.contrib.keras.api.keras.applications.vgg19 import VGG19
from tensorflow.contrib.keras.api.keras.applications.xception import Xception
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/applications/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet50 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.resnet import decode_predictions
from tensorflow.python.keras.applications.resnet import preprocess_input
from tensorflow.python.keras.applications.resnet import ResNet50
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/applications/resnet50/__init__.py
|
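# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of pretrained inference with the symbols above; the random
# input stands in for a real 224x224 RGB image (so the decoded classes are
# meaningless), and weights='imagenet' downloads weights on first use.
import numpy as np

from tensorflow.contrib.keras.api.keras.applications.resnet50 import (
    ResNet50, decode_predictions, preprocess_input)

model = ResNet50(weights='imagenet')
batch = preprocess_input(np.random.rand(1, 224, 224, 3) * 255.0)
preds = model.predict(batch)
print(decode_predictions(preds, top=3)[0])  # [(class_id, name, score), ...]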
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.mobilenet import decode_predictions
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.applications.mobilenet import preprocess_input
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/applications/mobilenet/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception V3 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.inception_v3 import decode_predictions
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.applications.inception_v3 import preprocess_input
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/applications/inception_v3/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VGG16 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.vgg16 import decode_predictions
from tensorflow.python.keras.applications.vgg16 import preprocess_input
from tensorflow.python.keras.applications.vgg16 import VGG16
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/applications/vgg16/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VGG19 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.vgg19 import decode_predictions
from tensorflow.python.keras.applications.vgg19 import preprocess_input
from tensorflow.python.keras.applications.vgg19 import VGG19
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/applications/vgg19/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Xception Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.xception import decode_predictions
from tensorflow.python.keras.applications.xception import preprocess_input
from tensorflow.python.keras.applications.xception import Xception
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/applications/xception/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.preprocessing import image
from tensorflow.contrib.keras.api.keras.preprocessing import sequence
from tensorflow.contrib.keras.api.keras.preprocessing import text
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/preprocessing/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils for image data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.preprocessing.image import array_to_img
from tensorflow.python.keras.preprocessing.image import DirectoryIterator
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.preprocessing.image import img_to_array
from tensorflow.python.keras.preprocessing.image import Iterator
from tensorflow.python.keras.preprocessing.image import load_img
from tensorflow.python.keras.preprocessing.image import NumpyArrayIterator
from tensorflow.python.keras.preprocessing.image import random_channel_shift
from tensorflow.python.keras.preprocessing.image import random_rotation
from tensorflow.python.keras.preprocessing.image import random_shear
from tensorflow.python.keras.preprocessing.image import random_shift
from tensorflow.python.keras.preprocessing.image import random_zoom
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/preprocessing/image/__init__.py
|
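# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of ImageDataGenerator from the module above; the random
# image batch and augmentation settings are illustrative assumptions.
import numpy as np

from tensorflow.contrib.keras.api.keras.preprocessing.image import ImageDataGenerator

x = np.random.rand(32, 28, 28, 1)      # NHWC image batch
y = np.random.randint(0, 10, size=(32,))

datagen = ImageDataGenerator(rotation_range=15,
                             width_shift_range=0.1,
                             horizontal_flip=True)
# flow() yields augmented batches indefinitely; take one and stop.
for x_batch, y_batch in datagen.flow(x, y, batch_size=8):
  print(x_batch.shape, y_batch.shape)  # (8, 28, 28, 1) (8,)
  break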
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils for text data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.preprocessing.text import one_hot
from tensorflow.python.keras.preprocessing.text import text_to_word_sequence
from tensorflow.python.keras.preprocessing.text import Tokenizer
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/preprocessing/text/__init__.py
|
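# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of the three text helpers above; the corpus and the
# vocabulary sizes are illustrative assumptions.
from tensorflow.contrib.keras.api.keras.preprocessing.text import (
    Tokenizer, one_hot, text_to_word_sequence)

texts = ['the quick brown fox', 'the lazy dog']
print(text_to_word_sequence(texts[0]))  # ['the', 'quick', 'brown', 'fox']
print(one_hot(texts[0], n=50))          # hashed word ids in [1, 50)

tok = Tokenizer(num_words=100)
tok.fit_on_texts(texts)
print(tok.texts_to_sequences(texts))    # frequency-ranked integer ids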
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils for sequence data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.preprocessing.sequence import make_sampling_table
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.preprocessing.sequence import skipgrams
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/preprocessing/sequence/__init__.py
|
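# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of the sequence helpers above; the toy sequences and
# vocabulary size are illustrative assumptions.
from tensorflow.contrib.keras.api.keras.preprocessing.sequence import (
    make_sampling_table, pad_sequences, skipgrams)

padded = pad_sequences([[1, 2, 3], [4, 5]], maxlen=4, padding='post')
print(padded)   # [[1 2 3 0] [4 5 0 0]]

# Skip-gram (target, context) pairs with negative samples, word2vec-style.
pairs, labels = skipgrams([1, 2, 3, 4], vocabulary_size=50, window_size=2)
print(pairs[:3], labels[:3])
table = make_sampling_table(size=50)  # optional subsampling probabilities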
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Initializer functions / callable classes.
from tensorflow.python.keras.initializers import Constant
from tensorflow.python.keras.initializers import Identity
from tensorflow.python.keras.initializers import Initializer
from tensorflow.python.keras.initializers import Ones
from tensorflow.python.keras.initializers import Orthogonal
from tensorflow.python.keras.initializers import RandomNormal
from tensorflow.python.keras.initializers import RandomUniform
from tensorflow.python.keras.initializers import TruncatedNormal
from tensorflow.python.keras.initializers import VarianceScaling
from tensorflow.python.keras.initializers import Zeros
# Functional interface.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.initializers import glorot_normal
from tensorflow.python.keras.initializers import glorot_uniform
from tensorflow.python.keras.initializers import he_normal
from tensorflow.python.keras.initializers import he_uniform
from tensorflow.python.keras.initializers import lecun_normal
from tensorflow.python.keras.initializers import lecun_uniform
# Auxiliary utils.
from tensorflow.python.keras.initializers import deserialize
from tensorflow.python.keras.initializers import serialize
from tensorflow.python.keras.initializers import get
del absolute_import
del division
del print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/keras/api/keras/initializers/__init__.py
|
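# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of the initializer interfaces above; the layer shape and
# the seed are illustrative assumptions.
from tensorflow.contrib.keras.api.keras.initializers import (
    Zeros, get, glorot_uniform)
from tensorflow.contrib.keras.api.keras.layers import Dense

layer = Dense(8,
              kernel_initializer=glorot_uniform(seed=0),  # functional form
              bias_initializer=Zeros())                   # class form
he = get('he_normal')    # lookup by name, as layers do internally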
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for evaluation metrics and summary statistics.
See the
[Contrib Metrics](https://tensorflow.org/api_guides/python/contrib.metrics)
guide.
@@auc_with_confidence_intervals
@@streaming_accuracy
@@streaming_mean
@@streaming_recall
@@streaming_recall_at_thresholds
@@streaming_precision
@@streaming_precision_at_thresholds
@@streaming_false_positive_rate
@@streaming_false_positive_rate_at_thresholds
@@streaming_false_negative_rate
@@streaming_false_negative_rate_at_thresholds
@@streaming_auc
@@streaming_dynamic_auc
@@streaming_curve_points
@@streaming_recall_at_k
@@streaming_mean_absolute_error
@@streaming_mean_iou
@@streaming_mean_relative_error
@@streaming_mean_squared_error
@@streaming_mean_tensor
@@streaming_root_mean_squared_error
@@streaming_covariance
@@streaming_pearson_correlation
@@streaming_mean_cosine_distance
@@streaming_percentage_less
@@streaming_sensitivity_at_specificity
@@streaming_sparse_average_precision_at_k
@@streaming_sparse_average_precision_at_top_k
@@streaming_sparse_precision_at_k
@@streaming_sparse_precision_at_top_k
@@streaming_sparse_recall_at_k
@@streaming_specificity_at_sensitivity
@@streaming_concat
@@streaming_false_negatives
@@streaming_false_negatives_at_thresholds
@@streaming_false_positives
@@streaming_false_positives_at_thresholds
@@streaming_true_negatives
@@streaming_true_negatives_at_thresholds
@@streaming_true_positives
@@streaming_true_positives_at_thresholds
@@sparse_recall_at_top_k
@@auc_using_histogram
@@accuracy
@@aggregate_metrics
@@aggregate_metric_map
@@confusion_matrix
@@f1_score
@@set_difference
@@set_intersection
@@set_size
@@set_union
@@cohen_kappa
@@count
@@precision_recall_at_equal_thresholds
@@recall_at_precision
@@precision_at_recall
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import
from tensorflow.contrib.metrics.python.metrics import *
# pylint: enable=wildcard-import
from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix
from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics
from tensorflow.contrib.metrics.python.ops.metric_ops import auc_with_confidence_intervals
from tensorflow.contrib.metrics.python.ops.metric_ops import cohen_kappa
from tensorflow.contrib.metrics.python.ops.metric_ops import count
from tensorflow.contrib.metrics.python.ops.metric_ops import precision_at_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import precision_recall_at_equal_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import recall_at_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import sparse_recall_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_dynamic_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negative_rate
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negative_rate_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positive_rate
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positive_rate_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.set_ops import set_difference
from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection
from tensorflow.contrib.metrics.python.ops.set_ops import set_size
from tensorflow.contrib.metrics.python.ops.set_ops import set_union
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/metrics/__init__.py
|
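# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of a streaming metric from the module above, run in TF1
# graph mode: the metric accumulates in local variables, update_op folds in
# each new batch, and the first returned tensor reads the running value.
import tensorflow as tf

from tensorflow.contrib.metrics import streaming_accuracy

predictions = tf.constant([1, 0, 1, 1], dtype=tf.int32)
labels = tf.constant([1, 0, 0, 1], dtype=tf.int32)
accuracy, update_op = streaming_accuracy(predictions, labels)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())  # metric state lives here
  sess.run(update_op)                         # fold in one batch
  print(sess.run(accuracy))                   # 0.75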