# Dataset columns: python_code | repo_name | file_path
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.data_utils import SequenceEnqueuer
from tensorflow.python.keras.utils.generic_utils import custom_object_scope
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import get_custom_objects
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.io_utils import HDF5Matrix
from tensorflow.python.keras.utils.layer_utils import convert_all_kernels_in_model
from tensorflow.python.keras.utils.np_utils import normalize
from tensorflow.python.keras.utils.np_utils import to_categorical
from tensorflow.python.keras.utils.vis_utils import plot_model
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/utils/__init__.py ====
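# A minimal usage sketch for the utils API re-exported above; a hedged
# example, not part of the original file. Assumes a TF 1.x environment where
# tf.contrib.keras is importable and numpy is installed.
import numpy as np
from tensorflow.contrib.keras.api.keras.utils import normalize, to_categorical

class_ids = np.array([0, 2, 1, 2])
one_hot = to_categorical(class_ids, num_classes=3)  # float array, shape (4, 3)
unit_rows = normalize(one_hot, axis=-1, order=2)    # L2-normalize each row
print(one_hot.shape, unit_rows.shape)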
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras backend API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=redefined-builtin
from tensorflow.python.keras.backend import abs
from tensorflow.python.keras.backend import all
from tensorflow.python.keras.backend import any
from tensorflow.python.keras.backend import arange
from tensorflow.python.keras.backend import argmax
from tensorflow.python.keras.backend import argmin
from tensorflow.python.keras.backend import backend
from tensorflow.python.keras.backend import batch_dot
from tensorflow.python.keras.backend import batch_flatten
from tensorflow.python.keras.backend import batch_get_value
from tensorflow.python.keras.backend import batch_normalization
from tensorflow.python.keras.backend import batch_set_value
from tensorflow.python.keras.backend import bias_add
from tensorflow.python.keras.backend import binary_crossentropy
from tensorflow.python.keras.backend import cast
from tensorflow.python.keras.backend import cast_to_floatx
from tensorflow.python.keras.backend import categorical_crossentropy
from tensorflow.python.keras.backend import clear_session
from tensorflow.python.keras.backend import clip
from tensorflow.python.keras.backend import concatenate
from tensorflow.python.keras.backend import constant
from tensorflow.python.keras.backend import conv1d
from tensorflow.python.keras.backend import conv2d
from tensorflow.python.keras.backend import conv2d_transpose
from tensorflow.python.keras.backend import conv3d
from tensorflow.python.keras.backend import cos
from tensorflow.python.keras.backend import count_params
from tensorflow.python.keras.backend import ctc_batch_cost
from tensorflow.python.keras.backend import ctc_decode
from tensorflow.python.keras.backend import ctc_label_dense_to_sparse
from tensorflow.python.keras.backend import dot
from tensorflow.python.keras.backend import dropout
from tensorflow.python.keras.backend import dtype
from tensorflow.python.keras.backend import elu
from tensorflow.python.keras.backend import epsilon
from tensorflow.python.keras.backend import equal
from tensorflow.python.keras.backend import eval
from tensorflow.python.keras.backend import exp
from tensorflow.python.keras.backend import expand_dims
from tensorflow.python.keras.backend import eye
from tensorflow.python.keras.backend import flatten
from tensorflow.python.keras.backend import floatx
from tensorflow.python.keras.backend import foldl
from tensorflow.python.keras.backend import foldr
from tensorflow.python.keras.backend import function
from tensorflow.python.keras.backend import gather
from tensorflow.python.keras.backend import get_session
from tensorflow.python.keras.backend import get_uid
from tensorflow.python.keras.backend import get_value
from tensorflow.python.keras.backend import gradients
from tensorflow.python.keras.backend import greater
from tensorflow.python.keras.backend import greater_equal
from tensorflow.python.keras.backend import hard_sigmoid
from tensorflow.python.keras.backend import image_data_format
from tensorflow.python.keras.backend import in_test_phase
from tensorflow.python.keras.backend import in_top_k
from tensorflow.python.keras.backend import in_train_phase
from tensorflow.python.keras.backend import int_shape
from tensorflow.python.keras.backend import is_sparse
from tensorflow.python.keras.backend import l2_normalize
from tensorflow.python.keras.backend import learning_phase
from tensorflow.python.keras.backend import less
from tensorflow.python.keras.backend import less_equal
from tensorflow.python.keras.backend import log
from tensorflow.python.keras.backend import manual_variable_initialization
from tensorflow.python.keras.backend import map_fn
from tensorflow.python.keras.backend import max
from tensorflow.python.keras.backend import maximum
from tensorflow.python.keras.backend import mean
from tensorflow.python.keras.backend import min
from tensorflow.python.keras.backend import minimum
from tensorflow.python.keras.backend import moving_average_update
from tensorflow.python.keras.backend import name_scope
from tensorflow.python.keras.backend import ndim
from tensorflow.python.keras.backend import normalize_batch_in_training
from tensorflow.python.keras.backend import not_equal
from tensorflow.python.keras.backend import one_hot
from tensorflow.python.keras.backend import ones
from tensorflow.python.keras.backend import ones_like
from tensorflow.python.keras.backend import permute_dimensions
from tensorflow.python.keras.backend import placeholder
from tensorflow.python.keras.backend import pool2d
from tensorflow.python.keras.backend import pool3d
from tensorflow.python.keras.backend import pow
from tensorflow.python.keras.backend import print_tensor
from tensorflow.python.keras.backend import prod
from tensorflow.python.keras.backend import random_binomial
from tensorflow.python.keras.backend import random_normal
from tensorflow.python.keras.backend import random_normal_variable
from tensorflow.python.keras.backend import random_uniform
from tensorflow.python.keras.backend import random_uniform_variable
from tensorflow.python.keras.backend import relu
from tensorflow.python.keras.backend import repeat
from tensorflow.python.keras.backend import repeat_elements
from tensorflow.python.keras.backend import reset_uids
from tensorflow.python.keras.backend import reshape
from tensorflow.python.keras.backend import resize_images
from tensorflow.python.keras.backend import resize_volumes
from tensorflow.python.keras.backend import reverse
from tensorflow.python.keras.backend import rnn
from tensorflow.python.keras.backend import round
from tensorflow.python.keras.backend import separable_conv2d
from tensorflow.python.keras.backend import set_epsilon
from tensorflow.python.keras.backend import set_floatx
from tensorflow.python.keras.backend import set_image_data_format
from tensorflow.python.keras.backend import set_learning_phase
from tensorflow.python.keras.backend import set_session
from tensorflow.python.keras.backend import set_value
from tensorflow.python.keras.backend import shape
from tensorflow.python.keras.backend import sigmoid
from tensorflow.python.keras.backend import sign
from tensorflow.python.keras.backend import sin
from tensorflow.python.keras.backend import softmax
from tensorflow.python.keras.backend import softplus
from tensorflow.python.keras.backend import softsign
from tensorflow.python.keras.backend import sparse_categorical_crossentropy
from tensorflow.python.keras.backend import spatial_2d_padding
from tensorflow.python.keras.backend import spatial_3d_padding
from tensorflow.python.keras.backend import sqrt
from tensorflow.python.keras.backend import square
from tensorflow.python.keras.backend import squeeze
from tensorflow.python.keras.backend import stack
from tensorflow.python.keras.backend import std
from tensorflow.python.keras.backend import stop_gradient
from tensorflow.python.keras.backend import sum
from tensorflow.python.keras.backend import switch
from tensorflow.python.keras.backend import tanh
from tensorflow.python.keras.backend import temporal_padding
from tensorflow.python.keras.backend import to_dense
from tensorflow.python.keras.backend import transpose
from tensorflow.python.keras.backend import truncated_normal
from tensorflow.python.keras.backend import update
from tensorflow.python.keras.backend import update_add
from tensorflow.python.keras.backend import update_sub
from tensorflow.python.keras.backend import var
from tensorflow.python.keras.backend import variable
from tensorflow.python.keras.backend import zeros
from tensorflow.python.keras.backend import zeros_like
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/backend/__init__.py ====
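# A hedged sketch of the backend API above; assumes TF 1.x graph mode with
# tf.contrib available. K.eval runs the tensor in the backend session.
import numpy as np
from tensorflow.contrib.keras.api.keras import backend as K

x = K.constant(np.array([[1.0, 2.0], [3.0, 4.0]]))
probs = K.softmax(x)                 # row-wise softmax
print(K.int_shape(probs))            # static shape: (2, 2)
print(K.eval(K.sum(probs, axis=1)))  # each row sums to ~1.0
K.clear_session()                    # reset graph and session state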
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras models API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.models import model_from_config
from tensorflow.python.keras.models import model_from_json
from tensorflow.python.keras.models import model_from_yaml
from tensorflow.python.keras.models import save_model
from tensorflow.python.keras.models import Sequential
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/models/__init__.py ====
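# A hedged end-to-end sketch of the models API above. The layers module is an
# assumed sibling package (tensorflow.contrib.keras.api.keras.layers); saving
# to HDF5 additionally requires h5py, and the path is hypothetical.
import numpy as np
from tensorflow.contrib.keras.api.keras.layers import Dense
from tensorflow.contrib.keras.api.keras.models import Sequential, load_model

model = Sequential([Dense(4, activation='relu', input_shape=(8,)),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit(np.random.rand(32, 8), np.random.randint(0, 2, size=(32, 1)),
          epochs=1, verbose=0)
model.save('/tmp/tiny_model.h5')           # hypothetical path; needs h5py
restored = load_model('/tmp/tiny_model.h5')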
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Optimizer classes.
from tensorflow.python.keras.optimizers import Adadelta
from tensorflow.python.keras.optimizers import Adagrad
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.optimizers import Adamax
from tensorflow.python.keras.optimizers import Nadam
from tensorflow.python.keras.optimizers import Optimizer
from tensorflow.python.keras.optimizers import RMSprop
from tensorflow.python.keras.optimizers import SGD
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.optimizers import deserialize
from tensorflow.python.keras.optimizers import serialize
from tensorflow.python.keras.optimizers import get
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/optimizers/__init__.py ====
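# A hedged sketch of the optimizer API above: construct an optimizer, then
# round-trip it through serialize/deserialize. Assumes TF 1.x keras argument
# names (lr rather than learning_rate).
from tensorflow.contrib.keras.api.keras.optimizers import SGD, deserialize, serialize

opt = SGD(lr=0.01, momentum=0.9, nesterov=True)
config = serialize(opt)          # plain dict: {'class_name': 'SGD', 'config': {...}}
restored = deserialize(config)   # an equivalent, freshly built optimizer
print(config['class_name'])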
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Regularizer functions / callable classes.
from tensorflow.python.keras.regularizers import L1L2
from tensorflow.python.keras.regularizers import Regularizer
# Functional interface.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.regularizers import l1
from tensorflow.python.keras.regularizers import l2
from tensorflow.python.keras.regularizers import l1_l2
# Auxiliary utils.
from tensorflow.python.keras.regularizers import deserialize
from tensorflow.python.keras.regularizers import serialize
from tensorflow.python.keras.regularizers import get
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/regularizers/__init__.py ====
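# A hedged sketch of the regularizer API above, attaching a combined L1/L2
# penalty to a layer kernel. Dense comes from the assumed sibling layers
# package.
from tensorflow.contrib.keras.api.keras import regularizers
from tensorflow.contrib.keras.api.keras.layers import Dense

layer = Dense(16, input_shape=(8,),
              kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.001))
print(regularizers.serialize(layer.kernel_regularizer))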
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Applications are canned architectures with pre-trained weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.applications import inception_v3
from tensorflow.contrib.keras.api.keras.applications import mobilenet
from tensorflow.contrib.keras.api.keras.applications import resnet50
from tensorflow.contrib.keras.api.keras.applications import vgg16
from tensorflow.contrib.keras.api.keras.applications import vgg19
from tensorflow.contrib.keras.api.keras.applications import xception
from tensorflow.contrib.keras.api.keras.applications.inception_v3 import InceptionV3
from tensorflow.contrib.keras.api.keras.applications.mobilenet import MobileNet
from tensorflow.contrib.keras.api.keras.applications.resnet50 import ResNet50
from tensorflow.contrib.keras.api.keras.applications.vgg16 import VGG16
from tensorflow.contrib.keras.api.keras.applications.vgg19 import VGG19
from tensorflow.contrib.keras.api.keras.applications.xception import Xception
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/applications/__init__.py ====
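# A hedged inference sketch for the applications API above. The first call
# downloads ImageNet weights; the random input merely stands in for a real
# 224x224 RGB image.
import numpy as np
from tensorflow.contrib.keras.api.keras.applications import ResNet50
from tensorflow.contrib.keras.api.keras.applications.resnet50 import (
    decode_predictions, preprocess_input)

model = ResNet50(weights='imagenet')
batch = preprocess_input(np.random.rand(1, 224, 224, 3) * 255.0)
preds = model.predict(batch)
print(decode_predictions(preds, top=3)[0])  # [(wnid, class_name, score), ...]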
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet50 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.resnet import decode_predictions
from tensorflow.python.keras.applications.resnet import preprocess_input
from tensorflow.python.keras.applications.resnet import ResNet50
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/applications/resnet50/__init__.py ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.mobilenet import decode_predictions
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.applications.mobilenet import preprocess_input
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/applications/mobilenet/__init__.py ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception V3 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.inception_v3 import decode_predictions
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.applications.inception_v3 import preprocess_input
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/applications/inception_v3/__init__.py ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VGG16 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.vgg16 import decode_predictions
from tensorflow.python.keras.applications.vgg16 import preprocess_input
from tensorflow.python.keras.applications.vgg16 import VGG16
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/applications/vgg16/__init__.py ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VGG19 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.vgg19 import decode_predictions
from tensorflow.python.keras.applications.vgg19 import preprocess_input
from tensorflow.python.keras.applications.vgg19 import VGG19
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/applications/vgg19/__init__.py ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Xception Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.xception import decode_predictions
from tensorflow.python.keras.applications.xception import preprocess_input
from tensorflow.python.keras.applications.xception import Xception
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/applications/xception/__init__.py ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.preprocessing import image
from tensorflow.contrib.keras.api.keras.preprocessing import sequence
from tensorflow.contrib.keras.api.keras.preprocessing import text
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/preprocessing/__init__.py ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils for image data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.preprocessing.image import array_to_img
from tensorflow.python.keras.preprocessing.image import DirectoryIterator
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.preprocessing.image import img_to_array
from tensorflow.python.keras.preprocessing.image import Iterator
from tensorflow.python.keras.preprocessing.image import load_img
from tensorflow.python.keras.preprocessing.image import NumpyArrayIterator
from tensorflow.python.keras.preprocessing.image import random_channel_shift
from tensorflow.python.keras.preprocessing.image import random_rotation
from tensorflow.python.keras.preprocessing.image import random_shear
from tensorflow.python.keras.preprocessing.image import random_shift
from tensorflow.python.keras.preprocessing.image import random_zoom
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/preprocessing/image/__init__.py ====
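# A hedged sketch of the image API above: an augmenting generator over an
# in-memory batch. flow() yields indefinitely, so the loop takes one batch
# and stops.
import numpy as np
from tensorflow.contrib.keras.api.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rotation_range=20, horizontal_flip=True,
                             rescale=1. / 255)
x = np.random.rand(8, 32, 32, 3)
y = np.arange(8)
for batch_x, batch_y in datagen.flow(x, y, batch_size=4):
  print(batch_x.shape, batch_y.shape)  # (4, 32, 32, 3) (4,)
  break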
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils for text data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.preprocessing.text import one_hot
from tensorflow.python.keras.preprocessing.text import text_to_word_sequence
from tensorflow.python.keras.preprocessing.text import Tokenizer
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/preprocessing/text/__init__.py ====
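# A hedged sketch of the text API above: split raw strings into words and
# tokenize them into word-index sequences.
from tensorflow.contrib.keras.api.keras.preprocessing.text import (
    Tokenizer, text_to_word_sequence)

print(text_to_word_sequence('The quick brown fox.'))  # lowercased, no punctuation
tokenizer = Tokenizer(num_words=100)
tokenizer.fit_on_texts(['the quick brown fox', 'the lazy dog'])
print(tokenizer.texts_to_sequences(['the quick dog']))  # e.g. [[1, 2, 6]]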
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils for sequence data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.preprocessing.sequence import make_sampling_table
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.preprocessing.sequence import skipgrams
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/preprocessing/sequence/__init__.py ====
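# A hedged sketch of the sequence API above: pad ragged integer sequences to
# a fixed length (0 is the default pad value, prepended unless padding='post').
from tensorflow.contrib.keras.api.keras.preprocessing.sequence import pad_sequences

seqs = [[1, 2, 3], [4, 5], [6]]
print(pad_sequences(seqs, maxlen=4))                  # pads/truncates on the left
print(pad_sequences(seqs, maxlen=4, padding='post'))  # pad on the right instead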
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Initializer functions / callable classes.
from tensorflow.python.keras.initializers import Constant
from tensorflow.python.keras.initializers import Identity
from tensorflow.python.keras.initializers import Initializer
from tensorflow.python.keras.initializers import Ones
from tensorflow.python.keras.initializers import Orthogonal
from tensorflow.python.keras.initializers import RandomNormal
from tensorflow.python.keras.initializers import RandomUniform
from tensorflow.python.keras.initializers import TruncatedNormal
from tensorflow.python.keras.initializers import VarianceScaling
from tensorflow.python.keras.initializers import Zeros
# Functional interface.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.initializers import glorot_normal
from tensorflow.python.keras.initializers import glorot_uniform
from tensorflow.python.keras.initializers import he_normal
from tensorflow.python.keras.initializers import he_uniform
from tensorflow.python.keras.initializers import lecun_normal
from tensorflow.python.keras.initializers import lecun_uniform
# Auxiliary utils.
from tensorflow.python.keras.initializers import deserialize
from tensorflow.python.keras.initializers import serialize
from tensorflow.python.keras.initializers import get
del absolute_import
del division
del print_function
# ==== tensorflow-master :: tensorflow/contrib/keras/api/keras/initializers/__init__.py ====
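# A hedged sketch of the initializer API above: attach an initializer to a
# layer kernel and round-trip its config. Dense is from the assumed sibling
# layers package.
from tensorflow.contrib.keras.api.keras import initializers
from tensorflow.contrib.keras.api.keras.layers import Dense

layer = Dense(8, kernel_initializer=initializers.he_normal(seed=1))
config = initializers.serialize(layer.kernel_initializer)  # plain config dict
restored = initializers.deserialize(config)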
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for evaluation metrics and summary statistics.
See the
[Contrib Metrics](https://tensorflow.org/api_guides/python/contrib.metrics)
guide.
@@auc_with_confidence_intervals
@@streaming_accuracy
@@streaming_mean
@@streaming_recall
@@streaming_recall_at_thresholds
@@streaming_precision
@@streaming_precision_at_thresholds
@@streaming_false_positive_rate
@@streaming_false_positive_rate_at_thresholds
@@streaming_false_negative_rate
@@streaming_false_negative_rate_at_thresholds
@@streaming_auc
@@streaming_dynamic_auc
@@streaming_curve_points
@@streaming_recall_at_k
@@streaming_mean_absolute_error
@@streaming_mean_iou
@@streaming_mean_relative_error
@@streaming_mean_squared_error
@@streaming_mean_tensor
@@streaming_root_mean_squared_error
@@streaming_covariance
@@streaming_pearson_correlation
@@streaming_mean_cosine_distance
@@streaming_percentage_less
@@streaming_sensitivity_at_specificity
@@streaming_sparse_average_precision_at_k
@@streaming_sparse_average_precision_at_top_k
@@streaming_sparse_precision_at_k
@@streaming_sparse_precision_at_top_k
@@streaming_sparse_recall_at_k
@@streaming_specificity_at_sensitivity
@@streaming_concat
@@streaming_false_negatives
@@streaming_false_negatives_at_thresholds
@@streaming_false_positives
@@streaming_false_positives_at_thresholds
@@streaming_true_negatives
@@streaming_true_negatives_at_thresholds
@@streaming_true_positives
@@streaming_true_positives_at_thresholds
@@sparse_recall_at_top_k
@@auc_using_histogram
@@accuracy
@@aggregate_metrics
@@aggregate_metric_map
@@confusion_matrix
@@f1_score
@@set_difference
@@set_intersection
@@set_size
@@set_union
@@cohen_kappa
@@count
@@precision_recall_at_equal_thresholds
@@recall_at_precision
@@precision_at_recall
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import
from tensorflow.contrib.metrics.python.metrics import *
# pylint: enable=wildcard-import
from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix
from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics
from tensorflow.contrib.metrics.python.ops.metric_ops import auc_with_confidence_intervals
from tensorflow.contrib.metrics.python.ops.metric_ops import cohen_kappa
from tensorflow.contrib.metrics.python.ops.metric_ops import count
from tensorflow.contrib.metrics.python.ops.metric_ops import precision_at_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import precision_recall_at_equal_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import recall_at_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import sparse_recall_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_dynamic_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negative_rate
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negative_rate_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positive_rate
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positive_rate_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.set_ops import set_difference
from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection
from tensorflow.contrib.metrics.python.ops.set_ops import set_size
from tensorflow.contrib.metrics.python.ops.set_ops import set_union
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
# ==== tensorflow-master :: tensorflow/contrib/metrics/__init__.py ====
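# A hedged sketch of the streaming-metric pattern used throughout this
# module: each metric returns a (value, update_op) pair backed by local
# variables, so the local-variables initializer must run before any updates.
# Assumes TF 1.x graph mode with tf.contrib available.
import tensorflow as tf

labels = tf.constant([1, 0, 1, 1])
predictions = tf.constant([1, 0, 0, 1])
acc, update_op = tf.contrib.metrics.streaming_accuracy(predictions, labels)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)   # fold one batch into the running totals
  print(sess.run(acc))  # 0.75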
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import variable_scope
# TODO(nsilberman): move into metrics/python/ops/
def accuracy(predictions, labels, weights=None, name=None):
  """Computes the percentage of times that predictions match labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
      matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
      bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.
    name: A name for the operation (optional).

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or
      if dtype is not bool, integer, or string.
  """
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope(name, 'accuracy', values=[predictions, labels]):
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      is_correct = math_ops.multiply(is_correct, weights)
      num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
      return math_ops.div(math_ops.reduce_sum(is_correct),
                          math_ops.reduce_sum(num_values))
    return math_ops.reduce_mean(is_correct)

def f1_score(labels, predictions, weights=None, num_thresholds=200,
             metrics_collections=None, updates_collections=None, name=None):
  """Computes the approximately best F1-score across different thresholds.

  The f1_score function applies a range of thresholds to the predictions to
  convert them from [0, 1] to bool. Precision and recall are computed by
  comparing them to the labels. The F1-Score is then defined as
  2 * precision * recall / (precision + recall). The best one across the
  thresholds is returned.

  Disclaimer: In practice it may be desirable to choose the best threshold on
  the validation set and evaluate the F1 score with this threshold on a
  separate test set. Or it may be desirable to use a fixed threshold (e.g. 0.5).

  This function internally creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the pairs of recall and precision values for a linearly spaced set of
  thresholds from which the best f1-score is derived.

  This value is ultimately returned as `f1-score`, an idempotent operation that
  computes the F1-score (computed using the aforementioned variables). The
  `num_thresholds` variable controls the degree of discretization with larger
  numbers of thresholds more closely approximating the true best F1-score.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the F1-score.

  Example usage with a custom estimator:
  def model_fn(features, labels, mode):
    predictions = make_predictions(features)
    loss = make_loss(predictions, labels)
    train_op = tf.contrib.training.create_train_op(
        total_loss=loss,
        optimizer='Adam')
    eval_metric_ops = {'f1': f1_score(labels, predictions)}
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs=export_outputs)
  estimator = tf.estimator.Estimator(model_fn=model_fn)

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use when discretizing the roc
      curve.
    metrics_collections: An optional list of collections that `f1_score` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.

  Returns:
    f1_score: A scalar `Tensor` representing the current best f1-score across
      different thresholds.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches the `f1_score`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(
      name, 'f1', (labels, predictions, weights)):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions=predictions, labels=labels, weights=weights)
    # To account for floating point imprecisions / avoid division by zero.
    epsilon = 1e-7
    thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                  for i in range(num_thresholds - 2)]
    thresholds = [0.0 - epsilon] + thresholds + [1.0 + epsilon]

    # Confusion matrix.
    values, update_ops = metrics_impl._confusion_matrix_at_thresholds(  # pylint: disable=protected-access
        labels, predictions, thresholds, weights, includes=('tp', 'fp', 'fn'))

    # Compute precision and recall at various thresholds.
    def compute_best_f1_score(tp, fp, fn, name):
      precision_at_t = math_ops.div(tp, epsilon + tp + fp,
                                    name='precision_' + name)
      recall_at_t = math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)
      # Compute F1 score.
      f1_at_thresholds = (
          2.0 * precision_at_t * recall_at_t /
          (precision_at_t + recall_at_t + epsilon))
      return math_ops.reduce_max(f1_at_thresholds)

    def f1_across_replicas(_, values):
      best_f1 = compute_best_f1_score(tp=values['tp'], fp=values['fp'],
                                      fn=values['fn'], name='value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, best_f1)
      return best_f1

    best_f1 = distribution_strategy_context.get_replica_context().merge_call(
        f1_across_replicas, args=(values,))

    update_op = compute_best_f1_score(tp=update_ops['tp'], fp=update_ops['fp'],
                                      fn=update_ops['fn'], name='update')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return best_f1, update_op
# ==== tensorflow-master :: tensorflow/contrib/metrics/python/metrics/classification.py ====
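# A hedged usage sketch for accuracy() and f1_score() above (TF 1.x graph
# mode). f1_score follows the streaming pattern: initialize local variables,
# run the update op per batch, then read the best-threshold F1 value.
import tensorflow as tf
from tensorflow.contrib.metrics.python.metrics import classification

preds = tf.constant([0.9, 0.1, 0.8, 0.4])
labels = tf.constant([1.0, 0.0, 1.0, 1.0])
acc = classification.accuracy(tf.constant([1, 0, 1]), tf.constant([1, 1, 1]))
f1, f1_op = classification.f1_score(labels, preds, num_thresholds=10)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(f1_op)             # accumulate tp/fp/fn for this batch
  print(sess.run([acc, f1]))  # accuracy ~0.667; best F1 over the grid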
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.metrics.python.metrics.classification import *
# pylint: enable=wildcard-import
# ==== tensorflow-master :: tensorflow/contrib/metrics/python/metrics/__init__.py ====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics.classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.metrics import classification
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test

class ClassificationTest(test.TestCase):

  def testAccuracy1D(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.int32, shape=[None])
      labels = array_ops.placeholder(dtypes.int32, shape=[None])
      acc = classification.accuracy(pred, labels)
      result = session.run(acc,
                           feed_dict={pred: [1, 0, 1, 0],
                                      labels: [1, 1, 0, 0]})
      self.assertEqual(result, 0.5)

  def testAccuracy1DBool(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.bool, shape=[None])
      labels = array_ops.placeholder(dtypes.bool, shape=[None])
      acc = classification.accuracy(pred, labels)
      result = session.run(acc,
                           feed_dict={pred: [1, 0, 1, 0],
                                      labels: [1, 1, 0, 0]})
      self.assertEqual(result, 0.5)

  def testAccuracy1DInt64(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.int64, shape=[None])
      labels = array_ops.placeholder(dtypes.int64, shape=[None])
      acc = classification.accuracy(pred, labels)
      result = session.run(acc,
                           feed_dict={pred: [1, 0, 1, 0],
                                      labels: [1, 1, 0, 0]})
      self.assertEqual(result, 0.5)

  def testAccuracy1DString(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.string, shape=[None])
      labels = array_ops.placeholder(dtypes.string, shape=[None])
      acc = classification.accuracy(pred, labels)
      result = session.run(
          acc,
          feed_dict={pred: ['a', 'b', 'a', 'c'],
                     labels: ['a', 'c', 'b', 'c']})
      self.assertEqual(result, 0.5)

  def testAccuracyDtypeMismatch(self):
    with self.assertRaises(ValueError):
      pred = array_ops.placeholder(dtypes.int32, shape=[None])
      labels = array_ops.placeholder(dtypes.int64, shape=[None])
      classification.accuracy(pred, labels)

  def testAccuracyFloatLabels(self):
    with self.assertRaises(ValueError):
      pred = array_ops.placeholder(dtypes.int32, shape=[None])
      labels = array_ops.placeholder(dtypes.float32, shape=[None])
      classification.accuracy(pred, labels)

  def testAccuracy1DWeighted(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.int32, shape=[None])
      labels = array_ops.placeholder(dtypes.int32, shape=[None])
      weights = array_ops.placeholder(dtypes.float32, shape=[None])
      # Pass the weights through so the weighted code path is exercised.
      acc = classification.accuracy(pred, labels, weights)
      result = session.run(acc,
                           feed_dict={
                               pred: [1, 0, 1, 1],
                               labels: [1, 1, 0, 1],
                               weights: [3.0, 1.0, 2.0, 0.0]
                           })
      self.assertEqual(result, 0.5)

  def testAccuracy1DWeightedBroadcast(self):
    with self.cached_session() as session:
      pred = array_ops.placeholder(dtypes.int32, shape=[None])
      labels = array_ops.placeholder(dtypes.int32, shape=[None])
      weights = array_ops.placeholder(dtypes.float32, shape=[])
      acc = classification.accuracy(pred, labels, weights)
      result = session.run(acc,
                           feed_dict={
                               pred: [1, 0, 1, 0],
                               labels: [1, 1, 0, 0],
                               weights: 3.0,
                           })
      self.assertEqual(result, 0.5)
class F1ScoreTest(test.TestCase):
def setUp(self):
super(F1ScoreTest, self).setUp()
np.random.seed(1)
def testVars(self):
classification.f1_score(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_thresholds=3)
expected = {'f1/true_positives:0', 'f1/false_positives:0',
'f1/false_negatives:0'}
self.assertEquals(
expected, set(v.name for v in variables.local_variables()))
self.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
self.assertEquals(
set(expected),
set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
f1, _ = classification.f1_score(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_thresholds=3,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [f1])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, f1_op = classification.f1_score(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_thresholds=3,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [f1_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes.int64, seed=2)
f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run([f1_op])
# Then verify idempotency.
initial_f1 = f1.eval()
for _ in range(10):
self.assertAllClose(initial_f1, f1.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes.float32)
labels = constant_op.constant(inputs)
f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
sess.run(variables.local_variables_initializer())
sess.run([f1_op])
self.assertEqual(1, f1.eval())
def testSomeCorrect(self):
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run([f1_op])
# Threshold 0 will have around 0.5 precision and 1 recall yielding an F1
# score of 2 * 0.5 * 1 / (1 + 0.5).
self.assertAlmostEqual(2 * 0.5 * 1 / (1 + 0.5), f1.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(10000, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes.float32)
f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
sess.run(variables.local_variables_initializer())
sess.run([f1_op])
# Threshold 0 will have around 0.5 precision and 1 recall yielding an F1
# score of 2 * 0.5 * 1 / (1 + 0.5).
self.assertAlmostEqual(2 * 0.5 * 1 / (1 + 0.5), f1.eval(), places=2)
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes.float32)
f1, f1_op = classification.f1_score(predictions, labels, weights,
num_thresholds=3)
sess.run(variables.local_variables_initializer())
sess.run([f1_op])
self.assertAlmostEqual(1.0, f1.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes.float32)
f1, f1_op = classification.f1_score(predictions, labels, weights,
num_thresholds=3)
sess.run(variables.local_variables_initializer())
sess.run([f1_op])
self.assertAlmostEqual(1.0, f1.eval(), places=5)
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes.float32)
labels = array_ops.zeros([4])
f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
sess.run(variables.local_variables_initializer())
sess.run([f1_op])
self.assertAlmostEqual(0.0, f1.eval(), places=5)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [-0.01, 0.5, 1.01]
expected_max_f1 = -1.0
for threshold in thresholds:
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] >= threshold:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
expected_f1 = (2 * expected_prec * expected_rec /
(epsilon + expected_prec + expected_rec))
if expected_f1 > expected_max_f1:
expected_max_f1 = expected_f1
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
tf_predictions, tf_labels = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset
.from_tensor_slices((predictions, labels))
.repeat()
.batch(batch_size)).get_next()
f1, f1_op = classification.f1_score(tf_labels, tf_predictions,
num_thresholds=3)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in range(num_batches):
sess.run([f1_op])
      # Since this is only approximate, we can't expect a 6-digit match,
      # although accuracy should improve with a higher number of samples
      # and thresholds.
self.assertAlmostEqual(expected_max_f1, f1.eval(), 2)
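# A brute-force reference for the expected value above, as a vectorized NumPy
# sketch (not part of the original test; the helper name `_max_f1_reference`
# is hypothetical): sweep the thresholds, count tp/fp/fn, and keep the best
# F1 score.
def _max_f1_reference(labels, predictions, thresholds, epsilon=1e-7):
  labels = np.asarray(labels).reshape(-1).astype(bool)
  predictions = np.asarray(predictions).reshape(-1)
  best = -1.0
  for threshold in thresholds:
    positive = predictions >= threshold
    tp = np.sum(positive & labels)
    fp = np.sum(positive & ~labels)
    fn = np.sum(~positive & labels)
    precision = tp / (epsilon + tp + fp)
    recall = tp / (epsilon + tp + fn)
    best = max(best, 2 * precision * recall / (epsilon + precision + recall))
  return best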
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/metrics/python/metrics/classification_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.cached_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.cached_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.cached_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
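# For reference (a sketch, not part of the original tests): the expected
# values above are simply `np.cumsum` of the input, which is the contract
# `_strict_1d_cumsum` is meant to satisfy on 1-D float tensors.
def _cumsum_expected(values):
  return np.cumsum(np.asarray(values, dtype=np.float32))
# E.g. _cumsum_expected([1, 2, 3]) -> array([1., 3., 6.], dtype=float32).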
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.cached_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
      frac_true: Number in (0, 1). Expected fraction of resultant labels that
        will be True; this holds only in expectation, so the realized
        fraction may be higher or lower.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.cached_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
    frac_true: Number in (0, 1). Expected fraction of resultant labels that
      will be True; this holds only in expectation, so the realized fraction
      may be higher or lower.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
  # also checked this against sklearn.metrics.roc_auc_score.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
  # So for AUC in [0.5, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
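# A quick Monte Carlo sanity check of the derivation above (a sketch, not
# part of the original file): with F ~ U[0, 1] and T ~ U[x, 1], the
# empirical P[T > F] should approach 0.5 + 0.5 * x, i.e. the desired AUC.
def _empirical_auc(desired_auc, num_samples=100000, seed=0):
  rng = np.random.RandomState(seed)
  x = 2 * desired_auc - 1  # Valid for desired_auc >= 0.5.
  false_scores = rng.rand(num_samples)
  true_scores = x + rng.rand(num_samples) * (1 - x)
  return np.mean(true_scores > false_scores)
# E.g. _empirical_auc(0.75) is close to 0.75.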
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Large tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class StreamingPrecisionRecallAtEqualThresholdsLargeTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testLargeCase(self):
shape = [32, 512, 256, 1]
predictions = random_ops.random_uniform(
shape, 0.0, 1.0, dtype=dtypes_lib.float32)
labels = math_ops.greater(random_ops.random_uniform(shape, 0.0, 1.0), 0.5)
result, update_op = metric_ops.precision_recall_at_equal_thresholds(
labels=labels, predictions=predictions, num_thresholds=201)
# Run many updates, enough to cause highly inaccurate values if the
# code used float32 for accumulation.
num_updates = 71
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_updates):
sess.run(update_op)
prdata = sess.run(result)
# Since we use random values, we won't know the tp/fp/tn/fn values, but
# tp and fp at threshold 0 should be the total number of positive and
# negative labels, hence their sum should be total number of pixels.
      expected_value = 1.0 * np.prod(shape) * num_updates
got_value = prdata.tp[0] + prdata.fp[0]
# They should be at least within 1.
self.assertNear(got_value, expected_value, 1.0)
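# The float32-accumulation hazard that testLargeCase guards against can be
# reproduced directly in NumPy (a sketch with assumed values, not part of
# the original test): once a float32 total reaches 2**24, adding 1.0 to it
# is rounded away, while a float64 total keeps counting.
def _float32_accumulation_demo(num_increments=100):
  total32 = np.float32(2.0**24)
  total64 = np.float64(2.0**24)
  for _ in xrange(num_increments):
    total32 += np.float32(1.0)
    total64 += 1.0
  return total32, total64  # total32 is unchanged; total64 grew as expected.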
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/metrics/python/ops/metric_ops_large_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Confusion matrix related metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import confusion_matrix as cm
def confusion_matrix(labels, predictions, num_classes=None, dtype=dtypes.int32,
name=None, weights=None):
"""Deprecated. Use tf.math.confusion_matrix instead."""
return cm.confusion_matrix(labels=labels, predictions=predictions,
num_classes=num_classes, dtype=dtype, name=name,
weights=weights)
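# A minimal usage sketch (assumed example values, not part of the original
# module). The wrapper simply forwards to the core confusion-matrix op, with
# rows indexing labels and columns indexing predictions:
def _confusion_matrix_example():
  # labels [1, 2, 4] vs. predictions [2, 2, 4] yield a 5x5 matrix with ones
  # at [1, 2], [2, 2] and [4, 4]; evaluate the tensor in a session to see it.
  return confusion_matrix(labels=[1, 2, 4], predictions=[2, 2, 4])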
|
tensorflow-master
|
tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
  Only the 1 values in `labels` are included in the result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, np.int64),
np.array(shape, np.int64))
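# Worked example (a sketch, not part of the original file): for the indicator
#   [[0, 1, 0],
#    [1, 0, 1]]
# the helper above emits indices [[0, 0], [1, 0], [1, 1]], values [1, 0, 2]
# (the column ids of the ones) and dense shape [2, 3].
def _binary_2d_label_example():
  sparse_value = _binary_2d_label_to_sparse_value([[0, 1, 0], [1, 0, 1]])
  return sparse_value.indices, sparse_value.values, sparse_value.dense_shape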
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
  Only the 1 values in `labels` are included in the result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
  Only the 1 values in `labels` are included in the result.
  Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, np.int64),
np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
  Only the 1 values in `labels` are included in the result.
  Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_metric_variables(test_case, expected):
  test_case.assertEqual(
      set(expected), set(v.name for v in variables.local_variables()))
  test_case.assertEqual(
      set(expected),
      set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
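# A NumPy reference for the weighted cases above (a sketch, not part of the
# original tests): streaming_mean keeps running `total` and `count`
# accumulators, with per-batch weights broadcast to the shape of the values.
def _weighted_mean_reference(batches, weights):
  total, count = 0.0, 0.0
  for values, w in zip(batches, weights):
    values = np.asarray(values, np.float32)
    w = np.broadcast_to(np.asarray(w, np.float32), values.shape)
    total += np.sum(values * w)
    count += np.sum(w)
  return total / count
# E.g. the 1-d weighted case above: _weighted_mean_reference(
#     [[0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]], [1, 0, 0, 1]) == 0.45.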
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_metric_variables(self,
('mean/total_tensor:0', 'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_metric_variables(self,
('my_accuracy/count:0', 'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
      # If streaming_accuracy did not flatten the weight, accuracy would be
      # 0.33333334 due to an unintended broadcast of the weight. Due to
      # flattening, it will be higher than .95.
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
      # If streaming_accuracy did not flatten the weight, accuracy would be
      # 0.33333334 due to an unintended broadcast of the weight. Due to
      # flattening, it will be higher than .95.
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
29.0, 31.0)))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())
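# The four streaming-count tests above share one fixture; this NumPy sketch
# (a reference, not part of the original tests) recovers the expected
# unweighted counts tp=1, fn=2, fp=4, tn=5 from those matrices.
def _confusion_counts_reference():
  predictions = np.array(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0)), bool)
  labels = np.array(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)), bool)
  tp = np.sum(predictions & labels)
  fn = np.sum(~predictions & labels)
  fp = np.sum(predictions & ~labels)
  tn = np.sum(~predictions & ~labels)
  return tp, fn, fp, tn  # (1, 2, 4, 5), matching the assertions above.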
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives_at_thresholds(
        (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
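# The *_at_thresholds tests above binarize the same scores at thresholds
# (0.15, 0.5, 0.85); this sketch (a reference, not part of the original
# tests) reproduces e.g. the unweighted true-positive counts (3, 1, 0).
def _thresholded_tp_reference():
  predictions = np.array(((0.9, 0.2, 0.8, 0.1),
                          (0.2, 0.9, 0.7, 0.6),
                          (0.1, 0.2, 0.4, 0.3)))
  labels = np.array(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)), bool)
  return [int(np.sum((predictions > t) & labels)) for t in (0.15, 0.5, 0.85)]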
class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('recall/false_negatives/count:0', 'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())


class StreamingFPRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_positive_rate/false_positives/count:0',
'false_positive_rate/true_negatives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertEqual(initial_fpr, fpr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fpr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 2.0 + 5.0
weighted_f = (2.0 + 2.0) + (5.0 + 5.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
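      # FPR = FP / (FP + TN): the false positives sit at weights 1 and 3, the
      # true negatives at weights 4 and 2, so the expected rate is 4 / 10.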
weighted_fp = 1.0 + 3.0
weighted_f = (1.0 + 4.0) + (2.0 + 3.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fpr.eval())
def testZeroFalsePositivesAndTrueNegativesGivesZeroFPR(self):
predictions = array_ops.ones((1, 4))
labels = array_ops.ones((1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())


class StreamingFNRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_negative_rate/false_negatives/count:0',
'false_negative_rate/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertEqual(initial_fnr, fnr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fnr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
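      # FNR = FN / (FN + TP): the false negatives sit at weights 2 and 4, the
      # true positives at weights 3 and 1, so the expected rate is 6 / 10.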
weighted_fn = 2.0 + 4.0
weighted_t = (2.0 + 3.0) + (1.0 + 4.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fnr.eval())
def testZeroFalseNegativesAndTruePositivesGivesZeroFNR(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())


class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
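  # Each ROC point is (fpr, tpr) at one of num_thresholds=3 thresholds. At
  # threshold 0.5 the three predictions above it are all positives, so tpr is
  # 3/4 = 0.75 and fpr is 0, matching the middle expected point below.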
def testManyValuesROC(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])


def _np_auc(predictions, labels, weights=None):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
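  # tp[i] is the weighted true positive rate obtained by thresholding just
  # below the i-th highest prediction; summing it over the (weighted)
  # negatives integrates tpr along the fpr axis, which is the ROC AUC.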
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives


class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testPredictionsOutOfRange(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
_, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
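      # Weighted counts: tp=3, fp=1, fn=2, tn=4. Since predictions are exactly
      # 0 or 1, the ROC curve reduces to (0, 0) -> (0.2, 0.6) -> (1, 1), and
      # the trapezoidal area is 0.06 + 0.64 = 0.7.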
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.cached_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
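    # Helper: split a vector into batches, register one enqueue op per batch
    # on a FIFO queue, and return the dequeue op yielding one batch per run.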
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
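    # Exercise the unweighted, uniformly weighted, and randomly weighted cases
    # against the Numpy reference implementation above.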
for weights in (None, np.ones(num_samples),
np.random.exponential(scale=1.0, size=num_samples)):
expected_auc = _np_auc(predictions, labels, weights)
with self.cached_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (
_enqueue_as_batches(weights, enqueue_ops)
if weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since this is only approximate, we can't expect a 6-digit match,
        # although accuracy should improve with more samples and thresholds.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)


class StreamingDynamicAUCTest(test.TestCase):
def setUp(self):
super(StreamingDynamicAUCTest, self).setUp()
np.random.seed(1)
ops.reset_default_graph()
def testUnknownCurve(self):
with self.assertRaisesRegexp(
ValueError, 'curve must be either ROC or PR, TEST_CURVE unknown'):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
curve='TEST_CURVE')
def testVars(self):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)), predictions=array_ops.ones((10, 1)))
_assert_metric_variables(self, [
'dynamic_auc/concat_labels/array:0', 'dynamic_auc/concat_labels/size:0',
'dynamic_auc/concat_preds/array:0', 'dynamic_auc/concat_preds/size:0'
])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
auc, _ = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [auc])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in xrange(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in xrange(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllLabelsOnes(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([1, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testAllLabelsZeros(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([0, 0, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
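  # Unlike the thresholded streaming_auc above, the dynamic variant only ranks
  # the scores, so predictions outside [0, 1] are accepted.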
def testNonZeroOnePredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2.5, -2.5, 2.5, -2.5], dtype=dtypes_lib.float32)
labels = constant_op.constant([1, 0, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(auc.eval(), 1.0)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0, 1, 0])
labels = constant_op.constant([0, 1, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.5, auc.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, auc.eval())
def testExceptionOnIncompatibleShapes(self):
with self.cached_session() as sess:
predictions = array_ops.ones([5])
labels = array_ops.zeros([6])
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
def testWithMultipleUpdates(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels, tf_predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
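      # The dynamic metric accumulates every example it has seen, so after all
      # batches it should match the exact AUC over the concatenated history.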
expected_auc = _np_auc(predictions, labels)
self.assertAlmostEqual(expected_auc, auc.eval())
def testAUCPRReverseIncreasingPredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-5)
def testAUCPRJumbledPredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81], dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-6)
def testAUCPRPredictionsLessThanHalf(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-5)
def testWithWeights(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
weights = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
tf_weights = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels,
tf_predictions,
weights=tf_weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.uniform(-0.2, 0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
new_weights = np.random.uniform(0.0, 3.0, size=batch_size)
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
weights = np.concatenate([weights, new_weights])
sess.run([tf_labels.assign(new_labels),
tf_predictions.assign(new_predictions),
tf_weights.assign(new_weights)])
sess.run(update_op)
expected_auc = _np_auc(predictions, labels, weights)
self.assertAlmostEqual(expected_auc, auc.eval())


class AucWithConfidenceIntervalsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
gotten_result: A AucWithConfidenceIntervalData object.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(
list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
self.assertAllClose(expected_values, gotten_dict[key])
def _testCase(self, predictions, labels, expected_result, weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type float32.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
weights: Optional weights tensor.
"""
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.int64)
weights_tensor = None
if weights:
weights_tensor = constant_op.constant(weights, dtype=dtypes_lib.float32)
gotten_result, update_op = (
metric_ops.auc_with_confidence_intervals(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor))
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result)
def testAucAllCorrect(self):
self._testCase(
predictions=[0., 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
expected_result={
'auc': 0.66666667,
'lower': 0.27826795,
'upper': 0.91208512,
})
def testAucUnorderedInput(self):
self._testCase(
predictions=[1.0, 0.6, 0., 0.3, 0.4, 0.2, 0.5, 0.3, 0.6, 0.8],
labels=[0, 1, 0, 1, 0, 0, 1, 0, 0, 1],
expected_result={
'auc': 0.66666667,
'lower': 0.27826795,
'upper': 0.91208512,
})
def testAucWithWeights(self):
self._testCase(
predictions=[0., 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
weights=[0.5, 0.6, 1.2, 1.5, 2.0, 2.0, 1.5, 1.2, 0.6, 0.5],
expected_result={
'auc': 0.65151515,
'lower': 0.28918604,
'upper': 0.89573906,
})
def testAucEqualOne(self):
self._testCase(
predictions=[0, 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
expected_result={
'auc': 1.0,
'lower': 1.0,
'upper': 1.0,
})
def testAucEqualZero(self):
self._testCase(
predictions=[0, 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
expected_result={
'auc': 0.0,
'lower': 0.0,
'upper': 0.0,
})
def testNonZeroOnePredictions(self):
self._testCase(
predictions=[2.5, -2.5, .5, -.5, 1],
labels=[1, 0, 1, 0, 0],
expected_result={
'auc': 0.83333333,
'lower': 0.15229267,
'upper': 0.99286517,
})
def testAllLabelsOnes(self):
self._testCase(
predictions=[1., 1., 1., 1., 1.],
labels=[1, 1, 1, 1, 1],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testAllLabelsZeros(self):
self._testCase(
predictions=[0., 0., 0., 0., 0.],
labels=[0, 0, 0, 0, 0],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testWeightSumLessThanOneAll(self):
self._testCase(
predictions=[1., 1., 0., 1., 0., 0.],
labels=[1, 1, 1, 0, 0, 0],
weights=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testWithMultipleUpdates(self):
batch_size = 50
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.auc_with_confidence_intervals(tf_labels,
tf_predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
expected_auc = _np_auc(predictions, labels)
self.assertAllClose(expected_auc, auc.auc.eval())
def testExceptionOnFloatLabels(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([0.7, 0, 1, 0, 1])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
      self.assertRaises(TypeError, sess.run, update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0, 1, 0])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1, 1, 0])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)


class StreamingPrecisionRecallAtEqualThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result, eps=None):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
gotten_result: A PrecisionRecallData object.
eps: Epsilon value to use for testing output values. If unspecified, use
default from assertAllClose.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
if eps is not None:
self.assertAllClose(expected_values, gotten_dict[key], atol=eps)
else:
self.assertAllClose(expected_values, gotten_dict[key])
def testVars(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32))
_assert_metric_variables(
self, ('precision_recall_at_equal_thresholds/variables/tp_buckets:0',
'precision_recall_at_equal_thresholds/variables/fp_buckets:0'))
def testVarsWithName(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32),
name='foo')
_assert_metric_variables(
self, ('foo/variables/tp_buckets:0', 'foo/variables/fp_buckets:0'))
def testValuesAreIdempotent(self):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(size=(10, 3)) > 0.5, dtype=dtypes_lib.bool)
result, update_op = metric_ops.precision_recall_at_equal_thresholds(
labels=labels, predictions=predictions)
with self.cached_session() as sess:
# Run several updates.
sess.run(variables.local_variables_initializer())
for _ in range(3):
sess.run(update_op)
# Then verify idempotency.
initial_result = {
k: value.eval().tolist()
for k, value in result._asdict().items()
}
for _ in range(3):
self._testResultsEqual(initial_result, result)
def _testCase(self,
predictions,
labels,
expected_result,
dtype=dtypes_lib.float32,
eps=None,
weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type dtype.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
dtype: Data type to use for predictions and weights tensor. Default
is float32.
eps: Epsilon value to use for testing output values. If unspecified, use
default from assertAllClose.
weights: Optional weights tensor.
"""
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(predictions, dtype=dtype)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.bool)
weights_tensor = None
if weights:
weights_tensor = constant_op.constant(weights, dtype=dtype)
gotten_result, update_op = (
metric_ops.precision_recall_at_equal_thresholds(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor,
num_thresholds=3))
self.assertEqual(gotten_result.tp.dtype, dtype)
self.assertEqual(gotten_result.fp.dtype, dtype)
self.assertEqual(gotten_result.tn.dtype, dtype)
self.assertEqual(gotten_result.fn.dtype, dtype)
self.assertEqual(gotten_result.precision.dtype, dtype)
self.assertEqual(gotten_result.recall.dtype, dtype)
self.assertEqual(gotten_result.thresholds.dtype, dtype)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result, eps=eps)
def testAllTruePositives(self):
self._testCase(
[[1]], [[True]], {
'tp': [1, 1, 1],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [1.0, 1.0, 1.0],
'recall': [1.0, 1.0, 1.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllTrueNegatives(self):
self._testCase(
[[0]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 0, 0],
'tn': [0, 1, 1],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalsePositives(self):
self._testCase(
[[1]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 1, 1],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalseNegatives(self):
self._testCase(
[[0]], [[True]], {
'tp': [1, 0, 0],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 1, 1],
'precision': [1.0, 0.0, 0.0],
'recall': [1.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValues(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
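  # Same scenario with weights: at threshold 0.5 the surviving positives carry
  # weights 0.0, 0.5 and 1.0, so tp is 1.5 and precision stays 1.0 there.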
def testManyValuesWithWeights(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [1.5, 1.5, 0.0],
'fp': [2.5, 0.0, 0.0],
'tn': [0.0, 2.5, 2.5],
'fn': [0.0, 0.0, 1.5],
'precision': [0.375, 1.0, 0.0],
'recall': [1.0, 1.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
weights=[[0.0, 0.5, 2.0, 0.0, 0.5, 1.0]])
def testFloat64(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
dtype=dtypes_lib.float64)
def testFloat16(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
dtype=dtypes_lib.float16,
eps=1e-3)


class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_metric_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())


class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_metric_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)
    with self.cached_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))
      self.assertEqual(1, sensitivity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.8)
    with self.cached_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.8, sess.run(update_op))
      self.assertAlmostEqual(0.8, sensitivity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.4)
    with self.cached_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, sensitivity.eval())
def testWeighted(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, weights=weights, specificity=0.4)
    with self.cached_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.675, sess.run(update_op))
      self.assertAlmostEqual(0.675, sensitivity.eval())


# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run([prec_op, rec_op])
# Then verify idempotency.
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
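      # At threshold -1.0 every example is predicted positive, so precision is
      # 3/4 and recall is 1; at threshold 2.0 none are, so both drop to 0.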
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
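    # Negatives now center at 0.4 and positives at 0.6, so the 0.3 threshold
    # below classifies most examples as positive.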
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
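    # epsilon keeps the expected values finite when a count is zero; the bias
    # it introduces is far below the 2-decimal tolerance asserted below.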
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      for i in range(num_batches):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
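      # Each run of the update ops below dequeues and accumulates one batch.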
prec, prec_op = metrics.streaming_precision_at_thresholds(
tf_predictions, tf_labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds we should see the
      # accuracy improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
class StreamingFPRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_positive_rate_at_thresholds/false_positives:0',
'false_positive_rate_at_thresholds/true_negatives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fpr, _ = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fpr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fpr_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertAllClose(initial_fpr, fpr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.5, fpr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1, fpr.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0, fpr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fp = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 0:
fp += 1
else:
if labels[i] == 0:
tn += 1
epsilon = 1e-7
expected_fpr = fp / (epsilon + fp + tn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      for i in range(num_batches):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fpr_op)
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds we should see the
      # accuracy improve.
self.assertAlmostEqual(expected_fpr, fpr.eval(), 2)
class RecallAtPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7)
_assert_metric_variables(self, ('recall_at_precision/true_positives:0',
'recall_at_precision/false_negatives:0',
'recall_at_precision/false_positives:0',
'recall_at_precision/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertAlmostEqual(initial_recall, recall.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=1.0)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, recall.eval())
def testSomeCorrectHighPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3]
labels_values = [1, 1, 1, 1, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, recall.eval())
def testSomeCorrectLowPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
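    # The 5 predictions >= 0.6 contain 2 of the 3 positives, so precision
    # there is exactly 0.4 and the recall at precision 0.4 is 2.0 / 3.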
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def testWeighted(self):
predictions_values = [1, .9, .8, .7, .6]
labels_values = [1, 1, 0, 0, 1]
weights_values = [1, 1, 3, 4, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, weights=weights, precision=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def _test_strict_mode(self, strict_mode, target_precision, expected_recall):
num_thresholds = 11
predictions_values = [.2, .3, .5, .6, .7, .8, .9, .9, .9, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
# Resulting thresholds and the corresponding precision and recall values at
# each threshold:
# Thresholds [0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9]
# precisions: [0.3 0.2 0.1 0 0 0 0 0 0]
# recalls: [1.0 0.7 0.3 0 0 0 0 0 0]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels,
predictions,
num_thresholds=num_thresholds,
precision=target_precision,
strict_mode=strict_mode)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_recall, sess.run(update_op))
self.assertAlmostEqual(expected_recall, recall.eval())
def testStrictMode_Off(self):
    # strict_mode is turned off, so we return the recall at the threshold
    # whose precision (0.3) is closest to the target precision (0.9). The
    # recall corresponding to that threshold is 1.0.
self._test_strict_mode(
strict_mode=False, target_precision=0.9, expected_recall=1.0)
def testStrictMode_OnAndFail(self):
# strict_mode is turned on and we fail to reach the target precision at any
# threshold.
# Target precision: 0.9
# Diff: [-0.6 -0.7 -0.8 -0.9 -0.9 -0.9 -0.9 -0.9 -0.9]
# Reciprocal: [-1.6 -1.4 -1.3 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1]
    # Max index: 3, and the corresponding precision is 0, which is smaller
    # than the target precision 0.9. As a result, the expected recall is 0.
self._test_strict_mode(
strict_mode=True, target_precision=0.9, expected_recall=.0)
def testStrictMode_OnAndSucceed(self):
    # strict_mode is on and we can reach the target precision at a certain
    # threshold.
# Target precision: 0.2
# Diff: [0.1 0 -0.1 -0.2 -0.2 -0.2 -0.2 -0.2 -0.2]
# Reciprocal: [10 infty -10.0 -5.0 -5.0 -5.0 -5.0 -5.0 -5.0]
    # Max index: 1, and the corresponding precision is 0.2, which is no
    # smaller than the target precision 0.2. In this case, we return the
    # recall at index 1, which is 2.0/3 (0.7).
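    # A minimal NumPy sketch of the selection rule the comments above
    # describe (an illustration only, not the library implementation):
    #   import numpy as np
    #   precisions = np.array([0.3, 0.2, 0.1, 0., 0., 0., 0., 0., 0.])
    #   recalls = np.array([1.0, 0.7, 0.3, 0., 0., 0., 0., 0., 0.])
    #   target = 0.2
    #   with np.errstate(divide='ignore'):
    #     idx = np.argmax(1.0 / (precisions - target))  # -> 1 (diff is 0)
    #   recall = recalls[idx] if precisions[idx] >= target else 0.0  # -> 0.7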
self._test_strict_mode(
strict_mode=True, target_precision=0.2, expected_recall=2.0 / 3)
class PrecisionAtRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7)
_assert_metric_variables(self,
('precision_at_recall/true_positives:0',
'precision_at_recall/false_negatives:0',
'precision_at_recall/false_positives:0',
'precision_at_recall/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertAlmostEqual(initial_precision, precision.eval(), places=5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, precision.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = 1.0 - predictions
label_prior = math_ops.reduce_mean(labels)
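    # With perfectly inverted predictions, the only thresholds that reach the
    # target recall classify every example as positive, so precision equals
    # the positive-class prior.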
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(sess.run(label_prior), sess.run(update_op))
self.assertEqual(sess.run(label_prior), precision.eval())
def testSomeCorrectHighRecall(self):
predictions_values = [0.1, 0.2, 0.5, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, precision.eval())
def testSomeCorrectLowRecall(self):
predictions_values = [0.1, 0.2, 0.7, 0.3, 0.0, 0.1, 0.45, 0.5, 0.6, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
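    # Recall 0.4 requires catching 2 of the 5 positives; the threshold that
    # achieves it (predictions >= 0.6) also admits one negative (0.7), so
    # precision is 2.0 / 3.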
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0/3, sess.run(update_op))
self.assertAlmostEqual(2.0/3, precision.eval())
def testWeighted_multipleLabelDtypes(self):
for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions_values = [
0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.22, 0.25, 0.31, 0.35]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
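      # The lowest threshold reaching recall >= 0.8 admits the positives with
      # weights 7 + 8 + 9 + 10 = 34 and the negatives with weights 4 + 5 = 9,
      # so the expected precision is 34.0 / 43.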
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = math_ops.cast(labels_values, dtype=label_dtype)
weights = constant_op.constant(weights_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.8, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(34.0/43, sess.run(update_op))
self.assertAlmostEqual(34.0/43, precision.eval())
class StreamingFNRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_negative_rate_at_thresholds/false_negatives:0',
'false_negative_rate_at_thresholds/true_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fnr, _ = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fnr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fnr_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertAllClose(initial_fnr, fnr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.5, fnr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(1, fnr.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval())
self.assertAlmostEqual(1.0, fnr_high.eval())
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0, fnr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fn = 0
tp = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
if labels[i] == 1:
fn += 1
epsilon = 1e-7
expected_fnr = fn / (epsilon + fn + tp)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      for i in range(num_batches):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fnr_op)
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds we should see the
      # accuracy improve.
self.assertAlmostEqual(expected_fnr, fnr.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
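    # Per-row top-1 classes are [2, 0, 1, 2]; with every label equal to 0,
    # only row 1 is a hit, so recall is 0.25 at k=1, 0.5 at k=2, 1.0 at k=3.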
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_metric_variables(self,
('recall_at_1/count:0', 'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.cached_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
      sp_labels = sparse_tensor.SparseTensorValue(
          indices=np.array([[0], [1], [2]], np.int64),
          values=np.array([2, 7, 8], np.int64),
          dense_shape=np.array([10], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
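    # Average precision at k, as computed here: sum precision@r over the ranks
    # r <= k whose prediction is a hit, then divide by k.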
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
    # For both examples together, we expect both precision and average
    # precision to be the average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [
(ex1 + ex2) / 2 for ex1, ex2 in zip(precision_ex1, precision_ex2)
]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
    # For weighted examples, we expect the streaming average precision to be
    # the weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(
shape=(2, None), dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
predictions_top_k = (2, 1)
# Fails since rank of predictions_idx is less than one.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
        indices=[[0, 0], [0, 1], [0, 2], [0, 3],
                 [1, 0], [1, 1], [1, 2], [1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected_precision = 0.5
with self.cached_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
class StreamingSparseRecallTest(test.TestCase):
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
      # Class 2: 1 label, 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def _test_one_label_at_k1_weighted(self, labels):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
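    # Only the first example carries the class-3 label, so class-3 recall is
    # 1 whenever that example's weight is non-zero, and NaN when the weighted
    # label count for class 3 is zero.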
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_one_label_at_k1_weighted_sparse_labels(self):
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
self._test_one_label_at_k1_weighted(sparse_labels)
def test_one_label_at_k1_weighted_dense_labels(self):
dense_labels = np.array([[3], [2]], dtype=np.int64)
self._test_one_label_at_k1_weighted(dense_labels)
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1,
3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=2.0 / 2, class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
    # Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0], [0, 0, 0, 1]]
expected_recall = 0.5
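    # Worked example: the top-1 prediction is class 3 for both rows; of the
    # two labels (2 and 3), only 3 is retrieved, so recall@1 is 1 / 2 = 0.5.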
with self.cached_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
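    # Absolute errors are [1, 1, 4, 5]; with weights [0, 1, 0, 1] the
    # weighted mean is (1 + 5) / 2 = 3.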
error, update_op = metrics.streaming_mean_absolute_error(
predictions, labels, weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
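    # Entries with a zero normalizer contribute a relative error of 0 rather
    # than dividing by zero, so the metric evaluates to 0.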
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
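    # Squared errors are [1, 1, 16], so the mean is 18 / 3 = 6.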
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
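    # Squared errors are [1, 1, 16, 25]; with weights [0, 1, 0, 1] the
    # weighted mean is (1 + 25) / 2 = 13.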
error, update_op = metrics.streaming_mean_squared_error(
predictions, labels, weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
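      # Accumulated over both batches, the total squared error is
      # (81 + 25 + 16) + (36 + 1 + 49) = 208 across 6 values.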
error, update_op = metrics.streaming_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.cached_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
# Create the queue that populates one set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
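      # Expected values: mse0 accumulates (81 + 25 + 16) + (36 + 1 + 49) = 208
      # over 6 values; mse1 accumulates (25 + 16 + 4) + (16 + 9 + 9) = 79.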
mse0, update_op0 = metrics.streaming_mean_squared_error(
predictions0, labels0, name='msd0')
mse1, update_op1 = metrics.streaming_mean_squared_error(
predictions1, labels1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
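      # Over both batches, absolute errors sum to (9 + 5 + 4) + (6 + 1 + 7) =
      # 32 and squared errors to 208, each averaged across 6 values.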
mae, ma_update_op = metrics.streaming_mean_absolute_error(
predictions, labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
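      # Squared errors are [1, 1, 16], so RMSE is sqrt(18 / 3) = sqrt(6).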
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
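      # The weighted MSE is (1 + 25) / 2 = 13, so RMSE is sqrt(13).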
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels, weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])))
_assert_metric_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.cached_session() as sess:
predictions = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
labels = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
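      # np.cov's fweights are integer frequency weights: each sample is
      # counted weight-many times, matching the weighted streaming covariance.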
expected_cov = np.cov(
[2, 4, 6, 8], [1, 3, 2, 7], fweights=[0, 1, 3, 1])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(prev_expected_cov,
sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_cov,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(prev_expected_cov,
sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_cov,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
5)
prev_expected_cov = expected_cov
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])))
_assert_metric_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.cached_session() as sess:
predictions = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
labels = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
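      # Pearson r is the weighted covariance normalized by the product of the
      # weighted standard deviations: cov(x, y) / sqrt(var(x) * var(y)).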
cmat = np.cov(predictions, labels, fweights=weights)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(prev_expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_r,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(prev_expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
cmat = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(expected_r,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndSingletonBatches(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
weights = (np.arange(n).reshape(n // stride, stride) % stride == 0)
for row in weights:
np.random.shuffle(row)
      # Now weights is one-hot by row: exactly one item per batch has a
      # non-zero weight.
weights = weights.reshape((n,))
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
cmat = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
actual_r = sess.run(update_op, feed_dict=feed_dict)
self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
self.assertEqual(
np.isnan(expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(expected_r):
self.assertAlmostEqual(expected_r, actual_r, 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_metric_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
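    # Row-wise cosine distances: 0 (identical), 2 (opposite), 1 (orthogonal);
    # the mean is 3 / 3 = 1.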
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
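    # Only rows 2 and 3 carry weight: distances 2 (opposite vectors) and 1
    # (orthogonal vectors), so the weighted mean is (2 + 1) / 2 = 1.5.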
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_metric_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.cached_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
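      # Of the values [2, 4, 6, 8]: all 4 are below 100, three are below 7,
      # and none is below 1.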
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.cached_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
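      # The weights keep only values 2 and 8: both are below 100, only 2 is
      # below 7, and neither is below 1.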
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
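      # Per-class IOU: class 0 = 1/2, class 1 = 1/4, class 2 = 0 (no true
      # positives).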
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
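      # With the zero weights masking the 4th and 6th samples, the accumulated
      # confusion matrix is [[2, 0], [1, 1]], giving per-class IoU of
      # 2 / (2 + 1) for class 0 and 1 / (1 + 1) for class 1.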
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
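      # Class 2 never appears in either predictions or labels, so its row and
      # column of the confusion matrix are zero; as the expected value below
      # reflects, such classes are skipped and only classes 0 and 1 are
      # averaged.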
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]),
constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]),
constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
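      # Per-class IoU from the confusion matrix [[3, 0], [2, 5]]:
      #   class 0: 3 / (3 + 2 + 0) = 3/5, class 1: 5 / (5 + 0 + 2) = 5/7.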
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]),
constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]),
constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
weights = array_ops.concat([
constant_op.constant(0, shape=[1]),
constant_op.constant(1, shape=[8]),
constant_op.constant(0, shape=[1])
], 0)
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testMissingClassInLabels(self):
labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant(
[[[0, 0, 2, 1, 1, 0], [0, 1, 2, 2, 0, 1]], [[0, 0, 2, 1, 1, 1],
[1, 1, 2, 0, 0, 0]]])
num_classes = 3
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
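      # Per-class IoU is diag / (row_sum + col_sum - diag); class 2 has no
      # labels but does appear in predictions, so its IoU is 0 / (0 + 5 + 0)
      # and it still contributes to the mean over all three classes.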
self.assertAlmostEqual(1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 /
(0 + 5 + 0)), miou.eval())
def testMissingClassOverallSmall(self):
labels = constant_op.constant([0])
predictions = constant_op.constant([0])
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
self.assertAlmostEqual(1, miou.eval())
def testMissingClassOverallLarge(self):
labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant(
[[[0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1]], [[0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0]]])
num_classes = 3
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)),
miou.eval())
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_metric_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
with self.cached_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
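      # These expectations are consistent with _next_array_size returning the
      # smallest power of growth_factor that is >= the required size, e.g.
      # required=5, growth_factor=2 -> 2**ceil(log2(5)) = 8.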
def testStreamingConcat(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.cached_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.cached_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
class CountTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.count(array_ops.ones([4, 3]))
_assert_metric_variables(self, ['count/count:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.count(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.count(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testReturnType(self):
c, op = metrics.count(array_ops.ones([4, 3]))
self.assertTrue(isinstance(c, ops.Tensor))
self.assertTrue(isinstance(op, ops.Operation) or isinstance(op, ops.Tensor))
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0, sess.run(update_op), 5)
self.assertAlmostEqual(4.0, sess.run(update_op), 5)
self.assertAlmostEqual(6.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def test1dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [0.5])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1.2])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(3.4, result.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 0.5, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1.2, shape=(1,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(3.4, result.eval(), 5)
def test2dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1.1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(4.1, result.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1.1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(4.1, result.eval(), 5)
class CohenKappaTest(test.TestCase):
def _confusion_matrix_to_samples(self, confusion_matrix):
x, y = confusion_matrix.shape
pairs = []
for label in range(x):
for feature in range(y):
pairs += [label, feature] * confusion_matrix[label, feature]
pairs = np.array(pairs).reshape((-1, 2))
return pairs[:, 0], pairs[:, 1]
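  # For example (illustrative): confusion_matrix [[2, 1], [0, 3]] expands to
  # the pairs (0, 0), (0, 0), (0, 1), (1, 1), (1, 1), (1, 1), returned as two
  # aligned 1-D arrays of labels and predictions.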
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2)
_assert_metric_variables(self, (
'cohen_kappa/po:0',
'cohen_kappa/pe_row:0',
'cohen_kappa/pe_col:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
kappa, _ = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [kappa])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=2)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 3)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_kappa = kappa.eval()
for _ in range(10):
self.assertAlmostEqual(initial_kappa, kappa.eval(), 5)
def testBasic(self):
confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
    # overall total N = 36
    # po = [9, 8, 6], sum(po) = 23
    # pe_row = [15, 12, 9], pe_col = [13, 14, 9],
    # so pe = pe_row * pe_col / N = [5.42, 4.67, 2.25], sum(pe) = 12.34
    # finally, kappa = (sum(po) - sum(pe)) / (N - sum(pe))
    #                = (23 - 12.34) / (36 - 12.34)
    #                = 0.45
    # see: http://psych.unl.edu/psycrs/handcomp/hckappa.PDF
expect = 0.45
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
dtypes = [dtypes_lib.int16, dtypes_lib.int32, dtypes_lib.int64]
    shapes = [
        (len(labels,)),  # 1-dim
        (len(labels), 1)  # 2-dim
    ]
weights = [None, np.ones_like(labels)]
for dtype in dtypes:
for shape in shapes:
for weight in weights:
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
np.reshape(predictions, shape), dtype=dtype)
labels_tensor = constant_op.constant(
np.reshape(labels, shape), dtype=dtype)
kappa, update_op = metrics.cohen_kappa(
labels_tensor, predictions_tensor, 3, weights=weight)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 2)
self.assertAlmostEqual(expect, kappa.eval(), 2)
def testAllCorrect(self):
inputs = np.arange(0, 100) % 4
# confusion matrix
# [[25, 0, 0],
# [0, 25, 0],
# [0, 0, 25]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(inputs, inputs)
expect = 1.0
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testAllIncorrect(self):
labels = np.arange(0, 100) % 4
predictions = (labels + 1) % 4
# confusion matrix
# [[0, 25, 0],
# [0, 0, 25],
# [25, 0, 0]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(labels, predictions)
expect = -0.333333333333
with self.cached_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWeighted(self):
confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels, predictions, sample_weight=weights)
expect = 0.453466583385
with self.cached_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(
labels, predictions, 4, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWithMultipleUpdates(self):
confusion_matrix = np.array([[90, 30, 10, 20], [40, 80, 20, 30],
[20, 10, 60, 35], [15, 25, 30, 25]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
num_classes = confusion_matrix.shape[0]
batch_size = num_samples // 10
predictions_t = array_ops.placeholder(
dtypes_lib.float32, shape=(batch_size,))
labels_t = array_ops.placeholder(dtypes_lib.int32, shape=(batch_size,))
weights_t = array_ops.placeholder(dtypes_lib.float32, shape=(batch_size,))
kappa, update_op = metrics.cohen_kappa(
labels_t, predictions_t, num_classes, weights=weights_t)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for idx in range(0, num_samples, batch_size):
batch_start, batch_end = idx, idx + batch_size
sess.run(
update_op,
feed_dict={
labels_t: labels[batch_start:batch_end],
predictions_t: predictions[batch_start:batch_end],
weights_t: weights[batch_start:batch_end]
})
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels_np, predictions_np, sample_weight=weights_np)
expect = 0.289965397924
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testInvalidNumClasses(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaisesRegexp(ValueError, 'num_classes'):
metrics.cohen_kappa(labels, predictions, 1)
def testInvalidDimension(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
invalid_labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 2))
with self.assertRaises(ValueError):
metrics.cohen_kappa(invalid_labels, predictions, 3)
invalid_predictions = array_ops.placeholder(
dtypes_lib.float32, shape=(4, 2))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaises(ValueError):
metrics.cohen_kappa(labels, invalid_predictions, 3)
def testConditionalPackingOptimization(self):
placeholder = array_ops.placeholder(dtypes_lib.float32, [None])
values, update_op = metric_ops.streaming_concat(placeholder)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for feed in range(10):
sess.run(update_op, feed_dict={placeholder: [feed]})
print(sess.run(values))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/metrics/python/ops/metric_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Metrics that use histograms.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
def auc_using_histogram(boolean_labels,
scores,
score_range,
nbins=100,
collections=None,
check_shape=True,
name=None):
"""AUC computed by maintaining histograms.
Rather than computing AUC directly, this Op maintains Variables containing
histograms of the scores associated with `True` and `False` labels. By
comparing these the AUC is generated, with some discretization error.
See: "Efficient AUC Learning Curve Calculation" by Bouckaert.
This AUC Op updates in `O(batch_size + nbins)` time and works well even with
  large class imbalance. The accuracy is limited by discretization error due
  to the finite number of bins. If scores are concentrated in fewer bins,
  accuracy is lower. If this is a concern, we recommend trying different
numbers of bins and comparing results.
Args:
boolean_labels: 1-D boolean `Tensor`. Entry is `True` if the corresponding
record is in class.
scores: 1-D numeric `Tensor`, same shape as boolean_labels.
score_range: `Tensor` of shape `[2]`, same dtype as `scores`. The min/max
values of score that we expect. Scores outside range will be clipped.
nbins: Integer number of bins to use. Accuracy strictly increases as the
number of bins increases.
collections: List of graph collections keys. Internal histogram Variables
are added to these collections. Defaults to `[GraphKeys.LOCAL_VARIABLES]`.
check_shape: Boolean. If `True`, do a runtime shape check on the scores
and labels.
name: A name for this Op. Defaults to "auc_using_histogram".
Returns:
auc: `float32` scalar `Tensor`. Fetching this converts internal histograms
to auc value.
update_op: `Op`, when run, updates internal histograms.
"""
if collections is None:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
with variable_scope.variable_scope(
name, 'auc_using_histogram', [boolean_labels, scores, score_range]):
scores, boolean_labels = tensor_util.remove_squeezable_dimensions(
scores, boolean_labels)
score_range = ops.convert_to_tensor(score_range, name='score_range')
boolean_labels, scores = _check_labels_and_scores(
boolean_labels, scores, check_shape)
hist_true, hist_false = _make_auc_histograms(boolean_labels, scores,
score_range, nbins)
hist_true_acc, hist_false_acc, update_op = _auc_hist_accumulate(hist_true,
hist_false,
nbins,
collections)
auc = _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins)
return auc, update_op
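# Example usage (a minimal sketch; `sess`, `batches` and the placeholders are
# illustrative names, not part of this module):
#
#   labels = array_ops.placeholder(dtypes.bool, [None])
#   scores = array_ops.placeholder(dtypes.float32, [None])
#   auc, update_op = auc_using_histogram(
#       labels, scores, score_range=[0.0, 1.0], nbins=100)
#   sess.run(variables.local_variables_initializer())
#   for batch_labels, batch_scores in batches:
#     sess.run(update_op, {labels: batch_labels, scores: batch_scores})
#   print(sess.run(auc))  # fetches the AUC estimate from the histograms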
def _check_labels_and_scores(boolean_labels, scores, check_shape):
"""Check the rank of labels/scores, return tensor versions."""
with ops.name_scope('_check_labels_and_scores',
values=[boolean_labels, scores]):
boolean_labels = ops.convert_to_tensor(boolean_labels,
name='boolean_labels')
scores = ops.convert_to_tensor(scores, name='scores')
if boolean_labels.dtype != dtypes.bool:
      raise ValueError(
          'Argument boolean_labels should have dtype bool. Found: %s' %
          boolean_labels.dtype)
if check_shape:
labels_rank_1 = control_flow_ops.Assert(
math_ops.equal(1, array_ops.rank(boolean_labels)),
['Argument boolean_labels should have rank 1. Found: ',
boolean_labels.name, array_ops.shape(boolean_labels)])
scores_rank_1 = control_flow_ops.Assert(
math_ops.equal(1, array_ops.rank(scores)),
['Argument scores should have rank 1. Found: ', scores.name,
array_ops.shape(scores)])
with ops.control_dependencies([labels_rank_1, scores_rank_1]):
return boolean_labels, scores
else:
return boolean_labels, scores
def _make_auc_histograms(boolean_labels, scores, score_range, nbins):
"""Create histogram tensors from one batch of labels/scores."""
with variable_scope.variable_scope(
None, 'make_auc_histograms', [boolean_labels, scores, nbins]):
# Histogram of scores for records in this batch with True label.
hist_true = histogram_ops.histogram_fixed_width(
array_ops.boolean_mask(scores, boolean_labels),
score_range,
nbins=nbins,
dtype=dtypes.int64,
name='hist_true')
# Histogram of scores for records in this batch with False label.
hist_false = histogram_ops.histogram_fixed_width(
array_ops.boolean_mask(scores, math_ops.logical_not(boolean_labels)),
score_range,
nbins=nbins,
dtype=dtypes.int64,
name='hist_false')
return hist_true, hist_false
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
"""Accumulate histograms in new variables."""
with variable_scope.variable_scope(
None, 'hist_accumulate', [hist_true, hist_false]):
# Holds running total histogram of scores for records labeled True.
hist_true_acc = variable_scope.get_variable(
'hist_true_acc',
shape=[nbins],
dtype=hist_true.dtype,
initializer=init_ops.zeros_initializer(),
collections=collections,
trainable=False)
# Holds running total histogram of scores for records labeled False.
hist_false_acc = variable_scope.get_variable(
'hist_false_acc',
shape=[nbins],
dtype=hist_true.dtype,
initializer=init_ops.zeros_initializer(),
collections=collections,
trainable=False)
update_op = control_flow_ops.group(
hist_true_acc.assign_add(hist_true),
hist_false_acc.assign_add(hist_false),
name='update_op')
return hist_true_acc, hist_false_acc, update_op
def _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins):
"""Convert histograms to auc.
Args:
hist_true_acc: `Tensor` holding accumulated histogram of scores for records
that were `True`.
hist_false_acc: `Tensor` holding accumulated histogram of scores for
records that were `False`.
nbins: Integer number of bins in the histograms.
Returns:
Scalar `Tensor` estimating AUC.
"""
# Note that this follows the "Approximating AUC" section in:
# Efficient AUC learning curve calculation, R. R. Bouckaert,
# AI'06 Proceedings of the 19th Australian joint conference on Artificial
# Intelligence: advances in Artificial Intelligence
# Pages 181-191.
# Note that the above paper has an error, and we need to re-order our bins to
# go from high to low score.
# Normalize histogram so we get fraction in each bin.
normed_hist_true = math_ops.truediv(hist_true_acc,
math_ops.reduce_sum(hist_true_acc))
normed_hist_false = math_ops.truediv(hist_false_acc,
math_ops.reduce_sum(hist_false_acc))
# These become delta x, delta y from the paper.
delta_y_t = array_ops.reverse_v2(normed_hist_true, [0], name='delta_y_t')
delta_x_t = array_ops.reverse_v2(normed_hist_false, [0], name='delta_x_t')
# strict_1d_cumsum requires float32 args.
delta_y_t = math_ops.cast(delta_y_t, dtypes.float32)
delta_x_t = math_ops.cast(delta_x_t, dtypes.float32)
# Trapezoidal integration, \int_0^1 0.5 * (y_t + y_{t-1}) dx_t
y_t = _strict_1d_cumsum(delta_y_t, nbins)
first_trap = delta_x_t[0] * y_t[0] / 2.0
other_traps = delta_x_t[1:] * (y_t[1:] + y_t[:nbins - 1]) / 2.0
return math_ops.add(first_trap, math_ops.reduce_sum(other_traps), name='auc')
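# For intuition, the equivalent computation in NumPy (a sketch only, not used
# by this module):
#
#   delta_y = hist_true_acc[::-1] / hist_true_acc.sum()    # TPR increments
#   delta_x = hist_false_acc[::-1] / hist_false_acc.sum()  # FPR increments
#   y = np.cumsum(delta_y)
#   auc = delta_x[0] * y[0] / 2 + np.sum(delta_x[1:] * (y[1:] + y[:-1]) / 2)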
# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# Also see if cast to float32 above can be removed with new cumsum.
# See: https://github.com/tensorflow/tensorflow/issues/813
def _strict_1d_cumsum(tensor, len_tensor):
"""Cumsum of a 1D tensor with defined shape by padding and convolving."""
# Assumes tensor shape is fully defined.
with ops.name_scope('strict_1d_cumsum', values=[tensor]):
if len_tensor == 0:
return constant_op.constant([])
len_pad = len_tensor - 1
x = array_ops.pad(tensor, [[len_pad, 0]])
h = array_ops.ones_like(x)
return _strict_conv1d(x, h)[:len_tensor]
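# Why this works (a sketch): for x = [a, b, c], padding gives [0, 0, a, b, c];
# a 'SAME' convolution with an all-ones kernel of the same length produces
# window sums [a, a+b, a+b+c, ...], and keeping the first len_tensor entries
# yields exactly the cumulative sum [a, a+b, a+b+c].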
# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# See: https://github.com/tensorflow/tensorflow/issues/813
def _strict_conv1d(x, h):
"""Return x * h for rank 1 tensors x and h."""
with ops.name_scope('strict_conv1d', values=[x, h]):
x = array_ops.reshape(x, (1, -1, 1, 1))
h = array_ops.reshape(h, (-1, 1, 1, 1))
result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
return array_ops.reshape(result, [-1])
|
tensorflow-master
|
tensorflow/contrib/metrics/python/ops/histogram_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains metric-computing operations on streamed tensors.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.util.deprecation import deprecated
# Epsilon constant used to represent an extremely small quantity.
_EPSILON = 1e-7
@deprecated(None, 'Please switch to tf.metrics.true_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
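# Migration sketch for the deprecated wrappers in this file: a call such as
#   tp, tp_update = streaming_true_positives(predictions, labels)
# corresponds to
#   tp, tp_update = metrics.true_positives(labels=labels,
#                                          predictions=predictions)
# note the swapped positional argument order called out in each deprecation
# message.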
@deprecated(None, 'Please switch to tf.metrics.true_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.false_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.false_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.mean')
def streaming_mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the (weighted) mean of the given values.
The `streaming_mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
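# Worked example (a sketch): streaming batches [1., 3.] and then [5., 7.]
# with unit weights gives
#   update 1: total = 4.0,  count = 2, mean = 2.0
#   update 2: total = 16.0, count = 4, mean = 4.0
# matching mean = total / count from the docstring above.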
@deprecated(None, 'Please switch to tf.metrics.mean_tensor')
def streaming_mean_tensor(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `streaming_mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `streaming_mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean_tensor(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.accuracy. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_accuracy(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates how often `predictions` matches `labels`.
The `streaming_accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of any shape.
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.accuracy(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.precision. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_precision(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `streaming_precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.recall. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_recall(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `streaming_recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false positive rate of predictions with respect to labels.
The `false_positive_rate` function creates two local variables,
`false_positives` and `true_negatives`, that are used to compute the
false positive rate. This value is ultimately returned as
`false_positive_rate`, an idempotent operation that simply divides
`false_positives` by the sum of `false_positives` and `true_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: Scalar float `Tensor` with the value of
`false_positives` divided by the sum of `false_positives` and
`true_negatives`.
update_op: `Operation` that increments `false_positives` and
`true_negatives` variables appropriately and whose value matches
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_p, false_positives_update_op = metrics.false_positives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_n, true_negatives_update_op = metrics.true_negatives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fpr(fp, tn, name):
return array_ops.where(
math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)
fpr = compute_fpr(false_p, true_n, 'value')
update_op = compute_fpr(false_positives_update_op, true_negatives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
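# Illustrative usage sketch (not part of the original module; tensor values
# are invented for demonstration). Assumes TF 1.x graph mode, where this
# function is exported as `tf.contrib.metrics.streaming_false_positive_rate`.
#
#   import tensorflow as tf
#
#   predictions = tf.constant([1.0, 0.0, 1.0, 0.0])  # cast to bool internally
#   labels = tf.constant([1.0, 0.0, 0.0, 0.0])
#   fpr, update_op = tf.contrib.metrics.streaming_false_positive_rate(
#       predictions, labels)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())  # metric variables are local
#     sess.run(update_op)
#     print(sess.run(fpr))  # 1 false positive / 3 negatives ~= 0.333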
def streaming_false_negative_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false negative rate of predictions with respect to labels.
The `false_negative_rate` function creates two local variables,
`false_negatives` and `true_positives`, that are used to compute the
  false negative rate. This value is ultimately returned as
`false_negative_rate`, an idempotent operation that simply divides
`false_negatives` by the sum of `false_negatives` and `true_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_negative_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: Scalar float `Tensor` with the value of
`false_negatives` divided by the sum of `false_negatives` and
`true_positives`.
update_op: `Operation` that increments `false_negatives` and
`true_positives` variables appropriately and whose value matches
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_n, false_negatives_update_op = metrics.false_negatives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_p, true_positives_update_op = metrics.true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fnr(fn, tp, name):
return array_ops.where(
math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)
fnr = compute_fnr(false_n, true_p, 'value')
update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
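# Illustrative usage sketch (not part of the original module; values
# invented). Shows masking with zero weights, assuming the
# `tf.contrib.metrics` export of this function.
#
#   predictions = tf.constant([0.0, 1.0, 0.0, 1.0])
#   labels = tf.constant([1.0, 1.0, 1.0, 0.0])
#   weights = tf.constant([1.0, 1.0, 0.0, 1.0])  # mask out the third example
#   fnr, update_op = tf.contrib.metrics.streaming_false_negative_rate(
#       predictions, labels, weights=weights)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(fnr))  # 1 FN / (1 FN + 1 TP) = 0.5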
def _streaming_confusion_matrix_at_thresholds(predictions,
labels,
thresholds,
weights=None,
includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
  `true_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
broadcast_weights = weights_broadcast_ops.broadcast_weights(
math_ops.cast(weights, dtypes.float32), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_positives = metrics_impl.metric_variable([num_thresholds],
dtypes.float32,
name='true_positives')
is_true_positive = math_ops.cast(
math_ops.logical_and(label_is_pos, pred_is_pos), dtypes.float32)
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(
true_positives, math_ops.reduce_sum(is_true_positive, 1))
values['tp'] = true_positives
if 'fn' in includes:
false_negatives = metrics_impl.metric_variable([num_thresholds],
dtypes.float32,
name='false_negatives')
is_false_negative = math_ops.cast(
math_ops.logical_and(label_is_pos, pred_is_neg), dtypes.float32)
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(
false_negatives, math_ops.reduce_sum(is_false_negative, 1))
values['fn'] = false_negatives
if 'tn' in includes:
true_negatives = metrics_impl.metric_variable([num_thresholds],
dtypes.float32,
name='true_negatives')
is_true_negative = math_ops.cast(
math_ops.logical_and(label_is_neg, pred_is_neg), dtypes.float32)
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(
true_negatives, math_ops.reduce_sum(is_true_negative, 1))
values['tn'] = true_negatives
if 'fp' in includes:
false_positives = metrics_impl.metric_variable([num_thresholds],
dtypes.float32,
name='false_positives')
is_false_positive = math_ops.cast(
math_ops.logical_and(label_is_neg, pred_is_pos), dtypes.float32)
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(
false_positives, math_ops.reduce_sum(is_false_positive, 1))
values['fp'] = false_positives
return values, update_ops
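# Illustrative sketch of the helper's dict-based contract (not part of the
# original module; values invented). `update_ops` is a dict keyed like
# `values`, and `tf.Session.run` accepts such a dict of fetches directly.
#
#   predictions = tf.constant([0.1, 0.6, 0.9])
#   labels = tf.constant([False, True, True])
#   values, update_ops = _streaming_confusion_matrix_at_thresholds(
#       predictions, labels, thresholds=[0.5], includes=('tp', 'fn'))
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_ops)  # runs both the 'tp' and 'fn' update ops
#     print(sess.run(values['tp']))  # [2.]: 0.6 and 0.9 exceed 0.5, labels True
#     print(sess.run(values['fn']))  # [0.]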
def streaming_true_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
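  """Computes true positives at provided threshold values."""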
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tp',))
return values['tp'], update_ops['tp']
def streaming_false_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
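  """Computes false negatives at provided threshold values."""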
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fn',))
return values['fn'], update_ops['fn']
def streaming_false_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
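  """Computes false positives at provided threshold values."""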
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fp',))
return values['fp'], update_ops['fp']
def streaming_true_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
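  """Computes true negatives at provided threshold values."""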
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tn',))
return values['tn'], update_ops['tn']
def streaming_curve_points(labels=None,
predictions=None,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes curve (ROC or PR) values for a prespecified number of points.
The `streaming_curve_points` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
that are used to compute the curve values. To discretize the curve, a linearly
spaced set of thresholds is used to compute pairs of recall and precision
values.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall curve.
name: An optional variable_scope name.
Returns:
points: A `Tensor` with shape [num_thresholds, 2] that contains points of
the curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
TODO(chizeng): Consider rewriting this method to make use of logic within the
precision_recall_at_equal_thresholds method (to improve run time).
"""
with variable_scope.variable_scope(name, 'curve_points',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = _EPSILON # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_points(tp, fn, tn, fp):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
return fp_rate, rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
return rec, prec
xs, ys = compute_points(values['tp'], values['fn'], values['tn'],
values['fp'])
points = array_ops.stack([xs, ys], axis=1)
update_op = control_flow_ops.group(*update_ops.values())
if metrics_collections:
ops.add_to_collections(metrics_collections, points)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return points, update_op
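# Illustrative usage sketch (not part of the original module; values
# invented), assuming the `tf.contrib.metrics.streaming_curve_points` export.
#
#   points, update_op = tf.contrib.metrics.streaming_curve_points(
#       labels=tf.constant([0.0, 0.0, 1.0, 1.0]),
#       predictions=tf.constant([0.1, 0.4, 0.35, 0.8]),
#       num_thresholds=5)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(points))  # shape [5, 2]; each row is (FPR, recall) on ROC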
@deprecated(None, 'Please switch to tf.metrics.auc. Note that the order of '
'the labels and predictions arguments has been switched.')
def streaming_auc(predictions,
labels,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes the approximate AUC via a Riemann sum.
The `streaming_auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
  controls the degree of discretization, with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall curve.
name: An optional variable_scope name.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.auc(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
num_thresholds=num_thresholds,
curve=curve,
updates_collections=updates_collections,
name=name)
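# Illustrative usage sketch (not part of the original module; values
# invented). The Riemann-sum approximation over 200 thresholds closely
# matches the exact pairwise AUC of 0.75 for this toy batch.
#
#   auc, update_op = tf.contrib.metrics.streaming_auc(
#       predictions=tf.constant([0.1, 0.4, 0.35, 0.8]),
#       labels=tf.constant([False, False, True, True]))
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(auc))  # ~0.75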
def _compute_dynamic_auc(labels, predictions, curve='ROC', weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This could be slow for large batches, but has the advantage of not
having its results degrade depending on the distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
    curve: The name of the curve to be computed, 'ROC' for the Receiver
      Operating Characteristic or 'PR' for the Precision-Recall curve.
weights: A 1-D `Tensor` of weights whose values are `float64`.
Returns:
A scalar `Tensor` containing the area-under-curve value for the input.
"""
# Compute the total weight and the total positive weight.
size = array_ops.size(predictions)
if weights is None:
weights = array_ops.ones_like(labels, dtype=dtypes.float64)
labels, predictions, weights = metrics_impl._remove_squeezable_dimensions(
labels, predictions, weights)
total_weight = math_ops.reduce_sum(weights)
total_positive = math_ops.reduce_sum(
array_ops.where(
math_ops.greater(labels, 0), weights,
array_ops.zeros_like(labels, dtype=dtypes.float64)))
def continue_computing_dynamic_auc():
"""Continues dynamic auc computation, entered if labels are not all equal.
Returns:
A scalar `Tensor` containing the area-under-curve value.
"""
# Sort the predictions descending, keeping the same order for the
# corresponding labels and weights.
ordered_predictions, indices = nn.top_k(predictions, k=size)
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# Get the counts of the unique ordered predictions.
_, _, counts = array_ops.unique_with_counts(ordered_predictions)
# Compute the indices of the split points between different predictions.
splits = math_ops.cast(
array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)
# Count the positives to the left of the split indices.
true_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.greater(ordered_labels, 0), ordered_weights,
array_ops.zeros_like(ordered_labels,
dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
if curve == 'ROC':
      # Compute the weight of the negatives to the left of every split point
      # and the total weight of the negatives, for computing the FPR.
false_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.less(ordered_labels, 1), ordered_weights,
array_ops.zeros_like(
ordered_labels, dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
total_negative = total_weight - total_positive
x_axis_values = math_ops.truediv(false_positives, total_negative)
y_axis_values = math_ops.truediv(true_positives, total_positive)
elif curve == 'PR':
x_axis_values = math_ops.truediv(true_positives, total_positive)
# For conformance, set precision to 1 when the number of positive
# classifications is 0.
positives = array_ops.gather(
array_ops.pad(math_ops.cumsum(ordered_weights), paddings=[[1, 0]]),
splits)
y_axis_values = array_ops.where(
math_ops.greater(splits, 0),
math_ops.truediv(true_positives, positives),
array_ops.ones_like(true_positives, dtype=dtypes.float64))
# Calculate trapezoid areas.
heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) / 2.0
widths = math_ops.abs(
math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))
return math_ops.reduce_sum(math_ops.multiply(heights, widths))
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
return control_flow_ops.cond(
math_ops.logical_or(
math_ops.equal(total_positive, 0),
math_ops.equal(total_positive, total_weight)),
true_fn=lambda: array_ops.constant(0, dtypes.float64),
false_fn=continue_computing_dynamic_auc)
def streaming_dynamic_auc(labels,
predictions,
curve='ROC',
metrics_collections=(),
updates_collections=(),
name=None,
weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This has the advantage of being resilient to the distribution of
predictions by aggregating across batches, accumulating labels and predictions
and performing the final calculation using all of the concatenated values.
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 whose values are castable to
      `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
curve: The name of the curve for which to compute AUC, 'ROC' for the
      Receiver Operating Characteristic or 'PR' for the Precision-Recall curve.
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
    weights: A `Tensor` of non-negative weights whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
Returns:
auc: A scalar `Tensor` containing the current area-under-curve value.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels` and `predictions` have mismatched shapes or if
`curve` isn't a recognized curve type.
"""
if curve not in ['PR', 'ROC']:
raise ValueError('curve must be either ROC or PR, %s unknown' % curve)
with variable_scope.variable_scope(name, default_name='dynamic_auc'):
labels.get_shape().assert_is_compatible_with(predictions.get_shape())
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(
labels, name='concat_labels')
if weights is not None:
weights = array_ops.reshape(
math_ops.cast(weights, dtypes.float64), [-1])
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op = control_flow_ops.group(update_labels, update_preds,
update_weights)
else:
weights_accum = None
update_op = control_flow_ops.group(update_labels, update_preds)
auc = _compute_dynamic_auc(
labels_accum, preds_accum, curve=curve, weights=weights_accum)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
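# Illustrative usage sketch (not part of the original module; values
# invented). Unlike `streaming_auc`, every accumulated prediction acts as a
# threshold, so the toy batch below yields exactly 0.75 rather than an
# approximation.
#
#   auc, update_op = tf.contrib.metrics.streaming_dynamic_auc(
#       labels=tf.constant([0, 0, 1, 1]),
#       predictions=tf.constant([0.1, 0.4, 0.35, 0.8]))
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)   # concatenates this batch into the accumulators
#     print(sess.run(auc))  # 0.75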
def _compute_placement_auc(labels, predictions, weights, alpha,
logit_transformation, is_valid):
"""Computes the AUC and asymptotic normally distributed confidence interval.
The calculations are achieved using the fact that AUC = P(Y_1>Y_0) and the
  concept of placement values for each labeled group, as presented by DeLong
  and DeLong (1988). The actual algorithm used is a more computationally efficient
approach presented by Sun and Xu (2014). This could be slow for large batches,
but has the advantage of not having its results degrade depending on the
distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
is_valid: A bool tensor describing whether the input is valid.
Returns:
A 1-D `Tensor` containing the area-under-curve, lower, and upper confidence
interval values.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
AucData = collections_lib.namedtuple('AucData', ['auc', 'lower', 'upper'])
# pylint: enable=invalid-name
  # If all the labels are the same or the number of observations is too few,
  # AUC isn't well-defined.
size = array_ops.size(predictions, out_type=dtypes.int32)
# Count the total number of positive and negative labels in the input.
total_0 = math_ops.reduce_sum(
math_ops.cast(1 - labels, weights.dtype) * weights)
total_1 = math_ops.reduce_sum(math_ops.cast(labels, weights.dtype) * weights)
# Sort the predictions ascending, as well as
# (i) the corresponding labels and
# (ii) the corresponding weights.
ordered_predictions, indices = nn.top_k(predictions, k=size, sorted=True)
ordered_predictions = array_ops.reverse(
ordered_predictions, axis=array_ops.zeros(1, dtypes.int32))
indices = array_ops.reverse(indices, axis=array_ops.zeros(1, dtypes.int32))
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# We now compute values required for computing placement values.
# We generate a list of indices (segmented_indices) of increasing order. An
# index is assigned for each unique prediction float value. Prediction
# values that are the same share the same index.
_, segmented_indices = array_ops.unique(ordered_predictions)
# We create 2 tensors of weights. weights_for_true is non-zero for true
# labels. weights_for_false is non-zero for false labels.
float_labels_for_true = math_ops.cast(ordered_labels, dtypes.float32)
float_labels_for_false = 1.0 - float_labels_for_true
weights_for_true = ordered_weights * float_labels_for_true
weights_for_false = ordered_weights * float_labels_for_false
# For each set of weights with the same segmented indices, we add up the
# weight values. Note that for each label, we deliberately rely on weights
# for the opposite label.
weight_totals_for_true = math_ops.segment_sum(weights_for_false,
segmented_indices)
weight_totals_for_false = math_ops.segment_sum(weights_for_true,
segmented_indices)
# These cumulative sums of weights importantly exclude the current weight
# sums.
cum_weight_totals_for_true = math_ops.cumsum(
weight_totals_for_true, exclusive=True)
cum_weight_totals_for_false = math_ops.cumsum(
weight_totals_for_false, exclusive=True)
# Compute placement values using the formula. Values with the same segmented
# indices and labels share the same placement values.
placements_for_true = (
(cum_weight_totals_for_true + weight_totals_for_true / 2.0) /
(math_ops.reduce_sum(weight_totals_for_true) + _EPSILON))
placements_for_false = (
(cum_weight_totals_for_false + weight_totals_for_false / 2.0) /
(math_ops.reduce_sum(weight_totals_for_false) + _EPSILON))
# We expand the tensors of placement values (for each label) so that their
# shapes match that of predictions.
placements_for_true = array_ops.gather(placements_for_true, segmented_indices)
placements_for_false = array_ops.gather(placements_for_false,
segmented_indices)
# Select placement values based on the label for each index.
placement_values = (
placements_for_true * float_labels_for_true +
placements_for_false * float_labels_for_false)
# Split placement values by labeled groups.
placement_values_0 = placement_values * math_ops.cast(1 - ordered_labels,
weights.dtype)
weights_0 = ordered_weights * math_ops.cast(1 - ordered_labels, weights.dtype)
placement_values_1 = placement_values * math_ops.cast(ordered_labels,
weights.dtype)
weights_1 = ordered_weights * math_ops.cast(ordered_labels, weights.dtype)
# Calculate AUC using placement values
auc_0 = (
math_ops.reduce_sum(weights_0 * (1. - placement_values_0)) /
(total_0 + _EPSILON))
auc_1 = (
math_ops.reduce_sum(weights_1 * (placement_values_1)) /
(total_1 + _EPSILON))
auc = array_ops.where(math_ops.less(total_0, total_1), auc_1, auc_0)
# Calculate variance and standard error using the placement values.
var_0 = (
math_ops.reduce_sum(
weights_0 * math_ops.square(1. - placement_values_0 - auc_0)) /
(total_0 - 1. + _EPSILON))
var_1 = (
math_ops.reduce_sum(
weights_1 * math_ops.squared_difference(placement_values_1, auc_1)) /
(total_1 - 1. + _EPSILON))
auc_std_err = math_ops.sqrt((var_0 / (total_0 + _EPSILON)) +
(var_1 / (total_1 + _EPSILON)))
# Calculate asymptotic normal confidence intervals
std_norm_dist = Normal(loc=0., scale=1.)
z_value = std_norm_dist.quantile((1.0 - alpha) / 2.0)
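  # Note: for alpha in (0, 1), (1.0 - alpha) / 2.0 is below 0.5, so `z_value`
  # is negative; adding it below therefore yields the lower confidence bound
  # and subtracting it yields the upper bound.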
if logit_transformation:
estimate = math_ops.log(auc / (1. - auc + _EPSILON))
std_err = auc_std_err / (auc * (1. - auc + _EPSILON))
transformed_auc_lower = estimate + (z_value * std_err)
transformed_auc_upper = estimate - (z_value * std_err)
def inverse_logit_transformation(x):
exp_negative = math_ops.exp(math_ops.negative(x))
return 1. / (1. + exp_negative + _EPSILON)
auc_lower = inverse_logit_transformation(transformed_auc_lower)
auc_upper = inverse_logit_transformation(transformed_auc_upper)
else:
estimate = auc
std_err = auc_std_err
auc_lower = estimate + (z_value * std_err)
auc_upper = estimate - (z_value * std_err)
  # If the estimate is 1 or 0, no variance is present, so the confidence
  # interval collapses to the point estimate.
  # N.B. This can be misleading, since the number of observations may simply
  # be too low.
lower = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))), auc, auc_lower)
upper = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))), auc, auc_upper)
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
trivial_value = array_ops.constant(0.0)
return AucData(*control_flow_ops.cond(
is_valid, lambda: [auc, lower, upper], lambda: [trivial_value] * 3))
def auc_with_confidence_intervals(labels,
predictions,
weights=None,
alpha=0.95,
logit_transformation=True,
metrics_collections=(),
updates_collections=(),
name=None):
"""Computes the AUC and asymptotic normally distributed confidence interval.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC curve and its confidence interval using
placement values. This has the advantage of being resilient to the
distribution of predictions by aggregating across batches, accumulating labels
and predictions and performing the final calculation using all of the
concatenated values.
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 whose values are castable to
      `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
Returns:
auc: A 1-D `Tensor` containing the current area-under-curve, lower, and
upper confidence interval values.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels`, `predictions`, and `weights` have mismatched shapes
or if `alpha` isn't in the range (0,1).
"""
if not (alpha > 0 and alpha < 1):
raise ValueError('alpha must be between 0 and 1; currently %.02f' % alpha)
if weights is None:
weights = array_ops.ones_like(predictions)
with variable_scope.variable_scope(
name,
default_name='auc_with_confidence_intervals',
values=[labels, predictions, weights]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
total_weight = math_ops.reduce_sum(weights)
weights = array_ops.reshape(weights, [-1])
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(
labels, name='concat_labels')
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op_for_valid_case = control_flow_ops.group(update_labels,
update_preds,
update_weights)
# Only perform updates if this case is valid.
all_labels_positive_or_0 = math_ops.logical_and(
math_ops.equal(math_ops.reduce_min(labels), 0),
math_ops.equal(math_ops.reduce_max(labels), 1))
sums_of_weights_at_least_1 = math_ops.greater_equal(total_weight, 1.0)
is_valid = math_ops.logical_and(all_labels_positive_or_0,
sums_of_weights_at_least_1)
update_op = control_flow_ops.cond(
sums_of_weights_at_least_1, lambda: update_op_for_valid_case,
control_flow_ops.no_op)
auc = _compute_placement_auc(
labels_accum,
preds_accum,
weights_accum,
alpha=alpha,
logit_transformation=logit_transformation,
is_valid=is_valid)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
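# Illustrative usage sketch (not part of the original module; values
# invented), assuming the `tf.contrib.metrics.auc_with_confidence_intervals`
# export.
#
#   auc, update_op = tf.contrib.metrics.auc_with_confidence_intervals(
#       labels=tf.constant([0, 0, 1, 1]),
#       predictions=tf.constant([0.1, 0.4, 0.35, 0.8]))
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(auc))  # AucData(auc=..., lower=..., upper=...)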
def precision_recall_at_equal_thresholds(labels,
predictions,
weights=None,
num_thresholds=None,
use_locking=None,
name=None):
"""A helper method for creating metrics related to precision-recall curves.
These values are true positives, false negatives, true negatives, false
positives, precision, and recall. This function returns a data structure that
contains ops within it.
Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)
space and run time), this op exhibits O(T + N) space and run time, where T is
the number of thresholds and N is the size of the predictions tensor. Hence,
it may be advantageous to use this function when `predictions` is big.
For instance, prefer this method for per-pixel classification tasks, for which
the predictions tensor may be very large.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding label in `labels`, and counts as a single tp/fp/tn/fn value at
each threshold. This is then multiplied with `weights` which can be used to
reweight certain values, or more commonly used for masking values.
Args:
labels: A bool `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional; If provided, a `Tensor` that has the same dtype as, and
broadcastable to, `predictions`. This tensor is multiplied by counts.
num_thresholds: Optional; Number of thresholds, evenly distributed in `[0,
1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins is 1
less than `num_thresholds`. Using an even `num_thresholds` value instead
of an odd one may yield unfriendly edges for bins.
use_locking: Optional; If True, the op will be protected by a lock.
Otherwise, the behavior is undefined, but may exhibit less contention.
Defaults to True.
name: Optional; variable_scope name. If not provided, the string
      'precision_recall_at_equal_thresholds' is used.
Returns:
result: A named tuple (See PrecisionRecallData within the implementation of
this function) with properties that are variables of shape
`[num_thresholds]`. The names of the properties are tp, fp, tn, fn,
precision, recall, thresholds. Types are same as that of predictions.
update_op: An op that accumulates values.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
PrecisionRecallData = collections_lib.namedtuple(
'PrecisionRecallData',
['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])
# pylint: enable=invalid-name
if num_thresholds is None:
num_thresholds = 201
if weights is None:
weights = 1.0
if use_locking is None:
use_locking = True
check_ops.assert_type(labels, dtypes.bool)
with variable_scope.variable_scope(name,
'precision_recall_at_equal_thresholds',
(labels, predictions, weights)):
# Make sure that predictions are within [0.0, 1.0].
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# It's important we aggregate using float64 since we're accumulating a lot
# of 1.0's for the true/false labels, and accumulating to float32 will
# be quite inaccurate even with just a modest amount of values (~20M).
    # We use float64 instead of integer primarily since the GPU scatter kernel
    # only supports floats.
agg_dtype = dtypes.float64
f_labels = math_ops.cast(labels, agg_dtype)
weights = math_ops.cast(weights, agg_dtype)
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Flatten predictions and labels.
predictions = array_ops.reshape(predictions, [-1])
true_labels = array_ops.reshape(true_labels, [-1])
false_labels = array_ops.reshape(false_labels, [-1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
    #   B(i) = Sum( (predictions == t), t_i <= t < t_{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can use tf.compat.v1.scatter_add() to update the buckets in one pass.
#
# This implementation exhibits a run time and space complexity of O(T + N),
# where T is the number of thresholds and N is the size of predictions.
# Metrics that rely on _streaming_confusion_matrix_at_thresholds instead
# exhibit a complexity of O(T * N).
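    # For example, with num_thresholds = 5 the bucket width is 0.25, and a
    # prediction of 0.6 lands in bucket floor(0.6 * 4) = 2; the reversed
    # cumulative sum over the buckets then yields C(t_i) for all five
    # thresholds in a single pass.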
# Compute the bucket indices for each prediction value.
bucket_indices = math_ops.cast(
math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)
with ops.name_scope('variables'):
tp_buckets_v = metrics_impl.metric_variable([num_thresholds],
agg_dtype,
name='tp_buckets')
fp_buckets_v = metrics_impl.metric_variable([num_thresholds],
agg_dtype,
name='fp_buckets')
with ops.name_scope('update_op'):
update_tp = state_ops.scatter_add(
tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)
update_fp = state_ops.scatter_add(
fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)
# Set up the cumulative sums to compute the actual metrics.
tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')
fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
# We use a minimum to prevent division by 0.
epsilon = ops.convert_to_tensor(1e-7, dtype=agg_dtype)
precision = tp / math_ops.maximum(epsilon, tp + fp)
recall = tp / math_ops.maximum(epsilon, tp + fn)
# Convert all tensors back to predictions' dtype (as per function contract).
out_dtype = predictions.dtype
_convert = lambda tensor: math_ops.cast(tensor, out_dtype)
result = PrecisionRecallData(
tp=_convert(tp),
fp=_convert(fp),
tn=_convert(tn),
fn=_convert(fn),
precision=_convert(precision),
recall=_convert(recall),
thresholds=_convert(math_ops.lin_space(0.0, 1.0, num_thresholds)))
update_op = control_flow_ops.group(update_tp, update_fp)
return result, update_op
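# Illustrative usage sketch (not part of the original module; values
# invented). Note that `labels` must already be `bool` for this function.
#
#   result, update_op = tf.contrib.metrics.precision_recall_at_equal_thresholds(
#       labels=tf.constant([False, True, True, False]),
#       predictions=tf.constant([0.2, 0.9, 0.6, 0.4]),
#       num_thresholds=5)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(result.precision))  # one precision value per threshold
#     print(sess.run(result.recall))     # one recall value per threshold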
def streaming_specificity_at_sensitivity(predictions,
labels,
sensitivity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `streaming_specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
sensitivity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.specificity_at_sensitivity(
sensitivity=sensitivity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
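# Illustrative usage sketch (not part of the original module; values
# invented).
#
#   specificity, update_op = (
#       tf.contrib.metrics.streaming_specificity_at_sensitivity(
#           predictions=tf.constant([0.1, 0.4, 0.35, 0.8]),
#           labels=tf.constant([False, False, True, True]),
#           sensitivity=0.9))
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(specificity))  # specificity at a threshold chosen to
#                                   # achieve the requested sensitivity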
def streaming_sensitivity_at_specificity(predictions,
labels,
specificity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the sensitivity at a given specificity.
The `streaming_sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
specificity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.sensitivity_at_specificity(
specificity=specificity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.precision_at_thresholds. Note that '
            'the order of the labels and predictions arguments has been '
            'switched.')
def streaming_precision_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `streaming_precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
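# Illustrative usage sketch (not part of the original module; values
# invented). As noted in the deprecation above, new code should prefer
# tf.metrics.precision_at_thresholds with the argument order swapped.
#
#   precision, update_op = tf.contrib.metrics.streaming_precision_at_thresholds(
#       predictions=tf.constant([0.2, 0.6, 0.9]),
#       labels=tf.constant([False, True, True]),
#       thresholds=[0.5, 0.75])
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(precision))  # ~[1.0, 1.0]: every prediction above each
#                                 # threshold has a True label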
@deprecated(None,
'Please switch to tf.metrics.recall_at_thresholds. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_recall_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `streaming_recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fpr values for different `thresholds` on `predictions`.
The `streaming_false_positive_rate_at_thresholds` function creates two
  local variables, `false_positives` and `true_negatives`, for various values of
thresholds. `false_positive_rate[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `False`, divided by the total weight of `False` values in `labels`
(`false_positives[i] / (false_positives[i] + true_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_positives` and
`true_negatives` variables that are used in the computation of
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fp', 'tn'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fpr(fp, tn, name):
return math_ops.div(fp, epsilon + fp + tn, name='fpr_' + name)
fpr = compute_fpr(values['fp'], values['tn'], 'value')
update_op = compute_fpr(update_ops['fp'], update_ops['tn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
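# Illustrative usage sketch (not part of the original module; values
# invented).
#
#   fpr, update_op = (
#       tf.contrib.metrics.streaming_false_positive_rate_at_thresholds(
#           predictions=tf.constant([0.2, 0.6, 0.9]),
#           labels=tf.constant([False, True, False]),
#           thresholds=[0.5]))
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(fpr))  # ~[0.5]: one of the two False-label examples is
#                           # predicted above the threshold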
def streaming_false_negative_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fnr values for different `thresholds` on `predictions`.
The `streaming_false_negative_rate_at_thresholds` function creates two
  local variables, `false_negatives` and `true_positives`, for various values
  of thresholds. `false_negative_rate[i]` is defined as the total weight of
  values in `predictions` at most `thresholds[i]` whose corresponding entry in
  `labels` is `True`, divided by the total weight of `True` values in `labels`
(`false_negatives[i] / (false_negatives[i] + true_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `false_negative_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_negatives` and
`true_positives` variables that are used in the computation of
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fn', 'tp'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fnr(fn, tp, name):
return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)
fnr = compute_fnr(values['fn'], values['tp'], 'value')
update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
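# For example (illustrative):
#   _at_k_name('precision', 5)           -> 'precision_at_5'
#   _at_k_name('recall', 5, class_id=3)  -> 'recall_at_5_class3'
#   _at_k_name('precision', class_id=2)  -> 'precision_at_k_class2'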
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
'and reshape labels from [batch_size] to [batch_size, 1].')
def streaming_recall_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall@k of the predictions with respect to dense labels.
The `streaming_recall_at_k` function creates two local variables, `total` and
`count`, that are used to compute the recall@k frequency. This frequency is
ultimately returned as `recall_at_<k>`: an idempotent operation that simply
divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with
shape [batch_size] whose elements indicate whether or not the corresponding
label is in the top `k` `predictions`. Then `update_op` increments `total`
with the reduced sum of `weights` where `in_top_k` is `True`, and it
increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A float `Tensor` of dimension [batch_size, num_classes].
labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,
`int64`.
k: The number of top elements to look at for computing recall.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall_at_k`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
recall_at_k: A `Tensor` representing the recall@k, the fraction of labels
which fall into the top `k` predictions.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `recall_at_k`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
in_top_k = math_ops.cast(nn.in_top_k(predictions, labels, k), dtypes.float32)
return streaming_mean(in_top_k, weights, metrics_collections,
updates_collections, name or _at_k_name('recall', k))
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_recall_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we'll calculate recall as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
actual positives (the full `labels` row).
  If `class_id` is specified, we calculate recall by considering only the rows
  in the batch for which `class_id` is in `labels`, and computing the
  fraction of them for which `class_id` is also among the top `k` highest
  `predictions`.
`streaming_sparse_recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >=
1. Commonly, N=1 and predictions has shape [batch size, num_classes]. The
final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN,
num_labels], where N >= 1 and num_labels is the number of target classes
for the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count towards
`false_negative_at_<k>`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should be
added to.
updates_collections: An optional list of collections that updates should be
added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.recall_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
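# Usage sketch (illustrative): recall@2 for a batch of two examples, assuming
# TF 1.x graph mode and `import tensorflow as tf`.
#
#   logits = tf.constant([[0.1, 0.9, 0.4],   # top-2 classes: 1, 2
#                         [0.8, 0.2, 0.7]])  # top-2 classes: 0, 2
#   labels = tf.constant([[1], [1]], dtype=tf.int64)
#   recall, update_op = streaming_sparse_recall_at_k(logits, labels, k=2)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(recall))  # 0.5: label 1 is in the top-2 of row 0 only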
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
positives (all top `k` `predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >=
1. Commonly, N=1 and predictions has shape [batch size, num_classes]. The
final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN,
num_labels], where N >= 1 and num_labels is the number of target classes
for the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should be
added to.
updates_collections: An optional list of collections that updates should be
added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.precision_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_top_k(top_k_predictions,
labels,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of top-k predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of
true positives (i.e., correct predictions, items in `top_k_predictions`
that are found in the corresponding row in `labels`) to positives (all
`top_k_predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_top_k` creates two local variables,
`true_positive_at_k` and `false_positive_at_k`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_k`: an idempotent operation that simply divides
`true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_k`. Internally, set operations applied to `top_k_predictions`
and `labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_k` and
`false_positive_at_k` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and top_k_predictions has shape [batch size, k]. The final
dimension contains the indices of top-k labels. [D1, ... DN] must match
`labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN,
num_labels], where N >= 1 and num_labels is the number of target classes
for the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.
Values should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should be
added to.
updates_collections: An optional list of collections that updates should be
added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
ValueError: If `top_k_predictions` has rank < 2.
"""
default_name = _at_k_name('precision', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.precision_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
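# Usage sketch (illustrative): precision over precomputed top-k indices,
# assuming TF 1.x graph mode and `import tensorflow as tf`.
#
#   top_k = tf.constant([[1, 2], [0, 2]], dtype=tf.int64)
#   labels = tf.constant([[1], [1]], dtype=tf.int64)
#   prec, update_op = streaming_sparse_precision_at_top_k(top_k, labels)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(prec))  # 0.25: 1 of the 4 predicted indices is a label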
def sparse_recall_at_top_k(labels,
top_k_predictions,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
`sparse_recall_at_top_k` creates two local variables, `true_positive_at_<k>`
and `false_negative_at_<k>`, that are used to compute the recall_at_k
frequency. This frequency is ultimately returned as `recall_at_<k>`: an
idempotent operation that simply divides `true_positive_at_<k>` by total
(`true_positive_at_<k>` + `false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Set operations applied to `top_k` and `labels` calculate the
true positives and false negatives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_negative_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN,
num_labels], where N >= 1 and num_labels is the number of target classes
for the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.
Values should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count towards
`false_negative_at_<k>`.
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and top_k_predictions has shape [batch size, k]. The final
dimension contains the indices of top-k labels. [D1, ... DN] must match
`labels`.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should be
added to.
updates_collections: An optional list of collections that updates should be
added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
default_name = _at_k_name('recall', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.recall_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def _compute_recall_at_precision(tp, fp, fn, precision, name,
strict_mode=False):
"""Helper function to compute recall at a given `precision`.
Args:
tp: The number of true positives.
fp: The number of false positives.
fn: The number of false negatives.
precision: The precision for which the recall will be calculated.
name: An optional variable_scope name.
strict_mode: If true and there exists a threshold where the precision is no
smaller than the target precision, return the corresponding recall at the
threshold. Otherwise, return 0. If false, find the threshold where the
precision is closest to the target precision and return the recall at the
threshold.
Returns:
The recall at a given `precision`.
"""
precisions = math_ops.div(tp, tp + fp + _EPSILON)
if not strict_mode:
tf_index = math_ops.argmin(
math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)
# Now, we have the implicit threshold, so compute the recall:
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
else:
    # We aim to find the threshold whose precision is the smallest one that is
    # no smaller than the target precision.
    # The rationale:
    # 1. Compute the difference between each threshold's precision and the
    #    target precision.
    # 2. Take the reciprocal of those differences. This ranks positive values
    #    before negative values, and smaller positives before larger
    #    positives, so `argmax` picks the tightest admissible precision.
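    # Numeric sketch (illustrative): with target precision 0.7 and
    # per-threshold precisions [0.5, 0.72, 0.9], the differences are
    # [-0.2, 0.02, 0.2] and their reciprocals roughly [-5, 50, 5], so
    # `argmax` picks index 1: the smallest precision meeting the target.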
tf_index = math_ops.argmax(
math_ops.div(1.0, precisions - precision + _EPSILON),
0,
output_type=dtypes.int32)
def _return_good_recall():
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
return control_flow_ops.cond(precisions[tf_index] >= precision,
_return_good_recall, lambda: .0)
def recall_at_precision(labels,
predictions,
precision,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None,
strict_mode=False):
"""Computes `recall` at `precision`.
  The `recall_at_precision` function creates three local variables,
  `tp` (true positives), `fp` (false positives), and `fn` (false negatives),
that are used to compute the `recall` at the given `precision` value. The
threshold for the given `precision` value is computed and used to evaluate the
corresponding `recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall`. `update_op` increments the `tp`, `fp` and `fn` counts with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
precision: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
`precision`.
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
    strict_mode: If true and there exists a threshold where the precision is
      no smaller than the target precision, return the corresponding recall at
      the threshold. Otherwise, return 0. If false, find the threshold where
      the precision is closest to the target precision and return the recall
      at the threshold.
Returns:
recall: A scalar `Tensor` representing the recall at the given
`precision` value.
update_op: An operation that increments the `tp`, `fp` and `fn`
variables appropriately and whose value matches `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`precision` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if not 0 <= precision <= 1:
raise ValueError('`precision` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'recall_at_precision',
(predictions, labels, weights)):
thresholds = [
i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
]
thresholds = [0.0 - _EPSILON] + thresholds + [1.0 + _EPSILON]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
recall = _compute_recall_at_precision(values['tp'], values['fp'],
values['fn'], precision, 'value',
strict_mode)
update_op = _compute_recall_at_precision(update_ops['tp'], update_ops['fp'],
update_ops['fn'], precision,
'update_op', strict_mode)
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
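# Usage sketch (illustrative), assuming TF 1.x graph mode and
# `import tensorflow as tf`; note that `labels` comes first.
#
#   predictions = tf.constant([0.9, 0.8, 0.7, 0.2])
#   labels = tf.constant([True, True, False, True])
#   recall, update_op = recall_at_precision(labels, predictions,
#                                           precision=0.75)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(recall))  # ~1.0: at the loosest threshold, precision is
#                              # already 3/4, so all three positives count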
def precision_at_recall(labels,
predictions,
target_recall,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision at a given recall.
This function creates variables to track the true positives, false positives,
true negatives, and false negatives at a set of thresholds. Among those
thresholds where recall is at least `target_recall`, precision is computed
at the threshold where recall is closest to `target_recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
precision at `target_recall`. `update_op` increments the counts of true
positives, false positives, true negatives, and false negatives with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about precision and recall, see
http://en.wikipedia.org/wiki/Precision_and_recall
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
target_recall: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
recall.
metrics_collections: An optional list of collections to which `precision`
should be added.
updates_collections: An optional list of collections to which `update_op`
should be added.
name: An optional variable_scope name.
Returns:
precision: A scalar `Tensor` representing the precision at the given
`target_recall` value.
update_op: An operation that increments the variables for tracking the
true positives, false positives, true negatives, and false negatives and
whose value matches `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`target_recall` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_recall is not '
'supported when eager execution is enabled.')
if target_recall < 0 or target_recall > 1:
raise ValueError('`target_recall` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'precision_at_recall',
(predictions, labels, weights)):
kepsilon = 1e-7 # Used to avoid division by zero.
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
def compute_precision_at_recall(tp, fp, fn, name):
"""Computes the precision at a given recall.
Args:
tp: True positives.
fp: False positives.
fn: False negatives.
name: A name for the operation.
Returns:
The precision at the desired recall.
"""
recalls = math_ops.div(tp, tp + fn + kepsilon)
# Because recall is monotone decreasing as a function of the threshold,
# the smallest recall exceeding target_recall occurs at the largest
# threshold where recall >= target_recall.
admissible_recalls = math_ops.cast(
math_ops.greater_equal(recalls, target_recall), dtypes.int64)
tf_index = math_ops.reduce_sum(admissible_recalls) - 1
# Now we have the threshold at which to compute precision:
return math_ops.div(tp[tf_index] + kepsilon,
tp[tf_index] + fp[tf_index] + kepsilon, name)
precision_value = compute_precision_at_recall(values['tp'], values['fp'],
values['fn'], 'value')
update_op = compute_precision_at_recall(update_ops['tp'], update_ops['fp'],
update_ops['fn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, precision_value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return precision_value, update_op
def streaming_sparse_average_precision_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
  See `sparse_average_precision_at_k` for details on formula. `weights` are
  applied to the result of `sparse_average_precision_at_k`.
`streaming_sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >=
1. Commonly, N=1 and `predictions` has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN,
num_labels], where N >= 1 and num_labels is the number of target classes
for the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should be
added to.
updates_collections: An optional list of collections that updates should be
added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
return metrics.average_precision_at_k(
k=k,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sparse_average_precision_at_top_k(top_k_predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`streaming_sparse_average_precision_at_top_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
the true positives and false positives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and `top_k_predictions` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range [0,
num_classes).
labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN,
num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1
and num_labels is the number of target classes for the associated
prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels].
[D1, ... DN] must match `top_k_predictions`. Values should be in range [0,
num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should be
added to.
updates_collections: An optional list of collections that updates should be
added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if the last dimension of top_k_predictions is not set.
"""
return metrics_impl._streaming_sparse_average_precision_at_top_k( # pylint: disable=protected-access
predictions_idx=top_k_predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.mean_absolute_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_absolute_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `streaming_mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
  sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or the
same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_absolute_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
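# Usage sketch (illustrative), assuming TF 1.x graph mode and
# `import tensorflow as tf`.
#
#   predictions = tf.constant([1.0, 2.0, 4.0])
#   labels = tf.constant([1.0, 3.0, 2.0])
#   mae, update_op = streaming_mean_absolute_error(predictions, labels)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(mae))  # 1.0: mean of |0|, |-1|, |2|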
def streaming_mean_relative_error(predictions,
labels,
normalizer,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `streaming_mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or the
same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_relative_error(
normalizer=normalizer,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `streaming_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or the
same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(
None, 'Please switch to tf.metrics.root_mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_root_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `streaming_root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or the
same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.root_mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_covariance(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the unbiased sample covariance between `predictions` and `labels`.
The `streaming_covariance` function creates four local variables,
`comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to
compute the sample covariance between predictions and labels across multiple
batches of data. The covariance is ultimately returned as an idempotent
operation that simply divides `comoment` by `count` - 1. We use `count` - 1
in order to get an unbiased estimate.
The algorithm used for this online computation is described in
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.
Specifically, the formula used to combine two sample comoments is
`C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
The comoment for a single batch of data is simply
`sum((x - E[x]) * (y - E[y]))`, optionally weighted.
If `weights` is not None, then it is used to compute weighted comoments,
means, and count. NOTE: these weights are treated as "frequency weights", as
opposed to "reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
To facilitate the computation of covariance across multiple batches of data,
the function creates an `update_op` operation, which updates underlying
variables and returns the updated covariance.
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or the
same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
covariance: A `Tensor` representing the current unbiased sample covariance,
`comoment` / (`count` - 1).
update_op: An operation that updates the local variables appropriately.
Raises:
ValueError: If labels and predictions are of different sizes or if either
`metrics_collections` or `updates_collections` are not a list or tuple.
"""
with variable_scope.variable_scope(name, 'covariance',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
mean_prediction = metrics_impl.metric_variable([],
dtypes.float32,
name='mean_prediction')
mean_label = metrics_impl.metric_variable([],
dtypes.float32,
name='mean_label')
comoment = metrics_impl.metric_variable( # C_A in update equation
[], dtypes.float32, name='comoment')
if weights is None:
batch_count = math_ops.cast(array_ops.size(labels),
dtypes.float32) # n_B in eqn
weighted_predictions = predictions
weighted_labels = labels
else:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
batch_count = math_ops.reduce_sum(weights) # n_B in eqn
weighted_predictions = math_ops.multiply(predictions, weights)
weighted_labels = math_ops.multiply(labels, weights)
update_count = state_ops.assign_add(count_, batch_count) # n_AB in eqn
prev_count = update_count - batch_count # n_A in update equation
# We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
# batch_mean_prediction is E[x_B] in the update equation
batch_mean_prediction = math_ops.div_no_nan(
math_ops.reduce_sum(weighted_predictions), batch_count)
delta_mean_prediction = math_ops.div_no_nan(
(batch_mean_prediction - mean_prediction) * batch_count, update_count)
update_mean_prediction = state_ops.assign_add(mean_prediction,
delta_mean_prediction)
# prev_mean_prediction is E[x_A] in the update equation
prev_mean_prediction = update_mean_prediction - delta_mean_prediction
# batch_mean_label is E[y_B] in the update equation
batch_mean_label = math_ops.div_no_nan(
math_ops.reduce_sum(weighted_labels), batch_count)
delta_mean_label = math_ops.div_no_nan(
(batch_mean_label - mean_label) * batch_count, update_count)
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label
unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *
(labels - batch_mean_label))
# batch_comoment is C_B in the update equation
if weights is None:
batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
else:
batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals *
weights)
# View delta_comoment as = C_AB - C_A in the update equation above.
# Since C_A is stored in a var, by how much do we need to increment that var
# to make the var = C_AB?
delta_comoment = (
batch_comoment + (prev_mean_prediction - batch_mean_prediction) *
(prev_mean_label - batch_mean_label) *
(prev_count * batch_count / update_count))
update_comoment = state_ops.assign_add(comoment, delta_comoment)
covariance = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='covariance')
with ops.control_dependencies([update_comoment]):
update_op = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, covariance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return covariance, update_op
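# Usage sketch (illustrative): accumulating covariance across two batches,
# assuming TF 1.x graph mode and `import tensorflow as tf`.
#
#   x = tf.placeholder(tf.float32, [None])
#   y = tf.placeholder(tf.float32, [None])
#   cov, update_op = streaming_covariance(x, y)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op, {x: [1., 2.], y: [1., 2.]})
#     sess.run(update_op, {x: [3., 4.], y: [3., 4.]})
#     print(sess.run(cov))  # ~1.667: unbiased variance of [1, 2, 3, 4]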
def streaming_pearson_correlation(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes Pearson correlation coefficient between `predictions`, `labels`.
The `streaming_pearson_correlation` function delegates to
`streaming_covariance` the tracking of three [co]variances:
- `streaming_covariance(predictions, labels)`, i.e. covariance
- `streaming_covariance(predictions, predictions)`, i.e. variance
- `streaming_covariance(labels, labels)`, i.e. variance
The product-moment correlation ultimately returned is an idempotent operation
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To
facilitate correlation computation across multiple batches, the function
groups the `update_op`s of the underlying streaming_covariance and returns an
`update_op`.
If `weights` is not None, then it is used to compute a weighted correlation.
NOTE: these weights are treated as "frequency weights", as opposed to
"reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as predictions.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or the
same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
pearson_r: A `Tensor` representing the current Pearson product-moment
correlation coefficient, the value of
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.
update_op: An operation that updates the underlying variables appropriately.
Raises:
ValueError: If `labels` and `predictions` are of different sizes, or if
`weights` is the wrong size, or if either `metrics_collections` or
`updates_collections` are not a `list` or `tuple`.
"""
with variable_scope.variable_scope(name, 'pearson_r',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Broadcast weights here to avoid duplicate broadcasting in each call to
# `streaming_covariance`.
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
cov, update_cov = streaming_covariance(
predictions, labels, weights=weights, name='covariance')
var_predictions, update_var_predictions = streaming_covariance(
predictions, predictions, weights=weights, name='variance_predictions')
var_labels, update_var_labels = streaming_covariance(
labels, labels, weights=weights, name='variance_labels')
pearson_r = math_ops.truediv(
cov,
math_ops.multiply(
math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
name='pearson_r')
update_op = math_ops.truediv(
update_cov,
math_ops.multiply(
math_ops.sqrt(update_var_predictions),
math_ops.sqrt(update_var_labels)),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, pearson_r)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return pearson_r, update_op
# TODO(nsilberman): add a 'normalized' flag so that the user can request
# normalization if the inputs are not normalized.
def streaming_mean_cosine_distance(predictions,
labels,
dim,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `streaming_mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of the same shape as `labels`.
labels: A `Tensor` of arbitrary shape.
dim: The dimension along which the cosine distance is computed.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
and whose dimension `dim` is 1.
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(
radial_diffs, axis=[
dim,
], keepdims=True)
mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,
name or 'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
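# Usage sketch (illustrative): inputs are assumed to be unit-normalized (see
# the TODO above the function definition about a 'normalized' flag). Assumes
# TF 1.x graph mode and `import tensorflow as tf`.
#
#   predictions = tf.constant([[1., 0.], [0., 1.]])  # unit vectors
#   labels = tf.constant([[1., 0.], [1., 0.]])       # unit vectors
#   dist, update_op = streaming_mean_cosine_distance(
#       predictions, labels, dim=1)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(dist))  # 0.5: per-row distances are 0.0 and 1.0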
def streaming_percentage_less(values,
threshold,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
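  Example (an illustrative sketch; `losses` is a hypothetical numeric tensor):
  ```python
  percentage, update_op = streaming_percentage_less(
      values=losses, threshold=0.5)
  ```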
"""
return metrics.percentage_below(
values=values,
threshold=threshold,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_iou(predictions,
labels,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened, if its rank > 1.
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened, if its rank > 1.
num_classes: The possible number of labels the prediction task can have.
This value must be provided, since a confusion matrix of dimension =
[num_classes, num_classes] will be allocated.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `mean_iou` should
be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
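  Example (an illustrative sketch; `predicted_classes` and `ground_truth` are
  hypothetical integer class-id tensors):
  ```python
  miou, update_op = streaming_mean_iou(
      predictions=predicted_classes, labels=ground_truth, num_classes=21)
  ```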
"""
return metrics.mean_iou(
num_classes=num_classes,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def _next_array_size(required_size, growth_factor=1.5):
"""Calculate the next size for reallocating a dynamic array.
Args:
required_size: number or tf.Tensor specifying required array capacity.
growth_factor: optional number or tf.Tensor specifying the growth factor
between subsequent allocations.
Returns:
tf.Tensor with dtype=int32 giving the next array size.
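  For example, with the default `growth_factor` of 1.5, a `required_size` of 10
  gives `exponent = ceil(log(10) / log(1.5)) = 6`, so the next array size is
  `ceil(1.5**6) = 12`.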
"""
exponent = math_ops.ceil(
math_ops.log(math_ops.cast(required_size, dtypes.float32)) /
math_ops.log(math_ops.cast(growth_factor, dtypes.float32)))
return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
def streaming_concat(values,
axis=0,
max_size=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Concatenate values along an axis across batches.
The function `streaming_concat` creates two local variables, `array` and
`size`, that are used to store concatenated values. Internally, `array` is
used as storage for a dynamic array (if `max_size` is `None`), which ensures
that updates can be run in amortized constant time.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that appends the values of a tensor and returns the
length of the concatenated axis.
This op allows for evaluating metrics that cannot be updated incrementally
using the same framework as other streaming metrics.
Args:
values: `Tensor` to concatenate. Rank and the shape along all axes other
than the axis to concatenate along must be statically known.
axis: optional integer axis to concatenate along.
max_size: optional integer maximum size of `value` along the given axis.
Once the maximum size is reached, further updates are no-ops. By default,
there is no maximum size: the array is resized as necessary.
metrics_collections: An optional list of collections that `value` should be
added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
value: A `Tensor` representing the concatenated values.
update_op: An operation that concatenates the next values.
Raises:
ValueError: if `values` does not have a statically known rank, `axis` is
not in the valid range or the size of `values` is not statically known
along any axis other than `axis`.
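  Example (an illustrative sketch; `batch_preds` is a hypothetical tensor whose
  shape is statically known except along axis 0):
  ```python
  all_preds, update_op = streaming_concat(batch_preds, axis=0)
  ```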
"""
with variable_scope.variable_scope(name, 'streaming_concat', (values,)):
# pylint: disable=invalid-slice-index
values_shape = values.get_shape()
if values_shape.dims is None:
raise ValueError('`values` must have a statically known rank')
ndim = len(values_shape)
if axis < 0:
axis += ndim
if not 0 <= axis < ndim:
raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))
fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]
if any(value is None for value in fixed_shape):
raise ValueError('all dimensions of `values` other than the dimension to '
'concatenate along must have statically known size')
# We move `axis` to the front of the internal array so assign ops can be
# applied to contiguous slices
init_size = 0 if max_size is None else max_size
init_shape = [init_size] + fixed_shape
array = metrics_impl.metric_variable(
init_shape, values.dtype, validate_shape=False, name='array')
size = metrics_impl.metric_variable([], dtypes.int32, name='size')
perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
valid_array = array[:size]
valid_array.set_shape([None] + fixed_shape)
value = array_ops.transpose(valid_array, perm, name='concat')
values_size = array_ops.shape(values)[axis]
if max_size is None:
batch_size = values_size
else:
batch_size = math_ops.minimum(values_size, max_size - size)
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
next_size = _next_array_size(new_size)
next_shape = array_ops.stack([next_size] + fixed_shape)
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
return control_flow_ops.no_op()
new_size = size + batch_size
array_size = array_ops.shape_internal(array, optimize=False)[0]
maybe_reallocate_op = control_flow_ops.cond(new_size > array_size,
reallocate,
control_flow_ops.no_op)
with ops.control_dependencies([maybe_reallocate_op]):
append_values_op = array[size:new_size].assign(batch_values)
with ops.control_dependencies([append_values_op]):
update_op = size.assign(new_size)
if metrics_collections:
ops.add_to_collections(metrics_collections, value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value, update_op
# pylint: enable=invalid-slice-index
def aggregate_metrics(*value_update_tuples):
"""Aggregates the metric value tensors and update ops into two lists.
Args:
*value_update_tuples: a variable number of tuples, each of which contains
the pair of (value_tensor, update_op) from a streaming metric.
Returns:
A list of value `Tensor` objects and a list of update ops.
Raises:
ValueError: if `value_update_tuples` is empty.
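  Example (an illustrative sketch; `absolute_errors` and `relative_errors` are
  hypothetical tensors):
  ```python
  value_ops, update_ops = aggregate_metrics(
      streaming_mean(absolute_errors),
      streaming_mean(relative_errors))
  ```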
"""
if not value_update_tuples:
raise ValueError('Expected at least one value_tensor/update_op pair')
value_ops, update_ops = zip(*value_update_tuples)
return list(value_ops), list(update_ops)
def aggregate_metric_map(names_to_tuples):
"""Aggregates the metric names to tuple dictionary.
This function is useful for pairing metric names with their associated value
and update ops when the list of metrics is long. For example:
```python
metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(
    predictions, labels, weights),
'Mean Relative Error': slim.metrics.streaming_mean_relative_error(
    predictions, labels, labels, weights),
'RMSE Linear': slim.metrics.streaming_root_mean_squared_error(
    predictions, labels, weights),
'RMSE Log': slim.metrics.streaming_root_mean_squared_error(
    predictions, labels, weights),
})
```
Args:
names_to_tuples: a map of metric names to tuples, each of which contains the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A dictionary from metric names to value ops and a dictionary from metric
names to update ops.
"""
metric_names = names_to_tuples.keys()
value_ops, update_ops = zip(*names_to_tuples.values())
return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))
def count(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the number of examples, or sum of `weights`.
This metric keeps track of the denominator in `tf.compat.v1.metrics.mean`.
When evaluating some metric (e.g. mean) on one or more subsets of the data,
this auxiliary metric is useful for keeping track of how many examples there
are in each subset.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions. Only its shape is used.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
count: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the metric from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
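  Example (an illustrative sketch; `features` and `mask` are hypothetical
  tensors):
  ```python
  num_examples, update_op = count(values=features, weights=mask)
  ```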
"""
if context.executing_eagerly():
raise RuntimeError('tf.contrib.metrics.count is not supported when eager '
'execution is enabled.')
with variable_scope.variable_scope(name, 'count', (values, weights)):
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.cast(array_ops.size(values), dtypes.float32)
else:
values = math_ops.cast(values, dtypes.float32)
values, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values,
labels=None,
weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.cast(weights, dtypes.float32), values)
num_values = math_ops.reduce_sum(weights)
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count_, num_values)
count_ = metrics_impl._aggregate_variable(count_, metrics_collections) # pylint: disable=protected-access
if updates_collections:
ops.add_to_collections(updates_collections, update_count_op)
return count_, update_count_op
def cohen_kappa(labels,
predictions_idx,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates Cohen's kappa.
[Cohen's kappa](https://en.wikipedia.org/wiki/Cohen's_kappa) is a statistic
that measures inter-annotator agreement.
The `cohen_kappa` function calculates the confusion matrix, and creates three
local variables to compute the Cohen's kappa: `po`, `pe_row`, and `pe_col`,
which refer to the diagonal part, rows and columns totals of the confusion
matrix, respectively. This value is ultimately returned as `kappa`, an
idempotent operation that is calculated by
pe = (pe_row * pe_col) / N
k = (sum(po) - sum(pe)) / (N - sum(pe))
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`kappa`. `update_op` weights each prediction by the corresponding value in
`weights`.
Class labels are expected to start at 0. E.g., if `num_classes` is three,
then the possible labels are [0, 1, 2].
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
NOTE: Equivalent to `sklearn.metrics.cohen_kappa_score`, but this method
doesn't support a weighted confusion matrix yet.
Args:
labels: 1-D `Tensor` of real labels for the classification task. Must be
one of the following types: int16, int32, int64.
predictions_idx: 1-D `Tensor` of predicted class indices for a given
classification. Must have the same type as `labels`.
num_classes: The possible number of labels.
weights: Optional `Tensor` whose shape matches `predictions_idx`.
metrics_collections: An optional list of collections that `kappa` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
kappa: Scalar float `Tensor` representing the current Cohen's kappa.
update_op: `Operation` that increments `po`, `pe_row` and `pe_col`
variables appropriately and whose value matches `kappa`.
Raises:
ValueError: If `num_classes` is less than 2, or `predictions` and `labels`
have mismatched shapes, or if `weights` is not `None` and its shape
doesn't match `predictions`, or if either `metrics_collections` or
`updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
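  Example (an illustrative sketch; `rater_a` and `rater_b` are hypothetical
  1-D integer tensors of class ids):
  ```python
  kappa, update_op = cohen_kappa(
      labels=rater_a, predictions_idx=rater_b, num_classes=3)
  ```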
"""
if context.executing_eagerly():
raise RuntimeError('tf.contrib.metrics.cohen_kappa is not supported '
'when eager execution is enabled.')
if num_classes < 2:
raise ValueError('`num_classes` must be >= 2. Found: {}'.format(num_classes))
with variable_scope.variable_scope(name, 'cohen_kappa',
(labels, predictions_idx, weights)):
# Convert 2-dim (num, 1) to 1-dim (num,)
labels.get_shape().with_rank_at_most(2)
if labels.get_shape().ndims == 2:
labels = array_ops.squeeze(labels, axis=[-1])
predictions_idx, labels, weights = (
metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions_idx,
labels=labels,
weights=weights))
predictions_idx.get_shape().assert_is_compatible_with(labels.get_shape())
stat_dtype = (
dtypes.int64
if weights is None or weights.dtype.is_integer else dtypes.float32)
po = metrics_impl.metric_variable((num_classes,), stat_dtype, name='po')
pe_row = metrics_impl.metric_variable((num_classes,),
stat_dtype,
name='pe_row')
pe_col = metrics_impl.metric_variable((num_classes,),
stat_dtype,
name='pe_col')
# Table of the counts of agreement:
counts_in_table = confusion_matrix.confusion_matrix(
labels,
predictions_idx,
num_classes=num_classes,
weights=weights,
dtype=stat_dtype,
name='counts_in_table')
po_t = array_ops.diag_part(counts_in_table)
pe_row_t = math_ops.reduce_sum(counts_in_table, axis=0)
pe_col_t = math_ops.reduce_sum(counts_in_table, axis=1)
update_po = state_ops.assign_add(po, po_t)
update_pe_row = state_ops.assign_add(pe_row, pe_row_t)
update_pe_col = state_ops.assign_add(pe_col, pe_col_t)
def _calculate_k(po, pe_row, pe_col, name):
po_sum = math_ops.reduce_sum(po)
total = math_ops.reduce_sum(pe_row)
pe_sum = math_ops.reduce_sum(
math_ops.div_no_nan(
math_ops.cast(pe_row * pe_col, dtypes.float64),
math_ops.cast(total, dtypes.float64)))
po_sum, pe_sum, total = (math_ops.cast(po_sum, dtypes.float64),
math_ops.cast(pe_sum, dtypes.float64),
math_ops.cast(total, dtypes.float64))
# kappa = (po - pe) / (N - pe)
k = metrics_impl._safe_scalar_div( # pylint: disable=protected-access
po_sum - pe_sum,
total - pe_sum,
name=name)
return k
kappa = _calculate_k(po, pe_row, pe_col, name='value')
update_op = _calculate_k(
update_po, update_pe_row, update_pe_col, name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, kappa)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return kappa, update_op
__all__ = [
'auc_with_confidence_intervals',
'aggregate_metric_map',
'aggregate_metrics',
'cohen_kappa',
'count',
'precision_recall_at_equal_thresholds',
'recall_at_precision',
'sparse_recall_at_top_k',
'streaming_accuracy',
'streaming_auc',
'streaming_curve_points',
'streaming_dynamic_auc',
'streaming_false_negative_rate',
'streaming_false_negative_rate_at_thresholds',
'streaming_false_negatives',
'streaming_false_negatives_at_thresholds',
'streaming_false_positive_rate',
'streaming_false_positive_rate_at_thresholds',
'streaming_false_positives',
'streaming_false_positives_at_thresholds',
'streaming_mean',
'streaming_mean_absolute_error',
'streaming_mean_cosine_distance',
'streaming_mean_iou',
'streaming_mean_relative_error',
'streaming_mean_squared_error',
'streaming_mean_tensor',
'streaming_percentage_less',
'streaming_precision',
'streaming_precision_at_thresholds',
'streaming_recall',
'streaming_recall_at_k',
'streaming_recall_at_thresholds',
'streaming_root_mean_squared_error',
'streaming_sensitivity_at_specificity',
'streaming_sparse_average_precision_at_k',
'streaming_sparse_average_precision_at_top_k',
'streaming_sparse_precision_at_k',
'streaming_sparse_precision_at_top_k',
'streaming_sparse_recall_at_k',
'streaming_specificity_at_sensitivity',
'streaming_true_negatives',
'streaming_true_negatives_at_thresholds',
'streaming_true_positives',
'streaming_true_positives_at_thresholds',
]
|
tensorflow-master
|
tensorflow/contrib/metrics/python/ops/metric_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for set_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import sets
set_size = sets.set_size
set_intersection = sets.set_intersection
set_difference = sets.set_difference
set_union = sets.set_union
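# Example (an illustrative sketch, assuming `tf` is imported): the last
# dimension of each input is treated as a set.
#   a = tf.constant([[1, 2, 3]], dtype=tf.int64)
#   b = tf.constant([[2, 3, 4]], dtype=tf.int64)
#   intersection = set_intersection(a, b)  # SparseTensor with values [2, 3]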
|
tensorflow-master
|
tensorflow/contrib/metrics/python/ops/set_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for rewriting graphs for quantized training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.quantize.python.quantize_graph import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"create_eval_graph",
"create_training_graph",
"experimental_create_eval_graph",
"experimental_create_training_graph",
]
remove_undocumented(__name__, _allowed_symbols)
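# Example (an illustrative sketch, assuming `tf` is imported): rewrite the
# default graph for quantization-aware training after the model is built.
#   create_training_graph(input_graph=tf.get_default_graph(), quant_delay=0)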
|
tensorflow-master
|
tensorflow/contrib/quantize/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_matcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python import ops as contrib_ops
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class GraphMatcherTest(test_util.TensorFlowTestCase):
def test_conv_layer(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
g = ops.Graph()
with g.as_default():
inputs = array_ops.placeholder(dtypes.float32, shape=[8, 5, 5, 3])
with contrib_ops.arg_scope([layers.batch_norm],
fused=True,
is_training=True,
trainable=True):
layers.convolution(
inputs,
num_outputs=16,
kernel_size=3,
stride=1,
padding='VALID',
activation_fn=nn_ops.relu,
normalizer_fn=layers.batch_norm,
normalizer_params={},
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
trainable=True,
scope=None)
inputs_pattern = graph_matcher.OpTypePattern('*', name='inputs')
relu_pattern = graph_matcher.OpTypePattern(
'Relu',
name='relu',
inputs=[
graph_matcher.OpTypePattern(
'FusedBatchNormV3',
inputs=[
graph_matcher.OpTypePattern(
'Conv2D', inputs=[inputs_pattern, '*']), '*', '*',
'*', '*'
])
])
matcher = graph_matcher.GraphMatcher(relu_pattern)
match_results = list(matcher.match_graph(g))
self.assertEqual(1, len(match_results))
match_result = match_results[0]
self.assertEqual(match_result.get_tensor(inputs_pattern), inputs)
self.assertEqual(match_result.get_tensor('inputs'), inputs)
def test_multiple_outputs(self):
#   +          -
#  / \y0    y1/ \
# x    split    z
#        |
#        y    (nodes are ops; edges are going up)
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, shape=[1], name='x')
y = array_ops.placeholder(dtypes.float32, shape=[2], name='y')
y0, y1 = array_ops.split(y, num_or_size_splits=2, axis=0)
z = array_ops.placeholder(dtypes.float32, shape=[1], name='z')
math_ops.add(x, y0)
math_ops.subtract(y1, z)
y1_pattern = graph_matcher.OpTypePattern('*')
minus_pattern = graph_matcher.OpTypePattern('Sub', inputs=[y1_pattern, '*'])
matcher = graph_matcher.GraphMatcher(minus_pattern)
match_results = list(matcher.match_graph(g))
self.assertEqual(1, len(match_results))
match_result = match_results[0]
self.assertEqual(y0.op, y1.op)
self.assertEqual(match_result.get_op(y1_pattern), y1.op)
self.assertEqual(match_result.get_tensor(y1_pattern), y1)
def test_oneof_type_pattern(self):
#   +   -
#  / \ / \
# x   y   z
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, shape=[], name='x')
y = array_ops.placeholder(dtypes.float32, shape=[], name='y')
z = array_ops.placeholder(dtypes.float32, shape=[], name='z')
plus = x + y
minus = y - z
add_or_sub_pattern = graph_matcher.OpTypePattern(
'AddV2|Add|Sub', inputs=['*', '*'])
matcher = graph_matcher.GraphMatcher(add_or_sub_pattern)
self.assertEqual([
match_result.get_op(add_or_sub_pattern)
for match_result in matcher.match_graph(g)
], [plus.op, minus.op])
def test_oneof_pattern(self):
reshape_pattern = graph_matcher.OpTypePattern('Reshape')
transpose_pattern = graph_matcher.OneofPattern([
graph_matcher.OpTypePattern(
'Transpose',
name='transpose',
inputs=[
graph_matcher.OpTypePattern(
'Slice', name='slice', inputs=[reshape_pattern, '*', '*']),
'*'
]),
graph_matcher.OpTypePattern(
'Transpose', name='transpose', inputs=[reshape_pattern, '*'])
])
matcher = graph_matcher.GraphMatcher(transpose_pattern)
g = ops.Graph()
with g.as_default():
inputs = array_ops.placeholder(dtypes.float32, shape=[6])
reshape = array_ops.reshape(inputs, [2, 3])
transpose = array_ops.transpose(reshape)
[match_result] = list(matcher.match_graph(g))
self.assertEqual(match_result.get_tensor(reshape_pattern), reshape)
self.assertEqual(match_result.get_tensor('slice'), None)
self.assertEqual(match_result.get_op('transpose'), transpose.op)
g = ops.Graph()
with g.as_default():
inputs = array_ops.placeholder(dtypes.float32, shape=[6])
reshape = array_ops.reshape(inputs, [2, 3])
slicing = array_ops.slice(reshape, [0, 0], [-1, -1])
transpose = array_ops.transpose(slicing)
[match_result] = list(matcher.match_graph(g))
self.assertEqual(match_result.get_tensor(reshape_pattern), reshape)
self.assertEqual(match_result.get_tensor('slice'), slicing)
self.assertEqual(match_result.get_op('transpose'), transpose.op)
def test_ordered_pattern(self):
#   +          +
#  / \        / \
# x   y  and y   x  should both match when `ordered_inputs` is False,
# even when x and y are different operations.
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, shape=[], name='x')
y = constant_op.constant(1.0, dtype=dtypes.float32)
plus = x + y
add_pattern_a = graph_matcher.OpTypePattern(
'Add|AddV2', inputs=['Const', 'Placeholder'], ordered_inputs=False)
add_pattern_b = graph_matcher.OpTypePattern(
'Add|AddV2', inputs=['Placeholder', 'Const'], ordered_inputs=False)
add_pattern_fail = graph_matcher.OpTypePattern(
'Add|AddV2', inputs=['Const', 'Placeholder'], ordered_inputs=True)
# Both add_pattern_a and add_pattern_b should match the graph since
# ordered_inputs was set to False.
matcher_a = graph_matcher.GraphMatcher(add_pattern_a)
self.assertEqual([
match_result.get_op(add_pattern_a)
for match_result in matcher_a.match_graph(g)
], [plus.op])
matcher_b = graph_matcher.GraphMatcher(add_pattern_b)
self.assertEqual([
match_result.get_op(add_pattern_b)
for match_result in matcher_b.match_graph(g)
], [plus.op])
# But if ordered_inputs is True, the inputs list match should fail if not
# specified in the right order.
matcher_fail = graph_matcher.GraphMatcher(add_pattern_fail)
self.assertEqual(
len([
match_result.get_op(add_pattern_fail)
for match_result in matcher_fail.match_graph(g)
]), 0)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/quantize/python/graph_matcher_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic to update a TensorFlow model graph with quantization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.contrib.quantize.python import quant_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Quantizable operation types that are supported by the quantization rewrite.
_QUANTIZABLE_TYPES = {'Conv2D', 'MatMul', 'DepthwiseConv2dNative'}
# Activations that are supported by the quantization rewrite.
_ACTIVATION_TYPES = {'Relu', 'Relu6', 'Identity'}
_RELU_TYPES = {'Relu', 'Relu6'}
_QUANTIZATION_OP = {'FakeQuantWithMinMaxVars'}
_VALID_SRC_OP = {'Add', 'AddV2', 'Mul'}
_INTERMEDIATE_OP = {'Add', 'AddV2', 'Mul'}
_PASS_THROUGH_OP = {'Reshape', 'Identity', 'BatchToSpaceND', 'SpaceToBatchND'}
_VALID_ACTIVATION_OP = {'Relu', 'Relu6'}
def Quantize(graph,
is_training,
weight_bits=8,
activation_bits=8,
symmetric=False,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
scope=None):
"""Updates graph with quantization operations.
Currently we quantize the following tensors:
* Conv/MatMul: Quantize the weights if it matches.
* Activation: Quantize the output if it matches.
* Bypass/Post-activation Bypass: Quantize both input and output
if it matches.
Args:
graph: Graph to modify.
is_training: Whether quantizing training graph or eval graph.
weight_bits: Number of bits to use for quantizing weights.
activation_bits: Number of bits to use for quantizing activations.
symmetric: (Optional) If true, use symmetric quantization limits instead of
training the minimum and maximum of each quantization range separately.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
scope: The scope to be transformed. If it's not None, only the ops which
are in this scope will be transformed.
Raises:
ValueError: When quantization fails.
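  Example (an illustrative sketch; `graph` is a hypothetical `tf.Graph` that
  contains quantizable layers):
  ```python
  Quantize(graph, is_training=True, weight_bits=8, activation_bits=8)
  ```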
"""
if scope and not scope.endswith('/'):
scope += '/'
input_to_ops_map = input_to_ops.InputToOps(graph)
quantized_ops = set()
for layer_match in _FindLayersToQuantize(graph):
# Quantize the weights.
context = _GetContextFromOp(layer_match.layer_op)
# If `scope` is given, only quantize it if the consumer of weights
# (the layer op) is in the right scope.
if layer_match.weight_tensor is not None:
_InsertQuantOp(
context,
'weights_quant',
layer_match.weight_tensor.op,
input_to_ops_map.ConsumerOperations(layer_match.weight_tensor.op),
is_training,
moving_avg=False,
ema_decay=ema_decay,
quant_delay=quant_delay,
narrow_range=True,
vars_collection=vars_collection,
bits=weight_bits,
symmetric=symmetric,
consumer_scope=scope)
# Quantize the activations.
if layer_match.activation_op is not None:
consumer_ops = input_to_ops_map.ConsumerOperations(
layer_match.activation_op)
add_context = context
if layer_match.bypass_op:
pattern_match_result = re.search(r'^(.*)/([^/]+)', context)
if pattern_match_result is not None:
add_context = pattern_match_result.group(1)
else:
add_context = ''
# If `scope` is given, only quantize it if the producer of weights
# (usually it's the layer op) is in the right scope.
_InsertQuantOp(
add_context,
'act_quant',
layer_match.activation_op,
consumer_ops,
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
symmetric=symmetric,
init_min=0.0,
producer_scope=scope)
quantized_ops.add(layer_match.activation_op)
# Quantize the inputs and output to the bypass (if it exists). The input to
# the bypass is the bias add, and the output is the activation.
if layer_match.bypass_op is not None:
# If `scope` is given, only quantize it if both the producer and the
# consumer are in the right scope.
_InsertQuantOp(
context,
'conv_quant',
layer_match.bias_add_op,
input_to_ops_map.ConsumerOperations(layer_match.bias_add_op),
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
symmetric=symmetric,
producer_scope=scope,
consumer_scope=scope)
quantized_ops.add(layer_match.bias_add_op)
# Make sure the op following this isn't an activation; in that case we
# shouldn't quantize it, since the activation will be fused into the
# Add at inference time.
consumers = input_to_ops_map.ConsumerOperations(layer_match.bypass_op)
if any(consumer.type in _ACTIVATION_TYPES for consumer in consumers):
logging.info('Skipping %s, because it is followed by an activation.',
layer_match.bypass_op.name)
else:
_InsertQuantOp(
add_context,
'add_quant',
layer_match.bypass_op,
input_to_ops_map.ConsumerOperations(layer_match.bypass_op),
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
symmetric=symmetric,
producer_scope=scope,
consumer_scope=scope)
quantized_ops.add(layer_match.bypass_op)
# Quantize bypass ops that occur after the activation.
if layer_match.post_activation_bypass_op is not None:
pattern_match_result = re.search(
r'^(.*)/([^/]+)', layer_match.post_activation_bypass_op.name)
if pattern_match_result is not None:
post_activation_bypass_context = pattern_match_result.group(1)
else:
post_activation_bypass_context = ''
# If `scope` is given, only quantize it if the producer is in the right
# scope.
# Make sure the op following this isn't an activation; in that case we
# shouldn't quantize it, since the activation will be fused into the
# Add at inference time.
consumers = input_to_ops_map.ConsumerOperations(
layer_match.post_activation_bypass_op)
if any(consumer.type in _RELU_TYPES for consumer in consumers):
logging.info('Skipping %s, because it is followed by an activation.',
layer_match.post_activation_bypass_op.name)
else:
_InsertQuantOp(
post_activation_bypass_context,
'post_activation_bypass_quant',
layer_match.post_activation_bypass_op,
consumers,
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
symmetric=symmetric,
producer_scope=scope)
quantized_ops.add(layer_match.post_activation_bypass_op)
_QuantizeActivationLayers(
quantized_ops,
graph,
is_training,
activation_bits,
ema_decay,
quant_delay,
vars_collection,
scope=scope)
def _QuantizeActivationLayers(quantized_ops,
graph,
is_training,
activation_bits=8,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
scope=None):
"""Quantize intermediate activation tensors after addition and multiplication.
Args:
quantized_ops: Set of previously quantized activation ops.
graph: Graph to modify.
is_training: Whether quantizing training graph or eval graph.
activation_bits: Number of bits to use for quantizing activations.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
scope: The scope to be transformed. If it's not None, only the ops which are
in this scope will be transformed.
Raises:
ValueError: When quantization fails.
"""
input_to_ops_map = input_to_ops.InputToOps(graph)
for op in graph.get_operations():
if _CheckIfQuantizableOp(op, quantized_ops):
logging.info('Inserting fake quant op activation_%s_quant after %s',
op.type, op.name)
consumers = input_to_ops_map.ConsumerOperations(op)
_InsertQuantOp(
op.name,
'activation_' + op.type + '_quant',
op,
consumers,
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
producer_scope=scope)
def _CheckIfQuantizableOp(src_op, quantized_ops):
"""Check if the output of an op should be quantized.
Args:
src_op: Op to be checked.
quantized_ops: Set of previously quantized activation ops.
Returns:
Boolean specifying if output should be quantized or not.
"""
src_op_name = set([src_op.type])
if src_op in quantized_ops:
return False
if not src_op_name.intersection(_VALID_SRC_OP):
return False
# If the src op is an add or a mul and the output is immediately
# followed by an activation, skip quantization.
if len(src_op.outputs) == 1 and len(src_op.outputs[0].consumers()) == 1:
op_consumers = src_op.outputs[0].consumers()
if set([op_consumers[0].type]).intersection(_VALID_ACTIVATION_OP):
logging.info('Skipping quant after %s', src_op.name)
return False
# Is an Add or a Mul
input_ops = src_op.inputs
for op in input_ops:
curr_op = op.op
curr_op_type = set([curr_op.type])
while curr_op_type.intersection(_PASS_THROUGH_OP):
# Walk back through pass through ops
curr_op = curr_op.inputs[0].op
curr_op_type = set([curr_op.type])
# Now at a valid or quantizable op; we need to check whether at least one
# of the inputs to a valid op is connected to a quantizable op via
# pass-through ops.
if (curr_op_type.intersection(_QUANTIZATION_OP) or
curr_op.name.find('delayed_quant/Merge') > 0):
return True
if curr_op_type.intersection(_INTERMEDIATE_OP):
# Check if at least one input to the intermediate op is quantizable.
for input_op in curr_op.inputs:
if _CheckIfQuantizableOp(input_op.op, quantized_ops):
return True
return False
def _FindLayersToQuantize(graph):
"""Matches layers in graph to quantize.
The following patterns get matched. Nodes surrounded by [] will be
optionally matched:
      weight|folded_weight
            /
         conv|fc
            |
    [batch_to_space_nd]
            |
   [post_conv_correction]
            |
    [biasadd|folded_bias]
            |
         [bypass]
            |
       activation
            |
  [post_activation_bypass]
Match replacements:
If weight|folded_weight is found, FakeQuant is added afterwards.
If bypass is found, FakeQuant is added before and after.
If activation is found, FakeQuant is added afterwards.
If post_activation_bypass is found, FakeQuant is added afterwards.
Args:
graph: Graph to perform match on.
Returns:
list of _LayerMatches.
"""
input_pattern = graph_matcher.OpTypePattern('*')
weight_var_pattern = graph_matcher.OpTypePattern('Variable|VariableV2')
weight_partition_identity_pattern = graph_matcher.OpTypePattern(
'Identity', inputs=[weight_var_pattern])
weight_partition_concat_pattern = graph_matcher.OpTypePattern(
'ConcatV2', inputs=[weight_partition_identity_pattern, '*', '*'])
weight_identity_pattern = graph_matcher.OpTypePattern(
'Identity',
inputs=[
graph_matcher.OneofPattern([
weight_partition_identity_pattern,
weight_partition_concat_pattern,
weight_var_pattern,
])
])
weight_resource_var_pattern = graph_matcher.OpTypePattern('ReadVariableOp')
folded_weight_pattern = graph_matcher.OpTypePattern('Mul')
# The weight inputs to the layer operation can come either from the Variable
# or from the folded weight (Mul).
layer_pattern = graph_matcher.OpTypePattern(
'|'.join(_QUANTIZABLE_TYPES),
inputs=[
input_pattern,
graph_matcher.OneofPattern([
weight_identity_pattern, weight_resource_var_pattern,
folded_weight_pattern
])
],
ordered_inputs=False)
# For atrous convolutions a BatchToSpaceND will occur after the depthwise
# convolution.
batch_to_space_pattern = graph_matcher.OpTypePattern(
'BatchToSpaceND',
inputs=[
layer_pattern,
graph_matcher.OpTypePattern('*'),
graph_matcher.OpTypePattern('*')
])
layer_output_pattern = graph_matcher.OneofPattern(
[batch_to_space_pattern, layer_pattern])
# For separable convolutions, we are looking for a conv, followed by a conv
# with no activations between the two.
sep_conv_pattern = graph_matcher.OpTypePattern(
'|'.join(_QUANTIZABLE_TYPES),
inputs=[
graph_matcher.OneofPattern([layer_output_pattern]),
graph_matcher.OpTypePattern('*')
],
ordered_inputs=False)
folded_bias_mul_pattern = graph_matcher.OpTypePattern(
'Mul',
inputs=[graph_matcher.OpTypePattern('*'), layer_output_pattern],
ordered_inputs=False)
post_layer_op_correction_pattern = graph_matcher.OpTypePattern(
'Add|AddV2',
inputs=[folded_bias_mul_pattern,
graph_matcher.OpTypePattern('*')],
ordered_inputs=False)
folded_bias_add_pattern = graph_matcher.OpTypePattern(
'Add|AddV2',
inputs=[
post_layer_op_correction_pattern,
graph_matcher.OpTypePattern('*')
],
ordered_inputs=False)
# batch_norms with forced updates have an Identity operation at the end.
# TODO(suharshs): Find a way to easily skip extra Identity operations. The
# current issue is that doing so can often match patterns across many layers
# incorrectly.
batch_norm_identity = graph_matcher.OpTypePattern(
'Identity', inputs=[folded_bias_add_pattern])
bias_add_pattern = graph_matcher.OpTypePattern(
'Add|AddV2|BiasAdd',
inputs=[layer_output_pattern, '*'],
ordered_inputs=False)
# The bias can come from the bias add or the folded bias add.
bypass_pattern = graph_matcher.OpTypePattern(
'Add|AddV2',
inputs=[
graph_matcher.OneofPattern(
[bias_add_pattern, folded_bias_add_pattern, batch_norm_identity]),
'*'
],
ordered_inputs=False)
# The input to the activation can come from the bias add, the folded bias
# add, or the bypasses.
# TODO(suharshs): We should ideally skip Identity operations instead of
# treating them as activations.
activation_pattern = graph_matcher.OpTypePattern(
'|'.join(_ACTIVATION_TYPES) + '|Identity',
inputs=[
graph_matcher.OneofPattern([
bias_add_pattern,
folded_bias_add_pattern,
batch_norm_identity,
bypass_pattern,
layer_pattern,
])
])
post_activation_bypass_pattern = graph_matcher.OpTypePattern(
'Add|AddV2', inputs=['*', activation_pattern], ordered_inputs=False)
# The order of the following matching blocks is very important. Since matches
# aren't guaranteed to be disjoint, we structure matches from largest to
# smallest to guarantee that the largest match always wins. Additionally, we
# ensure that we don't match layers multiple times.
layer_matches = []
# We use matched_layer_set to ensure that layers aren't matched multiple
# times.
matched_layer_set = set()
# First, we match layers that have a post activation bypass. We do this first
# to ensure we don't match only the first part of this layer, missing the
# post activation bypass node.
post_activation_bypass_layer_matcher = graph_matcher.GraphMatcher(
post_activation_bypass_pattern)
for match_result in post_activation_bypass_layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(activation_pattern)
bias_add_op = match_result.get_op(bias_add_pattern)
if bias_add_op is None:
bias_add_op = match_result.get_op(folded_bias_add_pattern)
bypass_op = match_result.get_op(bypass_pattern)
post_activation_bypass_op = match_result.get_op(
post_activation_bypass_pattern)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, bypass_op,
post_activation_bypass_op, bias_add_op))
# Now, we match the basic layer ending at an activation. We may get duplicate
# matches from above, but we don't add them to layer_matches.
layer_matcher = graph_matcher.GraphMatcher(activation_pattern)
for match_result in layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(activation_pattern)
bias_add_op = match_result.get_op(bias_add_pattern)
if bias_add_op is None:
bias_add_op = match_result.get_op(folded_bias_add_pattern)
bypass_op = match_result.get_op(bypass_pattern)
if layer_op not in matched_layer_set:
if not _IsSkipLayer(activation_op):
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, bypass_op, None,
bias_add_op))
# Match the final layer, where there may not be an activation and instead
# the output of the final BiasAdd must be quantized. So we treat the BiasAdd
# as the 'activation_op' in the _LayerMatch, to ensure that its output is
# quantized.
final_layer_matcher = graph_matcher.GraphMatcher(
graph_matcher.OneofPattern([bias_add_pattern, folded_bias_add_pattern]))
for match_result in final_layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(bias_add_pattern)
if activation_op is None:
activation_op = match_result.get_op(folded_bias_add_pattern)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, None, None, None))
# Look for separable convolutions here
sep_conv_matcher = graph_matcher.GraphMatcher(sep_conv_pattern)
for match_result in sep_conv_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
activation_op = match_result.get_op(layer_pattern)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, None, None, None))
return layer_matches
def _IsSkipLayer(activation_op):
"""Skip quantizing conv->identity->Batch norm layers.
Args:
activation_op: Activation op detected by layer matching pattern
Returns:
skip_layer: boolean, true when conv->identity->batch norm is detected.
"""
# Exclude quantization of conv->identity->BN; after folding, this part
# corresponds to the estimation of mean and variance and should not be
# quantized.
skip_layer = False
if activation_op.type == 'Identity' and len(activation_op.outputs) == 1:
if len(activation_op.outputs[0].consumers()) == 1:
consumer = activation_op.outputs[0].consumers()[0]
if consumer.type in ['FusedBatchNorm', 'FusedBatchNormV3']:
skip_layer = True
logging.info(
'Skipping quantizing %s, because it is the output of a conv/fc '
'followed by an identity, feeding a fused batch norm.',
activation_op.name)
return skip_layer
class _LayerMatch(object):
"""Contains all information related to a matched Layer."""
def __init__(self, layer_op, weight_tensor, activation_op, bypass_op,
post_activation_bypass_op, bias_add_op):
self._layer_op = layer_op
self._weight_tensor = weight_tensor
self._activation_op = activation_op
self._bypass_op = bypass_op
self._post_activation_bypass_op = post_activation_bypass_op
self._bias_add_op = bias_add_op
@property
def layer_op(self):
return self._layer_op
@property
def weight_tensor(self):
return self._weight_tensor
@property
def activation_op(self):
return self._activation_op
@property
def bypass_op(self):
return self._bypass_op
@property
def post_activation_bypass_op(self):
return self._post_activation_bypass_op
@property
def bias_add_op(self):
return self._bias_add_op
def _FollowedByFakeQuant(tensor):
"""Returns True if the tensor is followed by a FakeQuant."""
fake_quant_ops = set([
'FakeQuantWithMinMaxVars', 'FakeQuantWithMinMaxArgs',
'FakeQuantWithMinMaxVarsPerChannel'
])
pass_through_ops = set(['Reshape', 'Identity'])
consumers = tensor.consumers()
while consumers:
c = consumers.pop()
if c.type in fake_quant_ops:
return True
elif c.type in pass_through_ops:
for output in c.outputs:
consumers.extend(output.consumers())
return False
def _InsertQuantOp(context,
name,
producer,
consumers,
is_training,
moving_avg=True,
init_min=-6.0,
init_max=6.0,
bits=8,
symmetric=False,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
narrow_range=False,
producer_scope=None,
consumer_scope=None):
"""Inserts a quant op between a producer op and (multiple) consumer ops.
Args:
context: Context where producer and consumer operations are nested.
name: Name for the new quantization op within the context.
producer: Producer operation of the pairs where quantization will be
inserted.
consumers: Consumer operations of the pairs.
is_training: Whether quantizing training graph or eval graph.
moving_avg: Specifies whether to use exponential moving average or just
the last value seen.
init_min: Starting minimum value for the new quantization op.
init_max: Starting maximum value for the new quantization op.
bits: Number of bits to use for quantization, must be between 2 and 8.
symmetric: (Optional) If true, use symmetric quantization limits instead of
training the minimum and maximum of each quantization range separately.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
narrow_range: Whether to use the narrow quantization range
[1; 2^bits - 1] or wide range [0; 2^bits - 1].
producer_scope: The restriction of producer scope. If not None, the new op
will be inserted only when the producer is in this scope.
consumer_scope: The restriction of consumer scope. If not None, the new op
will be inserted only when all the consumers are in this scope.
Raises:
ValueError: When producer operation is not directly connected to the
consumer operation.
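  Example (a hypothetical call; the op names and variables are illustrative):
  ```python
  _InsertQuantOp('net/conv1', 'act_quant', producer=relu_op,
                 consumers=consumer_ops, is_training=True, moving_avg=True)
  ```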
"""
if producer_scope and not producer.name.startswith(producer_scope):
logging.info(
'_InsertQuantOp ignores context="%s" name="%s" '
'because producer "%s" is not in scope "%s"',
context, name, producer.name, producer_scope)
return
if consumer_scope:
consumers_in_scope = []
for consumer in consumers:
if consumer.name.startswith(consumer_scope):
consumers_in_scope.append(consumer)
else:
logging.info(
'_InsertQuantOp context="%s" name="%s" ignores '
'consumer "%s" because it is not in scope "%s"',
context, name, consumer.name, consumer_scope)
return
consumers = consumers_in_scope
name_prefix = _AddContextToName(context, name)
# This is needed on TPU where name_scope == 'TPUReplicate/loop', and
# name_prefix starts with 'TPUReplicate/loop/'; without dropping it
# variables are created as TPUReplicate/loop/TPUReplicate/loop/..., which
# breaks things later.
name_scope = ops.get_name_scope()
if name_scope:
name_prefix = common.DropStringPrefix(name_prefix, name_scope + '/')
inputs = producer.outputs[0]
# Prevent ops from being quantized multiple times. Bypass ops can sometimes
# overlap between multiple matches, so we need to ensure that we don't
# add duplicate FakeQuant operations.
if _FollowedByFakeQuant(inputs):
return
if moving_avg:
quant = (
quant_ops.MovingAvgQuantize(
inputs,
init_min=init_min,
init_max=init_max,
ema_decay=ema_decay,
is_training=is_training,
num_bits=bits,
symmetric=symmetric,
narrow_range=narrow_range,
vars_collection=vars_collection,
name_prefix=name_prefix))
else:
quant = (
quant_ops.LastValueQuantize(
inputs,
init_min=init_min,
init_max=init_max,
is_training=is_training,
num_bits=bits,
symmetric=symmetric,
narrow_range=narrow_range,
vars_collection=vars_collection,
name_prefix=name_prefix))
if quant_delay and quant_delay > 0:
activate_quant = math_ops.greater_equal(
common.CreateOrGetQuantizationStep(),
quant_delay,
name=name_prefix + '/activate_quant')
quant = control_flow_ops.cond(
activate_quant,
lambda: quant,
lambda: inputs,
name=name_prefix + '/delayed_quant')
if consumers:
tensors_modified_count = common.RerouteTensor(
quant, inputs, can_modify=consumers)
# Some operations can have multiple output tensors going to the same
# consumer. Since consumers is a set, we need to ensure that
# tensors_modified_count is greater than or equal to the length of the set
# of consumers.
if tensors_modified_count < len(consumers):
raise ValueError('No inputs quantized for ops: [%s]' % ', '.join(
[consumer.name for consumer in consumers]))
def _GetContextFromOp(op):
"""Gets the root context name from the op name."""
context_re = re.search(r'^(.*)/([^/]+)', op.name)
if context_re:
return context_re.group(1)
return ''
def _AddContextToName(context, name):
"""Adds the context to the name if it exists."""
if not context:
return name
return context + '/' + name
|
tensorflow-master
|
tensorflow/contrib/quantize/python/quantize.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for the quantize_graph graph rewriting API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import quantize_graph
from tensorflow.python import training
from tensorflow.python.compat import compat
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import template
from tensorflow.python.platform import googletest
class QuantizeGraphTest(test_util.TensorFlowTestCase):
# We have a lot of other tests that test the details of the rewrite; here we
# just test the specific features of the quantize_graph API.
def _RunTestOverAllRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_training_graph,
quantize_graph.create_eval_graph,
quantize_graph.experimental_create_training_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverTrainingRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_training_graph,
quantize_graph.experimental_create_training_graph,
functools.partial(
quantize_graph.experimental_create_training_graph, symmetric=True),
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverEvalRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_eval_graph,
quantize_graph.experimental_create_eval_graph,
functools.partial(
quantize_graph.experimental_create_eval_graph, symmetric=True),
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverExperimentalRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.experimental_create_training_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverExperimentalRewritesWithScope(self, test_fn, scope):
def with_absent_scope(fn):
def fn_with_absent_scope(*args):
fn(*args, scope=scope)
return fn_with_absent_scope
rewrite_fns = [
with_absent_scope(
quantize_graph.experimental_create_training_graph),
with_absent_scope(
quantize_graph.experimental_create_eval_graph),
]
for fn in rewrite_fns:
test_fn(fn)
def testRewrite(self):
self._RunTestOverAllRewrites(self._TestRewrite)
def _TestRewrite(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
self._ConvLayer()
orig_variable_names = set(
[v.name for v in graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
rewrite_fn(graph)
q_variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Ensure that variables were added.
self.assertTrue(len(orig_variable_names) < len(q_variables))
def testDefaultGraph(self):
    self._RunTestOverAllRewrites(self._TestDefaultGraph)
def _TestDefaultGraph(self, rewrite_fn):
# Tests that the default graph is correctly used when no args are provided
# to rewrite_fn.
with ops.Graph().as_default() as g:
self._ConvLayer()
orig_variable_names = set(
[v.name for v in g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
rewrite_fn()
q_variables = g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Ensure that variables were added.
self.assertTrue(len(orig_variable_names) < len(q_variables))
def testWithPostActivationBypass(self):
self._RunTestOverAllRewrites(self._TestWithPostActivationBypass)
def _TestWithPostActivationBypass(self, rewrite_fn):
    # Tests that the post-activation bypass add is quantized when present.
with ops.Graph().as_default() as g:
self._ConvLayer(post_activation_bypass=True, scope='scope1')
rewrite_fn()
op_names = [op.name for op in g.get_operations()]
self.assertTrue(any(
'scope1/post_activation_bypass_quant/' in name for name in op_names))
def testQuantDelay(self):
self._RunTestOverTrainingRewrites(self._TestQuantDelay)
def _TestQuantDelay(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
quant_delay = 100
rewrite_fn(quant_delay=quant_delay)
quant_delay_found = False
for op in g.get_operations():
# Check to see if the quant_delay is correctly set.
if 'activate_quant' in op.name and op.type == 'Const':
quant_delay_found = True
const_value = str(op.get_attr('value'))
self.assertTrue(('int64_val: %i' % quant_delay) in const_value)
self.assertTrue(quant_delay_found)
def testTrainingOpsCheck(self):
self._RunTestOverTrainingRewrites(self._TestTrainingOpsCheck)
def _TestTrainingOpsCheck(self, rewrite_fn):
with ops.Graph().as_default():
output = self._ConvLayer()
output_scalar = math_ops.reduce_sum(output)
loss = math_ops.square(output_scalar - 1)
opt = training.gradient_descent.GradientDescentOptimizer(0.0001)
opt.minimize(loss)
with self.assertRaisesRegexp(ValueError, 'Training op found in graph'):
rewrite_fn()
def testWeightBits(self):
self._RunTestOverExperimentalRewrites(self._TestWeightBits)
def _TestWeightBits(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
weight_bits = 4
rewrite_fn(weight_bits=weight_bits)
weights_quant_found = False
for op in g.get_operations():
# Check to see if FakeQuant operations for weights have the right bits
# set.
if 'weights_quant' in op.name and op.type == 'FakeQuantWithMinMaxVars':
weights_quant_found = True
self.assertEqual(op.get_attr('num_bits'), weight_bits)
self.assertTrue(weights_quant_found)
def testActivationBits(self):
self._RunTestOverExperimentalRewrites(self._TestActivationBits)
def _TestActivationBits(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
activation_bits = 4
rewrite_fn(activation_bits=activation_bits)
act_quant_found = False
for op in g.get_operations():
# Check to see if FakeQuant operations for activations have the right bits
# set.
act_quant_names = ['act_quant', 'conv_quant', 'add_quant']
if any(s in op.name
for s in act_quant_names) and op.type == 'FakeQuantWithMinMaxVars':
act_quant_found = True
self.assertEqual(op.get_attr('num_bits'), activation_bits)
self.assertTrue(act_quant_found)
def testTrainingQuantization(self):
self._RunTestOverTrainingRewrites(self._TestTrainingQuantization)
def _TestTrainingQuantization(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
rewrite_fn()
# Ensure that FakeQuant and variable update nodes were found.
quant_found = False
assign_min_last_found = False
assign_min_ema_found = False
assign_max_last_found = False
assign_max_ema_found = False
for op in g.get_operations():
# Check that FakeQuant operations were added.
if op.type == 'FakeQuantWithMinMaxVars':
quant_found = True
# Check that update operations for the added min max variables exist in
# the graph.
if 'AssignMinLast' in op.name:
assign_min_last_found = True
elif 'AssignMinEma' in op.name:
assign_min_ema_found = True
elif 'AssignMaxLast' in op.name:
assign_max_last_found = True
elif 'AssignMaxEma' in op.name:
assign_max_ema_found = True
self.assertTrue(assign_min_last_found)
self.assertTrue(assign_min_ema_found)
self.assertTrue(assign_max_last_found)
self.assertTrue(assign_max_ema_found)
self.assertTrue(quant_found)
def testEvalQuantization(self):
self._RunTestOverEvalRewrites(self._TestEvalQuantization)
def _TestEvalQuantization(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
rewrite_fn()
# Ensure that FakeQuant and variable update nodes were found.
quant_found = False
for op in g.get_operations():
# Check that FakeQuant operations were added.
if op.type == 'FakeQuantWithMinMaxVars':
quant_found = True
# Check that update operations for the added min max variables don't
# exist in the graph.
update_names = [
'AssignMinLast', 'AssignMinEma', 'AssignMaxLast', 'AssignMaxEma'
]
self.assertFalse(any(s in op.name for s in update_names))
self.assertTrue(quant_found)
def testIdempotent(self):
self._RunTestOverAllRewrites(self._TestIdempotent)
def _TestIdempotent(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
rewrite_fn()
graph_def_before = str(g.as_graph_def())
# Ensuring that calling the rewrite again doesn't add more nodes.
rewrite_fn()
graph_def_after = str(g.as_graph_def())
self.assertEqual(graph_def_before, graph_def_after)
def testIdentityNode(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunTestOverAllRewrites(self._TestIdentityNode)
def _TestIdentityNode(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
self._LayerWithIdentity()
rewrite_fn(graph)
op_names = [op.name for op in graph.get_operations()]
self.assertTrue(any('test/Conv/weights_quant' in name for name in op_names))
self.assertTrue(any('test/Conv/act_quant' in name for name in op_names))
bn_out_identity = graph.get_operation_by_name('test/bn_out')
self._AssertInputOpsAre(bn_out_identity, [
'test/Conv/add_fold',
])
conv_out_identity = graph.get_operation_by_name('test/conv_out')
self._AssertOutputGoesToOps(conv_out_identity, graph,
['test/BatchNorm/FusedBatchNormV3'])
def testActivationQuantization(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunTestOverAllRewrites(self._TestActivationQuantization)
def _TestActivationQuantization(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
_ = self._LayerWithActivationProcessing()
rewrite_fn(graph)
# Check if outputs of multipliers and adds are quantized.
mul_op = graph.get_operation_by_name('test/Mul')
self._AssertOutputGoesToOps(
mul_op, graph,
['test/Mul/activation_Mul_quant/FakeQuantWithMinMaxVars'])
mul_op = graph.get_operation_by_name('test/Mul_1')
self._AssertOutputGoesToOps(
mul_op, graph,
['test/Mul_1/activation_Mul_quant/FakeQuantWithMinMaxVars'])
add_op = graph.get_operation_by_name('test/add')
if compat.forward_compatible(2019, 6, 21):
self._AssertOutputGoesToOps(
add_op, graph,
['test/add/activation_AddV2_quant/FakeQuantWithMinMaxVars'])
else:
self._AssertOutputGoesToOps(
add_op, graph,
['test/add/activation_Add_quant/FakeQuantWithMinMaxVars'])
def testRewriteWithScope(self):
self._RunTestOverExperimentalRewritesWithScope(
self._TestRewriteWithScope, 'scope1')
def _TestRewriteWithScope(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
scope1_output = self._ConvLayer(scope='scope1')
self._ConvLayer(input_tensor=scope1_output, scope='scope2')
rewrite_fn(graph)
op_names = [op.name for op in graph.get_operations()]
# The weights and activation of scope1 is quantized, but not scope2.
self.assertTrue(
any('scope1/Conv/act_quant' in name for name in op_names))
self.assertTrue(
any('scope1/Conv/weights_quant' in name for name in op_names))
self.assertFalse(
any('scope2/Conv/act_quant' in name for name in op_names))
self.assertFalse(
any('scope2/Conv/weights_quant' in name for name in op_names))
def testRewriteWithNonMatchingScope(self):
self._RunTestOverExperimentalRewritesWithScope(
self._TestRewriteWithNonMatchingScope, 'NonExistingScope')
def _TestRewriteWithNonMatchingScope(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
self._ConvLayer()
op_names_before_rewrite = set([op.name for op in graph.get_operations()])
rewrite_fn(graph)
op_names_after_rewrite = set([op.name for op in graph.get_operations()])
# No ops should be inserted or removed.
self.assertEqual(op_names_before_rewrite, op_names_after_rewrite)
def testActivationRewriteWithScope(self):
self._RunTestOverExperimentalRewritesWithScope(
self._TestActivationRewriteWithScope, 'scope1')
def _TestActivationRewriteWithScope(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
output = self._LayerWithIdentity(scope='scope1')
with ops.name_scope('scope2'):
output = nn_ops.relu6(output)
scaled_output1 = math_ops.mul(2.0, output)
scaled_output2 = math_ops.mul(3.0, output)
output = scaled_output1 + scaled_output2
rewrite_fn(graph)
op_names = [op.name for op in graph.get_operations()]
# The weights and activation of scope1 is quantized, but not scope2.
self.assertTrue(any('scope1/Conv/act_quant' in name for name in op_names))
self.assertTrue(
any('scope1/Conv/weights_quant' in name for name in op_names))
for op_name in op_names:
if op_name.startswith('scope2'):
self.assertTrue('FakeQuant' not in op_name)
def testActivationRewriteWithNonMatchingScope(self):
self._RunTestOverExperimentalRewritesWithScope(
self._TestActivationRewriteWithNonMatchingScope, 'NonExistingScope')
def _TestActivationRewriteWithNonMatchingScope(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
self._LayerWithActivationProcessing()
rewrite_fn(graph)
op_types_after_rewrite = set([op.type for op in graph.get_operations()])
    # No fake quant ops should be inserted. Note the set wrapper: intersecting
    # with a bare string would compare individual characters instead.
    self.assertFalse(
        op_types_after_rewrite.intersection({'FakeQuantWithMinMaxVars'}))
def testWithSharedWeights(self):
self._RunTestOverAllRewrites(self._TestWithSharedWeights)
self._RunTestOverTrainingRewrites(self._TestRewriteWithSharedWeights)
def _TestRewriteWithSharedWeights(self, rewrite_fn, quant_delay=1):
self._TestWithSharedWeights(rewrite_fn, quant_delay)
def _TestWithSharedWeights(self, rewrite_fn, quant_delay=None):
with ops.Graph().as_default() as g:
conv = template.make_template('shared_weights_conv', self._ConvLayer)
conv()
conv()
if quant_delay is None:
rewrite_fn()
else:
rewrite_fn(quant_delay=quant_delay)
conv_ops = [op for op in g.get_operations() if op.type == 'Conv2D']
weights_quants = [
op for op in g.get_operations()
if 'weights_quant' in op.name and op.type == 'FakeQuantWithMinMaxVars'
]
# Check that the shared weights variable is not quantized multiple times
self.assertTrue(len(weights_quants) == 1)
weights_quant_tensor = weights_quants[0].outputs[0]
if quant_delay:
delayed_weights_quants = [
op for op in g.get_operations()
if 'weights_quant' in op.name and op.type == 'Merge'
]
self.assertTrue(len(delayed_weights_quants) == 1)
weights_quant_tensor = delayed_weights_quants[0].outputs[0]
# Check that the Conv2D operations get the quantized weights
self.assertTrue(all(weights_quant_tensor in op.inputs for op in conv_ops))
def _ConvLayer(
self, input_tensor=None, scope='test', pre_activation_bypass=False,
post_activation_bypass=False):
"""Add a basic convolution layer to the default graph."""
batch_size, height, width, depth = 5, 128, 128, 3
if input_tensor is None:
input_tensor = array_ops.zeros((batch_size, height, width, depth))
weight_init = init_ops.truncated_normal_initializer
with ops.name_scope(scope):
output = layers.conv2d(
input_tensor,
depth, [5, 5],
padding='SAME',
weights_initializer=weight_init(0.09),
activation_fn=None)
if pre_activation_bypass:
output += input_tensor
output = nn_ops.relu6(output)
if post_activation_bypass:
output += input_tensor
return output
def _LayerWithIdentity(self,
input_tensor=None,
scope='test',
post_activation_bypass=False):
"""Add a basic conv, identity, batch norm with skip to the default graph."""
batch_size, height, width, depth = 5, 128, 128, 3
if input_tensor is None:
input_tensor = array_ops.zeros((batch_size, height, width, depth))
weight_init = init_ops.truncated_normal_initializer
with ops.name_scope(scope):
output = layers.conv2d(
input_tensor,
depth, [5, 5],
padding='SAME',
weights_initializer=weight_init(0.09),
activation_fn=None,
normalizer_fn=None,
biases_initializer=None)
output = array_ops.identity(output, name='conv_out')
output = layers.batch_norm(
output, center=True, scale=True, decay=1.0 - 0.003, fused=True)
output = array_ops.identity(output, name='bn_out')
if post_activation_bypass:
output += input_tensor
return output
def _LayerWithActivationProcessing(self,
input_tensor=None,
scope='test',
post_activation_bypass=False):
batch_size, height, width, depth = 5, 128, 128, 3
if input_tensor is None:
input_tensor = array_ops.zeros((batch_size, height, width, depth))
weight_init = init_ops.truncated_normal_initializer
with ops.name_scope(scope):
output = layers.conv2d(
input_tensor,
depth, [5, 5],
padding='SAME',
weights_initializer=weight_init(0.09),
activation_fn=None,
normalizer_fn=None,
biases_initializer=None)
output = layers.batch_norm(
output, center=True, scale=True, decay=1.0 - 0.003, fused=True)
output = nn_ops.relu6(output)
scaled_output1 = math_ops.mul(2.0, output)
scaled_output2 = math_ops.mul(3.0, output)
output = scaled_output1 + scaled_output2
return output
def _AssertInputOpsAre(self, op, in_op_names):
"""Asserts that all inputs to op come from in_op_names (disregarding order).
Args:
op: Operation to check inputs for.
in_op_names: List of strings, operations where all op's inputs should come
from.
"""
expected_inputs = [in_op_name + ':0' for in_op_name in in_op_names]
self.assertItemsEqual([t.name for t in op.inputs], expected_inputs)
def _AssertOutputGoesToOps(self, op, graph, out_op_names):
"""Asserts that outputs from op go to out_op_names (and perhaps others).
Args:
op: Operation to check outputs for.
graph: Graph where output operations are located.
out_op_names: List of strings, operations where op's outputs should go.
"""
for out_op_name in out_op_names:
out_op = graph.get_operation_by_name(out_op_name)
self.assertIn(op.outputs[0].name, [str(t.name) for t in out_op.inputs])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/quantize/python/quantize_graph_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for common utilities in this package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import common
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
batch_norm = layers.batch_norm
conv2d = layers.conv2d
class CommonTest(test_util.TensorFlowTestCase):
def testCreateOrGetQuantizationStep(self):
self._TestCreateOrGetQuantizationStep(False)
def testCreateOrGetQuantizationStepResourceVar(self):
self._TestCreateOrGetQuantizationStep(True)
def _TestCreateOrGetQuantizationStep(self, use_resource):
g = ops.Graph()
with session.Session(graph=g) as sess:
variable_scope.get_variable_scope().set_use_resource(use_resource)
quantization_step_tensor = common.CreateOrGetQuantizationStep()
# Check that operations are added to the graph.
num_nodes = len(g.get_operations())
self.assertGreater(num_nodes, 0)
# Check that getting the quantization step doesn't change the graph.
get_quantization_step_tensor = common.CreateOrGetQuantizationStep()
self.assertEqual(quantization_step_tensor, get_quantization_step_tensor)
self.assertEqual(num_nodes, len(g.get_operations()))
# Ensure that running the graph increments the quantization step.
sess.run(variables.global_variables_initializer())
step_val = sess.run(quantization_step_tensor)
self.assertEqual(step_val, 1)
# Ensure that even running a graph that depends on the quantization step
# multiple times only executes it once.
a = quantization_step_tensor + 1
b = a + quantization_step_tensor
_, step_val = sess.run([b, quantization_step_tensor])
self.assertEqual(step_val, 2)
def testRerouteTensor(self):
a = constant_op.constant(1, name='a')
b = constant_op.constant(2, name='b')
c = constant_op.constant(3, name='c')
d = constant_op.constant(4, name='d')
add_ac = math_ops.add(a, c)
add_ad = math_ops.add(a, d)
# Ensure that before rerouting the inputs are what we think.
self._CheckOpHasInputs(add_ac.op, [a, c])
self._CheckOpHasInputs(add_ad.op, [a, d])
    # References to tensor a should be replaced with b for all ops in
    # can_modify. This means add_ac will be changed but add_ad will not.
common.RerouteTensor(b, a, can_modify=[add_ac.op])
self._CheckOpHasInputs(add_ac.op, [b, c])
self._CheckOpHasInputs(add_ad.op, [a, d])
def _CheckOpHasInputs(self, op, inputs):
for i in inputs:
self.assertIn(i, op.inputs)
def testBatchNormScope(self):
batch_size, height, width, depth = 5, 128, 128, 3
g = ops.Graph()
with g.as_default():
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1
out_depth = 32
scope = ''
node = conv2d(
inputs,
out_depth, [2, 2],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(False),
scope=scope)
node = nn_ops.relu(node, name='Relu6')
bn_list = common.BatchNormGroups(g)
with open('/tmp/common_test.pbtxt', 'w') as f:
f.write(str(g.as_graph_def()))
# Exactly one batch norm layer with empty scope should be found
self.assertEqual(len(bn_list), 1)
self.assertEqual(bn_list[0], '')
def _BatchNormParams(self, fused=False, force_updates=False):
params = {
'center': True,
'scale': True,
'decay': 1.0 - 0.003,
'fused': fused
}
return params
def _WeightInit(self, stddev):
"""Returns a truncated normal variable initializer.
Function is defined purely to shorten the name so that it stops wrapping.
Args:
stddev: Standard deviation of normal variable.
Returns:
An initializer that initializes with a truncated normal variable.
"""
return init_ops.truncated_normal_initializer(stddev=stddev, seed=1234)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/quantize/python/common_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""API to simulate quantization on a python graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.quantize.python import fold_batch_norms
from tensorflow.contrib.quantize.python import quantize
from tensorflow.python.framework import ops
def _create_graph(input_graph=None,
is_training=True,
weight_bits=8,
activation_bits=8,
symmetric=False,
quant_delay=None,
freeze_bn_delay=None,
scope=None):
"""Rewrites an input_graph in place for simulated quantization.
The graph has fake quantization ops inserted to simulate the error
introduced by quantization. Since the graph is transformed in place,
the expected behavior of previously held references to nodes and tensors may
change.
Args:
input_graph: The tf.Graph to be transformed, if None then defaults to the
default graph.
is_training: Whether quantizing training or eval graph.
weight_bits: Number of bits to use for quantizing weights.
activation_bits: Number of bits to use for quantizing activations.
symmetric: If true, use symmetric quantization limits instead of training
the minimum and maximum of each quantization range separately.
quant_delay: Number of steps after which weights and activations are
quantized during training.
freeze_bn_delay: Number of steps after which moving mean and variance are
frozen and used instead of batch statistics during training.
      freeze_bn_delay should be greater than quant_delay and should correspond
      to the number of steps at which training has almost converged.
scope: The scope to be transformed. If it's not None, only the ops which
are in this scope will be transformed.
Raises:
ValueError: If elements contains an element that isn't a tf.Tensor or
tf.Operation.
"""
if input_graph is None:
input_graph = ops.get_default_graph()
  # Check whether the graph contains training ops; if so, raise an
  # informative error before rewriting.
_check_for_training_ops(input_graph)
with input_graph.as_default():
fold_batch_norms.FoldBatchNorms(
input_graph,
freeze_batch_norm_delay=freeze_bn_delay,
is_training=is_training)
quantize.Quantize(
input_graph,
is_training,
quant_delay=quant_delay,
weight_bits=weight_bits,
activation_bits=activation_bits,
symmetric=symmetric,
scope=scope)
def create_training_graph(input_graph=None, quant_delay=0):
"""Rewrites a training input_graph in place for simulated quantization.
Variables added by the rewrite get added to the global variables collection.
This function must be invoked prior to insertion of gradient ops in a graph
as quantization should be modeled in both forward and backward passes.
The graph has fake quantization ops inserted to simulate the error
introduced by quantization. Since the graph is transformed in place,
the expected behavior of previously held references to nodes and tensors may
change.
The default value of quant_delay is suitable for finetuning an already trained
floating point model (recommended).
  If one wants to train a quantized model from scratch, quant_delay should be
  set to the number of steps it takes the floating point model to converge.
Quantization will be activated at this point and effectively finetune the
model. If quant_delay is not provided when training from scratch, training can
often fail.
Args:
input_graph: The tf.Graph to be transformed.
quant_delay: Number of steps after which weights and activations are
quantized during training.
Raises:
ValueError: If elements contains an element that isn't a tf.Tensor or
tf.Operation.
"""
# TODO(raghuramank) Need to have freeze_bn_delay be a function of batch size
# Currently the values below are hardcoded for mobilenetV1 on imagenet
# Please use the experimental API if you need to tune these values.
freeze_bn_delay = None
_create_graph(
input_graph=input_graph,
is_training=True,
quant_delay=quant_delay,
freeze_bn_delay=freeze_bn_delay)
def create_eval_graph(input_graph=None):
"""Rewrites an eval input_graph in place for simulated quantization.
Variables added by the rewrite get added to the global variables collection.
The graph has fake quantization ops inserted to simulate the error
introduced by quantization. Since the graph is transformed in place,
the expected behavior of previously held references to nodes and tensors may
change.
Args:
input_graph: The tf.Graph to be transformed, if None then defaults to the
default graph.
Raises:
ValueError: If elements contains an element that isn't a tf.Tensor or
tf.Operation.
"""
_create_graph(input_graph=input_graph, is_training=False)
def experimental_create_training_graph(input_graph=None,
weight_bits=8,
activation_bits=8,
symmetric=False,
quant_delay=0,
freeze_bn_delay=None,
scope=None):
"""Rewrites a training input_graph in place for simulated quantization.
This function must be invoked prior to insertion of gradient ops in a graph
as quantization should be modeled in both forward and backward passes.
Variables added by the rewrite get added to the global variables collection.
This function has additional experimental options not (yet) available to
create_training_graph. The resulting behavior may be undefined.
The graph has fake quantization ops inserted to simulate the error
introduced by quantization. Since the graph is transformed in place,
the expected behavior of previously held references to nodes and tensors may
change.
The default value of quant_delay is suitable for finetuning an already trained
floating point model (recommended).
  If one wants to train a quantized model from scratch, quant_delay should be
  set to the number of steps it takes the floating point model to converge.
Quantization will be activated at this point and effectively finetune the
model. If quant_delay is not provided when training from scratch, training can
often fail.
Args:
input_graph: The tf.Graph to be transformed, if None then defaults to the
default graph.
weight_bits: Number of bits to use for quantizing weights.
activation_bits: Number of bits to use for quantizing activations.
symmetric: If true, use symmetric quantization limits instead of training
the minimum and maximum of each quantization range separately.
quant_delay: Number of steps after which weights and activations are
quantized during training.
freeze_bn_delay: Number of steps after which moving mean and variance are
frozen and used instead of batch statistics during training.
      freeze_bn_delay should be greater than quant_delay and should correspond
      to when training has almost converged.
scope: The scope to be transformed. If it's not None, only the ops which
are in this scope will be transformed.
Raises:
ValueError: If elements contains an element that isn't a tf.Tensor or
tf.Operation.
"""
_create_graph(
input_graph=input_graph,
is_training=True,
weight_bits=weight_bits,
activation_bits=activation_bits,
symmetric=symmetric,
quant_delay=quant_delay,
freeze_bn_delay=freeze_bn_delay,
scope=scope)
def experimental_create_eval_graph(input_graph=None,
weight_bits=8,
activation_bits=8,
symmetric=False,
quant_delay=None,
scope=None):
"""Rewrites an eval input_graph in place for simulated quantization.
Variables added by the rewrite get added to the global variables collection.
This function has additional experimental options not (yet) available to
create_eval_graph. The resulting behavior may be undefined.
The graph has fake quantization ops inserted to simulate the error
introduced by quantization. Since the graph is transformed in place,
the expected behavior of previously held references to nodes and tensors may
change.
Args:
input_graph: The tf.Graph to be transformed, if None then defaults to the
default graph.
weight_bits: Number of bits to use for quantizing weights.
activation_bits: Number of bits to use for quantizing activations.
symmetric: If true, use symmetric quantization limits instead of training
the minimum and maximum of each quantization range separately.
quant_delay: Number of steps after which weights and activations are
quantized during eval.
scope: The scope to be transformed. If it's not None, only the ops which
are in this scope will be transformed.
Raises:
ValueError: If elements contains an element that isn't a tf.Tensor or
tf.Operation.
"""
_create_graph(
input_graph=input_graph,
is_training=False,
weight_bits=weight_bits,
activation_bits=activation_bits,
symmetric=symmetric,
quant_delay=quant_delay,
scope=scope)
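# Hedged usage sketch (added for exposition; not part of the original API):
# a symmetric 4-bit eval rewrite via the experimental entry point. The graph
# argument is any tf.Graph whose forward pass has already been built.
def _example_symmetric_eval_rewrite(graph):
  """Rewrites `graph` in place with symmetric 4-bit fake quantization."""
  experimental_create_eval_graph(
      input_graph=graph,
      weight_bits=4,
      activation_bits=4,
      symmetric=True)
  return graph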
def _check_for_training_ops(g):
"""Check if training ops are present in the graph.
Args:
g: The tf.Graph on which the check for training ops needs to be
performed.
Raises:
    ValueError: If a training op is seen in the graph.
"""
# The list here is obtained
# from https://www.tensorflow.org/api_docs/cc/group/training-ops
training_ops = frozenset([
'ApplyAdagrad', 'ApplyAdagradDA', 'ApplyAdam', 'ApplyAddSign',
'ApplyCenteredRMSProp', 'ApplyFtrl', 'ApplyFtrlV2',
'ApplyGradientDescent', 'ApplyMomentum', 'ApplyPowerSign',
'ApplyProximalAdagrad', 'ApplyProximalGradientDescent', 'ApplyRMSProp',
'ResourceApplyAdadelta', 'ResourceApplyAdagrad', 'ResourceApplyAdagradDA',
'ResourceApplyAdam', 'ResourceApplyAddSign',
'ResourceApplyCenteredRMSProp', 'ResourceApplyFtrl',
'ResourceApplyFtrlV2', 'ResourceApplyGradientDescent',
'ResourceApplyMomentum', 'ResourceApplyPowerSign',
'ResourceApplyProximalAdagrad', 'ResourceApplyProximalGradientDescent',
'ResourceApplyRMSProp', 'ResourceSparseApplyAdadelta',
'ResourceSparseApplyAdagrad', 'ResourceSparseApplyAdagradDA',
'ResourceSparseApplyCenteredRMSProp', 'ResourceSparseApplyFtrl',
'ResourceSparseApplyFtrlV2', 'ResourceSparseApplyMomentum',
'ResourceSparseApplyProximalAdagrad',
'ResourceSparseApplyProximalGradientDescent',
'ResourceSparseApplyRMSProp', 'SparseApplyAdadelta', 'SparseApplyAdagrad',
'SparseApplyAdagradDA', 'SparseApplyCenteredRMSProp', 'SparseApplyFtrl',
'SparseApplyFtrlV2', 'SparseApplyMomentum', 'SparseApplyProximalAdagrad',
'SparseApplyProximalGradientDescent', 'SparseApplyRMSProp'
])
op_types = set([op.type for op in g.get_operations()])
train_op_list = op_types.intersection(training_ops)
if train_op_list:
raise ValueError('Training op found in graph, exiting %s' % train_op_list)
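# Hedged usage sketch (added for exposition; not part of the original module).
# It demonstrates the ordering contract that _check_for_training_ops enforces:
# rewrite the forward graph first, then add the optimizer, since the rewrite
# raises once Apply* training ops are present. The imports mirror those used
# in this package's tests.
def _example_rewrite_before_gradients():
  from tensorflow.contrib.layers.python.layers import layers
  from tensorflow.python import training
  from tensorflow.python.ops import array_ops
  from tensorflow.python.ops import math_ops

  graph = ops.Graph()
  with graph.as_default():
    inputs = array_ops.zeros((8, 16))
    net = layers.fully_connected(inputs, 4)
    loss = math_ops.reduce_sum(math_ops.square(net))
    # Rewrite the forward graph first; FakeQuant ops are inserted in place.
    create_training_graph(input_graph=graph, quant_delay=0)
    # Gradient and Apply* ops may only be added after the rewrite; reversing
    # the order would make _check_for_training_ops raise a ValueError.
    training.gradient_descent.GradientDescentOptimizer(0.01).minimize(loss)
  return graph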
|
tensorflow-master
|
tensorflow/contrib/quantize/python/quantize_graph.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic to update a Tensorflow model graph with quantization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.quantize.python import common
class InputToOps(object):
"""Holds a mapping from tensor's name to ops that take it as input."""
def __init__(self, graph):
"""Initializes mapping from tensor's name to ops that take it.
Helps find edges between ops faster and avoids iterating over the whole
graph. The mapping is of type Dict[str, Set[tf.Operation]].
Note: while inserting operations into the graph, we do not update the
mapping, assuming that insertion points in the graph are never adjacent.
With that restriction, an out of date mapping still works fine.
Args:
graph: Graph to process.
"""
self.mapping = collections.defaultdict(set)
    for op in graph.get_operations():
if op.name.startswith(common.SKIPPED_PREFIXES):
continue
for op_input in op.inputs:
self.mapping[op_input].add(op)
def ConsumerOperations(self, producer_op):
"""Looks through outputs of producer_op, finds ops that take them as input.
Args:
producer_op: Operation containing outputs to process.
Returns:
A Set[Operation] containing all operations taking input from producer_op
outputs.
"""
result = set()
for inp in producer_op.outputs:
result.update(self.mapping[inp])
return result
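# Hedged usage sketch (added for exposition; not part of the original module):
# build the index once per graph, then query the consumers of a producer op.
def _example_consumer_lookup(graph, producer_op_name):
  """Returns the set of ops consuming any output of `producer_op_name`."""
  input_to_ops_map = InputToOps(graph)
  producer_op = graph.get_operation_by_name(producer_op_name)
  return input_to_ops_map.ConsumerOperations(producer_op)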
|
tensorflow-master
|
tensorflow/contrib/quantize/python/input_to_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities that match patterns in a tf.Graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import itertools
import six
@six.add_metaclass(abc.ABCMeta)
class Pattern(object):
"""The parent class of all patterns (e.g. OpTypePattern and OneofPattern)."""
@abc.abstractmethod
def match(self, op, tensor):
"""Returns the result of matching op/tensor against this pattern."""
raise NotImplementedError('Method "match" not implemented.')
class OpTypePattern(Pattern):
"""A tree pattern that matches TF expressions with certain op types."""
def __init__(self, op_type, name=None, inputs=None, ordered_inputs=True):
"""Initializes an OpTypePattern.
Args:
op_type: string that specifies the allowed types of the root. It can be
(1) an op type, e.g. 'Conv2D',
(2) '*', i.e. wildcard, or
(3) multiple op types separated by '|', e.g., 'Relu|Relu6'.
We could use regex strings, which might be worthwhile when we have many
similar TF op types.
name: Optional string. The name of the pattern that can be looked up in
MatchResult.
inputs: Optional list of `Pattern`s or strings that specify the
patterns for the inputs of a matching op. If None, this pattern accepts
any inputs of a matching op.
ordered_inputs: Defaults to True. If False, will match any op that
matches a permutation of the inputs.
Raises:
      ValueError: if too many inputs are provided when ordered_inputs is False.
"""
self._op_type = op_type
self._name = name
if inputs is None:
inputs = []
    if not ordered_inputs and len(inputs) > 8:
      raise ValueError(
          'At most 8 inputs are allowed when ordered_inputs is False, since '
          'unordered matching tries every permutation of the inputs.')
self._inputs = [
input_pattern
if isinstance(input_pattern, Pattern) else OpTypePattern(input_pattern)
for input_pattern in inputs
]
self._ordered_inputs = ordered_inputs
@property
def name(self):
return self._name
def match(self, op, tensor):
if self._op_type != '*':
if op.type not in self._op_type.split('|'):
return None
match_result = MatchResult()
match_result.add(self, op, tensor)
if not self._inputs:
# If pattern.inputs is empty, skips the rest and accepts all the inputs.
return match_result
if len(op.inputs) != len(self._inputs):
return None
input_patterns_list = [self._inputs]
# If order doesn't matter for the inputs, then make sure we match at least
# one permutation of the inputs.
if not self._ordered_inputs:
input_patterns_list = list(itertools.permutations(self._inputs))
for input_patterns in input_patterns_list:
match_failed = False
for input_tensor, input_pattern in zip(op.inputs, input_patterns):
input_match_result = input_pattern.match(input_tensor.op, input_tensor)
if input_match_result is None:
match_failed = True
break
match_result.merge_from(input_match_result)
if not match_failed:
return match_result
return None
class OneofPattern(Pattern):
"""Matches one of the given sub-patterns."""
def __init__(self, sub_patterns):
self._sub_patterns = sub_patterns
def match(self, op, tensor):
for sub_pattern in self._sub_patterns:
match_result = sub_pattern.match(op, tensor)
if match_result is not None:
return match_result
return None
class MatchResult(object):
r"""Encapsulates the result of a match done by GraphMatcher.
MatchResult contains a map from Pattern to the matching op and tensor.
When the matching op has multiple output tensors, the matching tensor is the
output tensor used by the matching op of the parent pattern. E.g., when we
match graph
        -         +
       / \y0   y1/ \
      x    split    z
            |
            y (nodes are ops; edges are going up)
  against add_pattern defined as
      y1_pattern = OpTypePattern('*')
      z_pattern = OpTypePattern('*')
      add_pattern = OpTypePattern('+', inputs=[y1_pattern, z_pattern])
  the matching op of `y1_pattern` is `split`, and the matching tensor of
  `y1_pattern` is `y1`, not `y0`.
"""
def __init__(self):
self._pattern_to_op_tensor = {}
self._name_to_pattern = {}
def add(self, pattern, op, tensor):
self._pattern_to_op_tensor[pattern] = op, tensor
if pattern.name is not None:
if pattern.name in self._name_to_pattern:
raise ValueError(
'Name %s is already bound to another pattern' % pattern.name)
self._name_to_pattern[pattern.name] = pattern
def _to_pattern(self, pattern_or_name):
if isinstance(pattern_or_name, Pattern):
return pattern_or_name
if isinstance(pattern_or_name, str):
if pattern_or_name not in self._name_to_pattern:
return None
return self._name_to_pattern[pattern_or_name]
raise ValueError('pattern_or_name has type %s. Expect Pattern or str.' %
type(pattern_or_name))
def _get_op_tensor(self, pattern_or_name):
pattern = self._to_pattern(pattern_or_name)
if pattern is None:
return None
if pattern not in self._pattern_to_op_tensor:
return None
return self._pattern_to_op_tensor[pattern]
def get_op(self, pattern_or_name):
op_tensor = self._get_op_tensor(pattern_or_name)
return op_tensor[0] if op_tensor else None
def get_tensor(self, pattern_or_name):
op_tensor = self._get_op_tensor(pattern_or_name)
return op_tensor[1] if op_tensor else None
def merge_from(self, other_match_result):
# pylint: disable=protected-access
self._pattern_to_op_tensor.update(other_match_result._pattern_to_op_tensor)
self._name_to_pattern.update(other_match_result._name_to_pattern)
# pylint: enable=protected-access
class GraphMatcher(object):
"""Checks if a particular subgraph matches a given pattern."""
def __init__(self, pattern):
"""Initializes a GraphMatcher.
Args:
pattern: The `Pattern` against which `GraphMatcher` matches
subgraphs.
"""
self._pattern = pattern
def _match_pattern(self, pattern, op, tensor):
"""Returns whether an TF expression rooted at `op` matches `pattern`.
If there is a match, adds to `self._match_result` the matching op and tensor
with key `pattern`.
Args:
pattern: An `Pattern`.
op: A `tf.Operation` to match against the pattern.
tensor: the output `tf.Tensor` of `op` that is used by the matching op of
`pattern`'s parent. Can be None if `pattern` is already the root of the
pattern tree.
Returns:
      True if a TF expression rooted at `op` matches `pattern`.
"""
match_result = pattern.match(op, tensor)
if match_result is None:
return False
self._match_result.merge_from(match_result)
return True
def match_op(self, op):
"""Matches `op` against `self._pattern`.
Args:
op: `tf.Operation` to match against the pattern.
Returns:
Returns a `MatchResult` if `op` matches the pattern; otherwise, returns
None.
"""
self._match_result = MatchResult()
if not self._match_pattern(self._pattern, op, tensor=None):
return None
return self._match_result
def match_ops(self, ops):
"""Matches each operation in `ops` against `self._pattern`.
Args:
ops: collection of `tf.Operation` to match against the pattern.
Yields:
`MatchResult` for each `tf.Operation` that matches the pattern.
"""
for op in ops:
match_result = self.match_op(op)
if match_result:
yield match_result
def match_graph(self, graph):
"""Matches each operation in `graph` against `self._pattern`.
Args:
graph: `tf.Graph` containing operations to match.
Yields:
`MatchResult` for each `tf.Operation` in `graph` that matches the pattern.
"""
# Python 3.3.2+ implements `yield from`, but for now:
for match_result in self.match_ops(graph.get_operations()):
yield match_result
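# Hedged usage sketch (added for exposition; not part of the original module).
# It matches a Conv2D feeding a Relu or Relu6, binding the conv node by name
# so it can be recovered from each MatchResult; the conv's own inputs are
# left unconstrained.
def _example_match_conv_relu(graph):
  """Yields every Conv2D op in `graph` whose output feeds a Relu/Relu6."""
  conv_pattern = OpTypePattern('Conv2D', name='conv')
  relu_pattern = OpTypePattern('Relu|Relu6', inputs=[conv_pattern])
  matcher = GraphMatcher(relu_pattern)
  for match_result in matcher.match_graph(graph):
    # get_op accepts either the Pattern object or its registered name.
    yield match_result.get_op('conv')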
|
tensorflow-master
|
tensorflow/contrib/quantize/python/graph_matcher.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for folding batch norm layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import fold_batch_norms
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import saver as saver_lib
batch_norm = layers.batch_norm
conv2d = layers.conv2d
fully_connected = layers.fully_connected
separable_conv2d = layers.separable_conv2d
# TODO(suharshs): Use parameterized test once OSS TF supports it.
class FoldBatchNormsTest(test_util.TensorFlowTestCase):
def _RunTestOverParameters(self, test_fn):
parameters_list = [
# (relu, relu_op_name, with_bypass, has_scaling, fused_batch_norm,
        # freeze_batch_norm_delay, insert_identity_node)
(nn_ops.relu6, 'Relu6', False, False, False, 100, False),
(nn_ops.relu, 'Relu', False, False, False, None, False),
(nn_ops.relu6, 'Relu6', True, False, False, 100, False),
(nn_ops.relu, 'Relu', True, False, False, None, False),
(nn_ops.relu6, 'Relu6', False, True, False, 100, False),
(nn_ops.relu, 'Relu', False, True, False, None, False),
(nn_ops.relu6, 'Relu6', True, True, False, 100, False),
(nn_ops.relu, 'Relu', True, True, False, None, False),
# Fused batch norm always has scaling enabled.
(nn_ops.relu6, 'Relu6', False, True, True, None, False),
(nn_ops.relu, 'Relu', False, True, True, 100, False),
(nn_ops.relu6, 'Relu6', True, True, True, None, False),
(nn_ops.relu, 'Relu', True, True, True, 100, False),
(nn_ops.relu6, 'Relu6', False, True, True, None, True),
(nn_ops.relu, 'Relu', False, True, True, 100, True),
(nn_ops.relu6, 'Relu6', True, True, True, None, True),
(nn_ops.relu, 'Relu', True, True, True, 100, True),
]
for params in parameters_list:
test_fn(params[0], params[1], params[2], params[3], params[4], params[5],
params[6])
def _TestFoldConv2d(self, relu, relu_op_name, with_bypass, has_scaling,
fused_batch_norm, freeze_batch_norm_delay,
insert_identity_node):
"""Tests folding cases: inputs -> Conv2d with batch norm -> Relu*.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
has_scaling: Bool, when true the batch norm has scaling.
fused_batch_norm: Bool, when true the batch norm is fused.
freeze_batch_norm_delay: None or the number of steps after which training
switches to using frozen mean and variance
insert_identity_node: Bool, insert identity node between conv and batch
norm
"""
g = ops.Graph()
with g.as_default():
batch_size, height, width = 5, 128, 128
inputs = array_ops.zeros((batch_size, height, width, 3))
out_depth = 3 if with_bypass else 32
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else relu
name = 'test/test2' if with_bypass else 'test'
if insert_identity_node:
with g.name_scope(name):
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=None,
biases_initializer=None)
conv_out = array_ops.identity(node, name='conv_out')
node = batch_norm(
conv_out,
center=True,
scale=has_scaling,
decay=1.0 - 0.003,
fused=fused_batch_norm)
if activation_fn is not None:
node = activation_fn(node)
conv_name = name + '/Conv'
else:
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(
scale=has_scaling, fused=fused_batch_norm),
scope=name)
conv_name = name
if with_bypass:
node = math_ops.add(inputs, node, name='test/AddV2')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(
g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)
folded_mul = g.get_operation_by_name(conv_name + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul, [
conv_name + '/correction_mult',
self._BatchNormMultiplierName(conv_name, has_scaling, fused_batch_norm)
])
self._AssertOutputGoesToOps(folded_mul, g, [conv_name + '/Conv2D_Fold'])
folded_conv = g.get_operation_by_name(conv_name + '/Conv2D_Fold')
self.assertEqual(folded_conv.type, 'Conv2D')
self._AssertInputOpsAre(folded_conv,
[conv_name + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g, [conv_name + '/post_conv_mul'])
folded_add = g.get_operation_by_name(conv_name + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add, [
conv_name + '/correction_add',
self._BathNormBiasName(conv_name, fused_batch_norm)
])
output_op_names = ['test/AddV2' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
if freeze_batch_norm_delay is not None:
self._AssertMovingAveragesAreFrozen(g, name)
for op in g.get_operations():
self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)
def testFoldConv2d(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunTestOverParameters(self._TestFoldConv2d)
def testMultipleLayerConv2d(self,
relu=nn_ops.relu,
relu_op_name='Relu',
has_scaling=True,
fused_batch_norm=False,
freeze_batch_norm_delay=None,
insert_identity_node=False):
"""Tests folding cases for a network with multiple layers.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
has_scaling: Bool, when true the batch norm has scaling.
fused_batch_norm: Bool, when true the batch norm is fused.
freeze_batch_norm_delay: None or the number of steps after which training
switches to using frozen mean and variance
insert_identity_node: Bool, insert identity node between conv and batch
norm
"""
g = ops.Graph()
with g.as_default():
batch_size, height, width = 5, 128, 128
inputs = array_ops.zeros((batch_size, height, width, 3))
out_depth = 3
stride = 1
activation_fn = relu
scope = 'topnet/testnet'
with variable_scope.variable_scope(scope, [inputs]):
layer1 = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=None,
scope='testnet/layer1')
# Add bn and relu with different scope
layer1 = batch_norm(
layer1, scale=has_scaling, fused=fused_batch_norm, scope='layer1')
layer1 = activation_fn(layer1)
layer2 = conv2d(
layer1,
2 * out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(
scale=has_scaling, fused=fused_batch_norm),
scope='testnet/layer2')
# Add bn and relu with different scope
layer2 = batch_norm(
layer2, scale=has_scaling, fused=fused_batch_norm, scope='layer2')
_ = activation_fn(layer2)
scope = 'topnet/testnet/testnet/layer2'
fold_batch_norms.FoldBatchNorms(
g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)
folded_mul = g.get_operation_by_name(scope + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul, [
scope + '/correction_mult',
self._BatchNormMultiplierName(scope, has_scaling, fused_batch_norm)
])
self._AssertOutputGoesToOps(folded_mul, g, [scope + '/Conv2D_Fold'])
folded_conv = g.get_operation_by_name(scope + '/Conv2D_Fold')
self.assertEqual(folded_conv.type, 'Conv2D')
# Remove :0 at end of name for tensor prior to comparison
self._AssertInputOpsAre(folded_conv,
[scope + '/mul_fold', layer1.name[:-2]])
self._AssertOutputGoesToOps(folded_conv, g, [scope + '/post_conv_mul'])
folded_add = g.get_operation_by_name(scope + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add, [
scope + '/correction_add',
self._BathNormBiasName(scope, fused_batch_norm)
])
output_op_names = [scope + '/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
if freeze_batch_norm_delay is not None:
self._AssertMovingAveragesAreFrozen(g, scope)
for op in g.get_operations():
self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)
def _TestFoldConv2dUnknownShape(self,
relu,
relu_op_name,
with_bypass,
has_scaling,
fused_batch_norm,
freeze_batch_norm_delay,
insert_identity_node=False):
"""Tests folding cases: inputs -> Conv2d with batch norm -> Relu*.
Tests that folding works even with an input shape where some dimensions are
not known (i.e. None).
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
has_scaling: Bool, when true the batch norm has scaling.
fused_batch_norm: Bool, when true the batch norm is fused.
freeze_batch_norm_delay: None or the number of steps after which training
switches to using frozen mean and variance
insert_identity_node: Bool, insert identity node between conv and batch
norm
"""
g = ops.Graph()
with g.as_default():
inputs = array_ops.placeholder(dtypes.float32, shape=(5, None, None, 3))
out_depth = 3 if with_bypass else 32
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else relu
scope = 'test/test2' if with_bypass else 'test'
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(
scale=has_scaling, fused=fused_batch_norm),
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/AddV2')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(
g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)
folded_mul = g.get_operation_by_name(scope + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul, [
scope + '/correction_mult',
self._BatchNormMultiplierName(scope, has_scaling, fused_batch_norm)
])
self._AssertOutputGoesToOps(folded_mul, g, [scope + '/Conv2D_Fold'])
folded_conv = g.get_operation_by_name(scope + '/Conv2D_Fold')
self.assertEqual(folded_conv.type, 'Conv2D')
self._AssertInputOpsAre(folded_conv, [scope + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g, [scope + '/post_conv_mul'])
folded_add = g.get_operation_by_name(scope + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add, [
scope + '/correction_add',
self._BathNormBiasName(scope, fused_batch_norm)
])
output_op_names = ['test/AddV2' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
if freeze_batch_norm_delay is not None:
self._AssertMovingAveragesAreFrozen(g, scope)
for op in g.get_operations():
self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)
def testFoldConv2dUnknownShape(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunTestOverParameters(self._TestFoldConv2dUnknownShape)
def _TestFoldFullyConnectedLayer(
self, relu, relu_op_name, with_bypass, has_scaling, fused_batch_norm,
freeze_batch_norm_delay, insert_identity_node):
"""Tests folding cases: inputs -> FC with batch norm -> Relu*.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
has_scaling: Bool, when true the batch norm has scaling.
fused_batch_norm: Bool, when true the batch norm is fused.
freeze_batch_norm_delay: None or the number of steps after which training
switches to using frozen mean and variance
insert_identity_node: Bool, insert identity node between conv and batch
norm
"""
g = ops.Graph()
with g.as_default():
batch_size, depth = 5, 256
inputs = array_ops.zeros((batch_size, depth))
out_depth = 256 if with_bypass else 128
activation_fn = None if with_bypass else relu
name = 'test/test2' if with_bypass else 'test'
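      # Note (added for clarity): the assignment below overrides the
      # insert_identity_node argument, so the fully connected test exercises
      # the identity-node path exactly when the batch norm is fused.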
insert_identity_node = fused_batch_norm
if insert_identity_node:
with g.name_scope(name):
node = fully_connected(
inputs,
out_depth,
weights_initializer=self._WeightInit(0.03),
activation_fn=None,
normalizer_fn=None,
biases_initializer=None)
node = array_ops.identity(node, name='fc_out')
node = batch_norm(
node,
center=True,
scale=has_scaling,
decay=1.0 - 0.003,
fused=fused_batch_norm)
if activation_fn is not None:
node = activation_fn(node)
fc_name = name + '/fully_connected'
else:
node = fully_connected(
inputs,
out_depth,
weights_initializer=self._WeightInit(0.03),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(
scale=has_scaling, fused=fused_batch_norm),
scope=name)
fc_name = name
if with_bypass:
node = math_ops.add(inputs, node, name='test/AddV2')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(
g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)
folded_mul = g.get_operation_by_name(fc_name + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
self._AssertInputOpsAre(folded_mul, [
fc_name + '/correction_mult',
self._BatchNormMultiplierName(fc_name, has_scaling, fused_batch_norm)
])
self._AssertOutputGoesToOps(folded_mul, g, [fc_name + '/MatMul_Fold'])
folded_conv = g.get_operation_by_name(fc_name + '/MatMul_Fold')
self.assertEqual(folded_conv.type, 'MatMul')
self._AssertInputOpsAre(folded_conv,
[fc_name + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g, [fc_name + '/post_conv_mul'])
folded_add = g.get_operation_by_name(fc_name + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add, [
fc_name + '/correction_add',
self._BathNormBiasName(fc_name, fused_batch_norm)
])
output_op_names = ['test/AddV2' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
if freeze_batch_norm_delay is not None:
self._AssertMovingAveragesAreFrozen(g, name)
for op in g.get_operations():
self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)
def testFoldFullyConnectedLayer(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunTestOverParameters(self._TestFoldFullyConnectedLayer)
def _TestFoldDepthwiseConv2d(self, relu, relu_op_name, with_bypass,
has_scaling, fused_batch_norm,
freeze_batch_norm_delay, insert_identity_node):
"""Tests folding: inputs -> DepthwiseConv2d with batch norm -> Relu*.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
has_scaling: Bool, when true the batch norm has scaling.
fused_batch_norm: Bool, when true the batch norm is fused.
      freeze_batch_norm_delay: None or the number of steps after which training
        switches to using frozen mean and variance
      insert_identity_node: Bool, insert identity node between conv and batch
        norm
"""
g = ops.Graph()
with g.as_default():
batch_size, height, width = 5, 128, 128
inputs = array_ops.zeros((batch_size, height, width, 3))
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else relu
name = 'test/test2' if with_bypass else 'test'
if insert_identity_node:
with g.name_scope(name):
node = separable_conv2d(
inputs,
None, [5, 5],
stride=stride,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=None,
biases_initializer=None)
node = array_ops.identity(node, name='sep_conv_out')
node = batch_norm(
node,
center=True,
scale=has_scaling,
decay=1.0 - 0.003,
fused=fused_batch_norm)
if activation_fn is not None:
node = activation_fn(node)
sep_conv_name = name + '/SeparableConv2d'
else:
node = separable_conv2d(
inputs,
None, [5, 5],
stride=stride,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(
scale=has_scaling, fused=fused_batch_norm),
scope=name)
sep_conv_name = name
if with_bypass:
node = math_ops.add(inputs, node, name='test/AddV2')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(
g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)
folded_mul = g.get_operation_by_name(sep_conv_name + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
if fused_batch_norm:
scale_reshape_op_name = sep_conv_name + '/BatchNorm_Fold/scale_reshape'
else:
scale_reshape_op_name = sep_conv_name + '/scale_reshape'
self._AssertInputOpsAre(
folded_mul, [sep_conv_name + '/correction_mult', scale_reshape_op_name])
self._AssertOutputGoesToOps(folded_mul, g,
[sep_conv_name + '/depthwise_Fold'])
scale_reshape = g.get_operation_by_name(scale_reshape_op_name)
self.assertEqual(scale_reshape.type, 'Reshape')
self._AssertInputOpsAre(scale_reshape, [
self._BatchNormMultiplierName(sep_conv_name, has_scaling,
fused_batch_norm),
scale_reshape_op_name + '/shape'
])
self._AssertOutputGoesToOps(scale_reshape, g, [sep_conv_name + '/mul_fold'])
folded_conv = g.get_operation_by_name(sep_conv_name + '/depthwise_Fold')
self.assertEqual(folded_conv.type, 'DepthwiseConv2dNative')
self._AssertInputOpsAre(folded_conv,
[sep_conv_name + '/mul_fold', inputs.op.name])
self._AssertOutputGoesToOps(folded_conv, g,
[sep_conv_name + '/post_conv_mul'])
folded_add = g.get_operation_by_name(sep_conv_name + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add, [
sep_conv_name + '/correction_add',
self._BathNormBiasName(sep_conv_name, fused_batch_norm)
])
output_op_names = ['test/AddV2' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
if freeze_batch_norm_delay is not None:
self._AssertMovingAveragesAreFrozen(g, name)
for op in g.get_operations():
self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)
def testFoldDepthwiseConv2d(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunTestOverParameters(self._TestFoldDepthwiseConv2d)
def _TestFoldAtrousConv2d(self, relu, relu_op_name, with_bypass, has_scaling,
fused_batch_norm, freeze_batch_norm_delay,
insert_identity_node):
"""Tests folding: inputs -> AtrousConv2d with batch norm -> Relu*.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
has_scaling: Bool, when true the batch norm has scaling.
fused_batch_norm: Bool, when true the batch norm is fused.
freeze_batch_norm_delay: None or the number of steps after which training
switches to using frozen mean and variance
insert_identity_node: Bool, insert identity node between conv and batch
norm
"""
g = ops.Graph()
with g.as_default():
batch_size, height, width = 5, 128, 128
inputs = array_ops.zeros((batch_size, height, width, 3))
dilation_rate = 2
activation_fn = None if with_bypass else relu
name = 'test/test2' if with_bypass else 'test'
if insert_identity_node:
with g.name_scope(name):
node = separable_conv2d(
inputs,
None, [3, 3],
rate=dilation_rate,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=None,
biases_initializer=None)
node = array_ops.identity(node, name='sep_conv_out')
node = batch_norm(
node,
center=True,
scale=has_scaling,
decay=1.0 - 0.003,
fused=fused_batch_norm)
if activation_fn is not None:
node = activation_fn(node)
sep_conv_name = name + '/SeparableConv2d'
else:
node = separable_conv2d(
inputs,
None, [3, 3],
rate=dilation_rate,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(
scale=has_scaling, fused=fused_batch_norm),
scope=name)
sep_conv_name = name
if with_bypass:
node = math_ops.add(inputs, node, name='test/AddV2')
relu(node, name='test/' + relu_op_name)
fold_batch_norms.FoldBatchNorms(
g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)
folded_mul = g.get_operation_by_name(sep_conv_name + '/mul_fold')
self.assertEqual(folded_mul.type, 'Mul')
if fused_batch_norm:
scale_reshape_op_name = sep_conv_name + '/BatchNorm_Fold/scale_reshape'
else:
scale_reshape_op_name = sep_conv_name + '/scale_reshape'
self._AssertInputOpsAre(
folded_mul, [sep_conv_name + '/correction_mult', scale_reshape_op_name])
self._AssertOutputGoesToOps(folded_mul, g,
[sep_conv_name + '/depthwise_Fold'])
scale_reshape = g.get_operation_by_name(scale_reshape_op_name)
self.assertEqual(scale_reshape.type, 'Reshape')
self._AssertInputOpsAre(scale_reshape, [
self._BatchNormMultiplierName(sep_conv_name, has_scaling,
fused_batch_norm),
scale_reshape_op_name + '/shape'
])
self._AssertOutputGoesToOps(scale_reshape, g, [sep_conv_name + '/mul_fold'])
folded_conv = g.get_operation_by_name(sep_conv_name + '/depthwise_Fold')
self.assertEqual(folded_conv.type, 'DepthwiseConv2dNative')
self._AssertInputOpsAre(folded_conv, [
sep_conv_name + '/mul_fold', sep_conv_name + '/depthwise/SpaceToBatchND'
])
if fused_batch_norm:
self._AssertOutputGoesToOps(folded_conv, g,
[sep_conv_name + '/BatchToSpaceND_Fold'])
else:
self._AssertOutputGoesToOps(
folded_conv, g, [sep_conv_name + '/depthwise/BatchToSpaceND_Fold'])
folded_add = g.get_operation_by_name(sep_conv_name + '/add_fold')
self.assertEqual(folded_add.type, 'Add')
self._AssertInputOpsAre(folded_add, [
sep_conv_name + '/correction_add',
self._BathNormBiasName(sep_conv_name, fused_batch_norm)
])
output_op_names = ['test/AddV2' if with_bypass else 'test/' + relu_op_name]
self._AssertOutputGoesToOps(folded_add, g, output_op_names)
if freeze_batch_norm_delay is not None:
self._AssertMovingAveragesAreFrozen(g, name)
for op in g.get_operations():
self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)
def testFoldAtrousConv2d(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunTestOverParameters(self._TestFoldAtrousConv2d)
def _TestCompareFoldAndUnfolded(self,
relu,
relu_op_name,
with_bypass,
has_scaling,
fused_batch_norm,
freeze_batch_norm_delay,
insert_identity_node=False):
"""Tests that running folded and unfolded BN returns the same results.
Args:
relu: Callable that returns an Operation, a factory method for the Relu*.
relu_op_name: String, name of the Relu* operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Relu*.
has_scaling: Bool, when true the batch norm has scaling.
fused_batch_norm: Bool, when true the batch norm is fused.
freeze_batch_norm_delay: None or the number of steps after which training
switches to using frozen mean and variance
insert_identity_node: Bool, insert identity node between conv and batch
norm
"""
random_seed.set_random_seed(1234)
unfolded_g = ops.Graph()
with unfolded_g.as_default():
batch_size, height, width = 5, 128, 128
inputs = random_ops.random_uniform(
(batch_size, height, width, 3), dtype=dtypes.float32, seed=1234)
out_depth = 3 if with_bypass else 32
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else relu
scope = 'test/test2' if with_bypass else 'test'
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(
scale=has_scaling, fused=fused_batch_norm),
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/AddV2')
relu_node = relu(node, name='test/' + relu_op_name)
folded_g = self._CopyGraph(unfolded_g)
with folded_g.as_default():
fold_batch_norms.FoldBatchNorms(
folded_g,
is_training=True,
freeze_batch_norm_delay=freeze_batch_norm_delay)
with session.Session(graph=unfolded_g) as sess:
sess.run(variables.global_variables_initializer())
grad_node = gradients.gradients(relu_node, inputs)
results = sess.run([relu_node, grad_node])
unfolded_forward, unfolded_backward = results[0], results[1]
with session.Session(graph=folded_g) as sess:
sess.run(variables.global_variables_initializer())
relu_node = folded_g.get_tensor_by_name(relu_node.name)
inputs = folded_g.get_tensor_by_name(inputs.name)
grad_node = gradients.gradients(relu_node, inputs)
results = sess.run([relu_node, grad_node])
folded_forward, folded_backward = results[0], results[1]
# Check that the folded and unfolded results match.
self.assertAllClose(unfolded_forward, folded_forward, atol=1e-3)
self.assertAllClose(unfolded_backward, folded_backward, atol=1e-3)
def testCompareFoldAndUnfolded(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunTestOverParameters(self._TestCompareFoldAndUnfolded)
def _BatchNormParams(self, scale=True, fused=False):
return {
'center': True,
'scale': scale,
'decay': 1.0 - 0.003,
'fused': fused
}
def _BatchNormMultiplierName(self, scope, has_scaling, fused):
if has_scaling:
if fused:
return scope + '/BatchNorm_Fold/mul'
return scope + '/BatchNorm/batchnorm_1/mul'
return scope + '/BatchNorm/batchnorm_1/Rsqrt'
def _BathNormBiasName(self, scope, fused):
if fused:
return scope + '/BatchNorm_Fold/bias'
return scope + '/BatchNorm/batchnorm_1/sub'
def _WeightInit(self, stddev):
"""Returns a truncated normal variable initializer.
Function is defined purely to shorten the name so that it stops wrapping.
Args:
stddev: Standard deviation of normal variable.
Returns:
An initializer that initializes with a truncated normal variable.
"""
return init_ops.truncated_normal_initializer(stddev=stddev, seed=1234)
def _AssertInputOpsAre(self, op, in_op_names):
"""Asserts that all inputs to op come from in_op_names (disregarding order).
Args:
op: Operation to check inputs for.
in_op_names: List of strings, operations where all op's inputs should
come from.
"""
expected_inputs = [in_op_name + ':0' for in_op_name in in_op_names]
self.assertItemsEqual([t.name for t in op.inputs], expected_inputs)
def _AssertOutputGoesToOps(self, op, graph, out_op_names):
"""Asserts that outputs from op go to out_op_names (and perhaps others).
Args:
op: Operation to check outputs for.
graph: Graph where output operations are located.
out_op_names: List of strings, operations where op's outputs should go.
"""
for out_op_name in out_op_names:
out_op = graph.get_operation_by_name(out_op_name)
self.assertIn(op.outputs[0].name, [str(t.name) for t in out_op.inputs])
def _AssertMovingAveragesAreFrozen(self, graph, scope):
"""Asserts to check if moving mean and variance are frozen.
Args:
graph: Graph where the operations are located.
scope: Scope of batch norm op
"""
moving_average_mult = graph.get_operation_by_name(
scope + '/BatchNorm/AssignMovingAvg/mul')
self.assertTrue(
moving_average_mult.inputs[1].name.find('freeze_moving_mean/Merge') > 0)
moving_var_mult = graph.get_operation_by_name(
scope + '/BatchNorm/AssignMovingAvg_1/mul')
self.assertTrue(
moving_var_mult.inputs[1].name.find('freeze_moving_var/Merge') > 0)
def _CopyGraph(self, graph):
"""Return a copy of graph."""
meta_graph = saver_lib.export_meta_graph(
graph=graph, collection_list=graph.get_all_collection_keys())
graph_copy = ops.Graph()
with graph_copy.as_default():
_ = saver_lib.import_meta_graph(meta_graph)
return graph_copy
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/quantize/python/fold_batch_norms_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities used across this package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
# Skip all operations that are backprop related or export summaries.
SKIPPED_PREFIXES = (
'gradients/', 'RMSProp/', 'Adagrad/', 'Const_', 'HistogramSummary',
'ScalarSummary')
# Valid activation ops for quantization end points.
_ACTIVATION_OP_SUFFIXES = ['Relu6', 'Relu', 'Identity']
# Regular expression for recognizing nodes that are part of batch norm group.
_BATCHNORM_RE = re.compile(r'^(.*)BatchNorm/batchnorm')
def BatchNormGroups(graph):
"""Finds batch norm layers, returns their prefixes as a list of strings.
Args:
graph: Graph to inspect.
Returns:
List of strings, prefixes of batch norm group names found.
"""
bns = []
for op in graph.get_operations():
match = _BATCHNORM_RE.search(op.name)
if match:
bn = match.group(1)
if not bn.startswith(SKIPPED_PREFIXES):
bns.append(bn)
# Filter out duplicates.
return list(collections.OrderedDict.fromkeys(bns))
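# Illustration of the expected output (hypothetical op names): a graph with
#   'conv1/BatchNorm/batchnorm_1/mul' and
#   'gradients/conv1/BatchNorm/batchnorm_1/mul_grad/Mul'
# yields ['conv1/']; the gradient op is dropped via SKIPPED_PREFIXES and
# repeated prefixes are de-duplicated while preserving first-seen order.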
def GetEndpointActivationOp(graph, prefix):
"""Returns an Operation with the given prefix and a valid end point suffix.
Args:
graph: Graph where to look for the operation.
prefix: String, prefix of Operation to return.
Returns:
The Operation with the given prefix and a valid end point suffix or None if
there are no matching operations in the graph for any valid suffix
"""
for suffix in _ACTIVATION_OP_SUFFIXES:
activation = _GetOperationByNameDontThrow(graph, prefix + suffix)
if activation:
return activation
return None
def _GetOperationByNameDontThrow(graph, name):
"""Returns an Operation with the given name.
Args:
graph: Graph where to look for the operation.
name: String, name of Operation to return.
Returns:
The Operation with the given name. None if the name does not correspond to
any operation in the graph
"""
try:
return graph.get_operation_by_name(name)
except KeyError:
return None
def CreateOrGetQuantizationStep():
"""Returns a Tensor of the number of steps the quantized graph has run.
Returns:
Quantization step Tensor.
"""
quantization_step_name = 'fake_quantization_step'
quantization_step_tensor_name = quantization_step_name + '/Identity:0'
g = ops.get_default_graph()
try:
return g.get_tensor_by_name(quantization_step_tensor_name)
except KeyError:
# Create in proper graph and base name_scope.
with g.name_scope(None):
quantization_step_tensor = variable_scope.get_variable(
quantization_step_name,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
aggregation=variable_scope.VariableAggregation.ONLY_FIRST_REPLICA)
with g.name_scope(quantization_step_tensor.op.name + '/'):
# We return the incremented variable tensor. Since this is used in conds
# for quant_delay and freeze_bn_delay, it will run once per graph
# execution. We return an identity to force resource variables and
# normal variables to return a tensor of the same name.
return array_ops.identity(
state_ops.assign_add(quantization_step_tensor, 1))
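# A sketch of the intended use (mirroring _ComputeBatchNormCorrections in
# fold_batch_norms.py): compare the counter against a delay to decide when to
# switch behavior, e.g.
#   use_mv_avg = math_ops.greater_equal(
#       CreateOrGetQuantizationStep(), freeze_batch_norm_delay)
# Because the returned tensor wraps an assign_add, each evaluation of the
# graph advances the counter by one.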
def DropStringPrefix(s, prefix):
"""If the string starts with this prefix, drops it."""
if s.startswith(prefix):
return s[len(prefix):]
else:
return s
def RerouteTensor(t0, t1, can_modify=None):
"""Reroute the end of the tensor t0 to the ends of the tensor t1.
Args:
t0: a tf.Tensor.
t1: a tf.Tensor.
    can_modify: iterable of operations which can be modified. Any operation
      outside of can_modify will be left untouched by this function.
Returns:
The number of individual modifications made by the function.
"""
nb_update_inputs = 0
consumers = t1.consumers()
if can_modify is not None:
consumers = [c for c in consumers if c in can_modify]
consumers_indices = {}
for c in consumers:
consumers_indices[c] = [i for i, t in enumerate(c.inputs) if t is t1]
for c in consumers:
for i in consumers_indices[c]:
c._update_input(i, t0) # pylint: disable=protected-access
nb_update_inputs += 1
return nb_update_inputs
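# A minimal sketch of RerouteTensor in action (hypothetical graph; this
# helper is illustrative only and is not called elsewhere in this module).
def _RerouteTensorExample():
  """Reroutes the consumer of `b` so that it reads from `a` instead."""
  g = ops.Graph()
  with g.as_default():
    a = array_ops.zeros([], name='a')
    b = array_ops.ones([], name='b')
    c = array_ops.identity(b, name='c')  # c's op consumes b at input index 0.
    num_modified = RerouteTensor(a, b)   # c now reads from a; returns 1.
  return c, num_modified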
|
tensorflow-master
|
tensorflow/contrib/quantize/python/common.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic to fold batch norm into preceding convolution or FC layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
def FoldBatchNorms(graph, is_training, freeze_batch_norm_delay=None):
"""Finds batch norm layers and folds them into preceding layers.
Folding only affects the following layers: Conv2D, fully connected, depthwise
convolution.
Args:
graph: Graph to walk and modify.
is_training: Bool, true if training.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization. This value is used
only when is_training is True.
Raises:
ValueError: When batch norm folding fails.
"""
_FoldFusedBatchNorms(
graph, is_training, freeze_batch_norm_delay=freeze_batch_norm_delay)
_FoldUnfusedBatchNorms(
graph,
is_training=is_training,
freeze_batch_norm_delay=freeze_batch_norm_delay)
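# A sketch of typical usage (contrib.layers-style construction, as in the
# accompanying tests; the names below are illustrative):
#   g = ops.Graph()
#   with g.as_default():
#     net = layers.conv2d(images, 32, [5, 5], scope='conv1',
#                         normalizer_fn=layers.batch_norm)
#   FoldBatchNorms(g, is_training=True, freeze_batch_norm_delay=10000)
# Afterwards the graph contains folded nodes such as 'conv1/mul_fold',
# 'conv1/Conv2D_Fold' and 'conv1/add_fold' feeding the activation.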
def _FoldFusedBatchNorms(graph, is_training, freeze_batch_norm_delay):
"""Finds fused batch norm layers and folds them into preceding layers.
Folding only affects the following layers: Conv2D, fully connected, depthwise
convolution.
Args:
graph: Graph to walk and modify.
is_training: Bool, true if training.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization.
Raises:
ValueError: When batch norm folding fails.
"""
for match in _FindFusedBatchNorms(graph):
scope, sep, _ = match.layer_op.name.rpartition('/')
# Make sure new ops are added to `graph` and put on the same device as
# `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope
# named `scope`. Otherwise, TF creates a unique scope whose name starts with
# `scope`.
with graph.as_default(), graph.name_scope(scope + sep):
with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):
# new weights = old weights * gamma / sqrt(variance + epsilon)
# new biases = -mean * gamma / sqrt(variance + epsilon) + beta
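        # Worked instance of the algebra above (illustrative numbers): with
        # gamma = 2, variance = 3, epsilon = 1, mean = 4, beta = 5:
        #   multiplier = 2 / sqrt(3 + 1) = 1.0
        #   bias       = 5 - 4 * 1.0     = 1.0
        # so BN(W x) folds into (W * 1.0) x + 1.0.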
multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(
match.variance_tensor + match.bn_op.get_attr('epsilon'))
bias_tensor = math_ops.subtract(
match.beta_tensor,
match.mean_tensor * multiplier_tensor,
name='bias')
correction_scale, correction_recip, correction_offset = None, None, None
if is_training:
correction_scale, correction_recip, correction_offset = (
_ComputeBatchNormCorrections(
context='',
match=match,
freeze_batch_norm_delay=freeze_batch_norm_delay))
# The shape of depthwise weights is different, so we need to reshape the
# multiplier_tensor to ensure that the scaled_weight_tensor has the
# expected shape.
weights = match.weight_tensor
if match.layer_op.type == 'DepthwiseConv2dNative':
new_shape = [
match.weight_tensor.get_shape().as_list()[2],
match.weight_tensor.get_shape().as_list()[3]
]
multiplier_tensor = array_ops.reshape(
multiplier_tensor, new_shape, name='scale_reshape')
if correction_scale is not None:
correction_scale = array_ops.reshape(
correction_scale, new_shape, name='correction_reshape')
if correction_scale is not None:
weights = math_ops.multiply(
correction_scale, weights, name='correction_mult')
scaled_weight_tensor = math_ops.multiply(
weights, multiplier_tensor, name='mul_fold')
new_layer_tensor = _CloneWithNewOperands(
match.layer_op, match.input_tensor, scaled_weight_tensor,
match.batch_to_space_op)
if correction_recip is not None:
new_layer_tensor = math_ops.multiply(
correction_recip, new_layer_tensor, name='post_conv_mul')
new_layer_tensor = math_ops.add(new_layer_tensor, (correction_offset),
'correction_add')
bias_add_tensor = math_ops.add(
new_layer_tensor, bias_tensor, name='add_fold')
nodes_modified_count = common.RerouteTensor(bias_add_tensor,
match.output_tensor)
if nodes_modified_count == 0:
raise ValueError('Folding batch norms failed, %s had no outputs.' %
match.output_tensor.name)
def _FindFusedBatchNorms(graph):
"""Finds all ops and tensors related to found FusedBatchNorms.
Args:
graph: Graph to inspect.
Returns:
_FusedBatchNormMatches.
"""
input_pattern = graph_matcher.OpTypePattern('*')
# In practice, the weight pattern can match a Variable or a SpaceToBatchND
# operation that follows a variable for atrous convolutions.
weight_pattern = graph_matcher.OpTypePattern('*')
gamma_pattern = graph_matcher.OpTypePattern('*')
beta_pattern = graph_matcher.OpTypePattern('*')
mean_pattern = graph_matcher.OpTypePattern('*')
variance_pattern = graph_matcher.OpTypePattern('*')
moving_average_pattern = graph_matcher.OpTypePattern('*')
bn_decay_pattern = graph_matcher.OpTypePattern('*')
layer_pattern = graph_matcher.OpTypePattern(
'Conv2D|DepthwiseConv2dNative|MatMul',
inputs=[input_pattern, weight_pattern])
batch_to_space_pattern = graph_matcher.OpTypePattern(
'BatchToSpaceND',
inputs=[
layer_pattern,
graph_matcher.OpTypePattern('*'),
graph_matcher.OpTypePattern('*')
])
# Identity between conv/matmul and bn
layer_pattern_with_identity = graph_matcher.OpTypePattern(
'Identity',
inputs=[
graph_matcher.OneofPattern([batch_to_space_pattern, layer_pattern])
])
layer_output_pattern = graph_matcher.OneofPattern(
[layer_pattern_with_identity, layer_pattern, batch_to_space_pattern])
# MatMul has a Reshape between it and FusedBatchNorm.
matmul_reshape_pattern = graph_matcher.OpTypePattern(
'Reshape',
inputs=[layer_output_pattern,
graph_matcher.OpTypePattern('*')])
batch_norm_pattern = graph_matcher.OpTypePattern(
'FusedBatchNorm|FusedBatchNormV3',
inputs=[
graph_matcher.OneofPattern(
[matmul_reshape_pattern, layer_output_pattern]), gamma_pattern,
beta_pattern, mean_pattern, variance_pattern
])
matmul_bn_output_reshape_pattern = graph_matcher.OpTypePattern(
'Reshape', inputs=[batch_norm_pattern,
graph_matcher.OpTypePattern('*')])
batch_norm_identity_pattern = graph_matcher.OpTypePattern(
'Identity', inputs=[batch_norm_pattern, matmul_bn_output_reshape_pattern])
bn_identity_matcher = graph_matcher.GraphMatcher(batch_norm_identity_pattern)
bn_matcher = graph_matcher.GraphMatcher(
graph_matcher.OneofPattern(
[matmul_bn_output_reshape_pattern, batch_norm_pattern]))
moving_average_sub_pattern = graph_matcher.OpTypePattern(
'Sub', inputs=[moving_average_pattern, batch_norm_pattern])
moving_average_mul_pattern = graph_matcher.OpTypePattern(
'Mul', inputs=[moving_average_sub_pattern, bn_decay_pattern])
moving_avg_mul_matcher = graph_matcher.GraphMatcher(
moving_average_mul_pattern)
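  # Taken together, the patterns above match subgraphs of the form (optional
  # pieces in brackets):
  #   (input, weight) -> Conv2D|DepthwiseConv2dNative|MatMul
  #     -> [BatchToSpaceND] -> [Identity] -> [Reshape]
  #     -> FusedBatchNorm|FusedBatchNormV3 -> [Reshape]
  # plus, in training graphs, the moving-average update
  #   Mul(Sub(moving_average, batch_statistic), bn_decay).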
def _GetLayerMatch(match_result):
"""Populates a layer match object containing ops/tensors for folding BNs.
Args:
match_result: Matched result from graph matcher
Returns:
layer_op: Matching conv/fc op prior to batch norm
BatchNormMatch: _BatchNormMatch containing all required batch norm
parameters.
"""
moving_mean_tensor = None
moving_variance_tensor = None
bn_decay_mean_tensor = None
bn_decay_var_tensor = None
batch_to_space_op = None
layer_op = match_result.get_op(layer_pattern)
layer_tensor = match_result.get_tensor(layer_pattern)
bn_id_op = match_result.get_op(batch_norm_identity_pattern)
bn_op = match_result.get_op(batch_norm_pattern)
if bn_id_op is None:
bn_id_op = bn_op
batch_epsilon = bn_op.get_attr('epsilon')
# In the MatMul case, the output of batch norm is reshaped back into a
# 2D tensor, so the output_tensor is the output of the Reshape op.
output_tensor = bn_op.outputs[0]
if layer_op.type == 'MatMul':
output_reshape_op = match_result.get_op(matmul_bn_output_reshape_pattern)
# If the matcher didn't match matmul_bn_output_reshape, there will be
# another match for this 'MatMul' later, so we can skip this one.
if output_reshape_op is None:
return None, None
output_tensor = output_reshape_op.outputs[0]
# Ensure that the output tensor has consumers, otherwise this is a dangling
# node and not a match.
if not output_tensor.consumers():
return None, None
batch_to_space_op = match_result.get_op(batch_to_space_pattern)
input_tensor = match_result.get_tensor(input_pattern)
weight_tensor = match_result.get_tensor(weight_pattern)
gamma_tensor = match_result.get_tensor(gamma_pattern)
beta_tensor = match_result.get_tensor(beta_pattern)
# FusedBatchNorm in training is different from that in inference. It takes
# empty 'mean' and empty 'variance', and produces the mean and the variance
# of the batch. Therefore, when is_training is true, mean_tensor and
# variance_tensor point to 1st and 2nd (0-based) output of bn_op,
# respectively; when is_training is false, they point to bn_op's inputs.
is_training = bn_op.get_attr('is_training')
if is_training:
# FusedBatchNormGrad doesn't compute gradients of the batch_mean and
# batch_variance outputs, so we need to substitute our own custom
# gradient.
# TODO(suharshs, raghuramank): Find a way to avoid needing this hack.
# pylint: disable=protected-access
bn_op._set_attr(
'_gradient_op_type',
attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))
# pylint: enable=protected-access
mean_tensor = bn_op.outputs[1]
      # The batch variance used during forward and backward prop is biased,
      # i.e. it is calculated as: V = sum((x(k) - mu)^2) / N. For the moving
      # average calculation, the variance is corrected by the term N/(N-1)
      # (Bessel's correction). The variance tensor read from FusedBatchNorm
      # has Bessel's correction applied, so we undo it here.
scope, sep, _ = bn_op.name.rpartition('/')
g = ops.get_default_graph()
with g.as_default(), g.name_scope(scope + sep):
n = math_ops.cast(
array_ops.size(layer_tensor) / array_ops.size(mean_tensor),
dtypes.float32)
variance_tensor = math_ops.multiply(
bn_op.outputs[2], (n - 1) / n, name='Undo_Bessel_Correction')
# TODO(suharshs): Find a way to get rid of this inner match.
for mul_match_result in moving_avg_mul_matcher.match_graph(graph):
sub_op = mul_match_result.get_op(moving_average_sub_pattern)
if sub_op.inputs[1].name == bn_op.outputs[1].name:
# During training: Batch Mean is bn_op.outputs[1]
moving_mean_tensor = sub_op.inputs[0]
bn_decay_mean_tensor = mul_match_result.get_tensor(bn_decay_pattern)
if sub_op.inputs[1].name == bn_op.outputs[2].name:
# During training: Batch Var is bn_op.outputs[2]
moving_variance_tensor = sub_op.inputs[0]
bn_decay_var_tensor = mul_match_result.get_tensor(bn_decay_pattern)
else:
mean_tensor = match_result.get_tensor(mean_pattern)
variance_tensor = match_result.get_tensor(variance_pattern)
return layer_op, _BatchNormMatch(
layer_op=layer_op,
bn_op=bn_op,
output_tensor=output_tensor,
input_tensor=input_tensor,
weight_tensor=weight_tensor,
gamma_tensor=gamma_tensor,
beta_tensor=beta_tensor,
mean_tensor=mean_tensor,
variance_tensor=variance_tensor,
moving_mean_tensor=moving_mean_tensor,
moving_variance_tensor=moving_variance_tensor,
bn_decay_mean_tensor=bn_decay_mean_tensor,
bn_decay_var_tensor=bn_decay_var_tensor,
batch_epsilon=batch_epsilon,
batch_to_space_op=batch_to_space_op)
layer_matches = []
# We use matched_layer_set to ensure that layers aren't matched multiple
# times.
matched_layer_set = set()
for match_result in bn_identity_matcher.match_graph(graph):
layer_op, layer_match = _GetLayerMatch(match_result)
if layer_op is not None:
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(layer_match)
for match_result in bn_matcher.match_graph(graph):
layer_op, layer_match = _GetLayerMatch(match_result)
if layer_op is not None:
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(layer_match)
return layer_matches
def _ComputeBatchNormCorrections(context, match, freeze_batch_norm_delay):
"""Computes batch norm correction params.
Before batch normalization is frozen:
We use batch statistics for batch norm.
correction_scale = sigma_b/sigma_mv
correction_recip = 1/correction_scale
correction_offset = 0
After batch normalization is frozen:
correction_scale = sigma_b/sigma_mv
correction_recip = 1
correction_offset = gamma*(mu_b/sigma_b-mu_mv/sigma_mv).
Batch norm is frozen if global_step > bn_freeze_delay.
The corrections ensure that:
  a) The weights are quantized after scaling by gamma/sigma_mv. This enables
  smoother training as the scaling on the weights changes slowly, rather than
  jumping across mini-batches.
  b) Changing the values of the corrections allows one to switch from using
  batch statistics to using the moving mean and variance, without requiring
  changes to batch_norm.
Args:
context: The scope under which we look for batch norm params
match: Object containing required batch norm tensors for correction
computation.
freeze_batch_norm_delay: Delay in steps at which computation switches
from regular batch norm to frozen mean and variance.
Returns:
A tuple of correction_scale, correction_recip, correction_offset
"""
g = ops.get_default_graph()
prefix = '' if not context else context
with g.name_scope(prefix + 'batch_norm_correction'):
recip_sigma_mv = math_ops.rsqrt(
match.moving_variance_tensor + match.batch_epsilon)
recip_sigma = math_ops.rsqrt(match.variance_tensor + match.batch_epsilon)
correction_scale = math_ops.divide(
recip_sigma_mv, recip_sigma, name='scale_compute')
correction_scale = array_ops.identity(
correction_scale, name='correction_scale')
correction_recip = math_ops.reciprocal(
correction_scale, name='reciprocal_compute')
correction_offset = math_ops.multiply(
match.gamma_tensor,
match.mean_tensor * recip_sigma -
match.moving_mean_tensor * recip_sigma_mv,
name='offset_compute')
if freeze_batch_norm_delay is not None:
use_mv_avg = math_ops.greater_equal(
common.CreateOrGetQuantizationStep(),
freeze_batch_norm_delay,
name='use_moving_average')
else:
use_mv_avg = False
bn_decay_zero = 0.0
bn_decay_mean_consumers = list(match.bn_decay_mean_tensor.consumers())
bn_decay_mean_out = utils.smart_cond(
use_mv_avg,
lambda: bn_decay_zero,
lambda: match.bn_decay_mean_tensor,
name='freeze_moving_mean')
common.RerouteTensor(
bn_decay_mean_out,
match.bn_decay_mean_tensor,
can_modify=bn_decay_mean_consumers)
bn_decay_var_consumers = list(match.bn_decay_var_tensor.consumers())
bn_decay_var_out = utils.smart_cond(
use_mv_avg,
lambda: bn_decay_zero,
lambda: match.bn_decay_var_tensor,
name='freeze_moving_var')
common.RerouteTensor(
bn_decay_var_out,
match.bn_decay_var_tensor,
can_modify=bn_decay_var_consumers)
correction_recip = utils.smart_cond(
use_mv_avg,
lambda: array_ops.ones(correction_scale.shape),
lambda: correction_recip,
name='correction_recip')
correction_offset = utils.smart_cond(
use_mv_avg,
lambda: correction_offset,
lambda: array_ops.zeros(correction_offset.shape),
name='correction_offset')
return correction_scale, correction_recip, correction_offset
def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor,
batch_to_space_op):
"""Clones layer_op with input_tensor and weight_tensor as new inputs."""
new_layer_name = layer_op.name.split('/')[-1] + '_Fold'
if layer_op.type == 'Conv2D':
return nn_ops.conv2d(
input_tensor,
weight_tensor,
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
use_cudnn_on_gpu=layer_op.get_attr('use_cudnn_on_gpu'),
data_format=layer_op.get_attr('data_format').decode(),
name=new_layer_name)
elif layer_op.type == 'MatMul':
return math_ops.matmul(
input_tensor,
weight_tensor,
transpose_a=layer_op.get_attr('transpose_a'),
transpose_b=layer_op.get_attr('transpose_b'),
name=new_layer_name)
elif layer_op.type == 'DepthwiseConv2dNative':
# We don't copy dilation rate because we reuse the input SpaceToBatch
# and create our own BatchToSpace operation below.
conv = nn.depthwise_conv2d(
input_tensor,
weight_tensor,
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
name=new_layer_name)
    # Copy the batch to space operation if we have an atrous convolution.
if batch_to_space_op:
batch_to_space_op = layer_op.outputs[0].consumers()[0]
# TODO(suharshs): It's hard to make this name match with the unfused name.
# Restructure this code to not rely on scope at all.
new_batch_to_space_name = batch_to_space_op.name.split('/')[-1] + '_Fold'
conv = array_ops.batch_to_space_nd(
conv,
batch_to_space_op.inputs[1],
batch_to_space_op.inputs[2],
name=new_batch_to_space_name)
return conv
else:
raise ValueError('Cannot handle operation of type: %s' % layer_op.type)
@ops.RegisterGradient('FoldFusedBatchNormGrad')
def _FoldFusedBatchNormGrad(op,
unused_grad_y,
grad_mean,
grad_var,
unused_1,
unused_2,
unused_3=None):
"""Gradient function for the FusedBatchNorm ops matched by _GetLayerMatch."""
x = op.inputs[0]
n = math_ops.cast(
array_ops.size(x) / array_ops.size(grad_mean), dtypes.float32)
dmean_dx = grad_mean / n
dvar_dx = 2 * grad_var * (x - op.outputs[1]) / (n - 1)
return (dmean_dx + dvar_dx), None, None, None, None
def _FoldUnfusedBatchNorms(graph, is_training, freeze_batch_norm_delay):
"""Finds unfused batch norm layers and folds them into preceding layers.
Folding only affects the following layers: Conv2D, fully connected, depthwise
convolution.
Args:
graph: Graph to walk and modify.
is_training: Bool, True if training.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization.
Raises:
ValueError: When batch norm folding fails.
"""
input_to_ops_map = input_to_ops.InputToOps(graph)
for bn in common.BatchNormGroups(graph):
has_scaling = _HasScaling(graph, input_to_ops_map, bn)
if not _IsValidUnfusedBatchNorm(graph, bn):
continue
# The mangling code intimately depends on BatchNorm node's internals.
original_op, folded_op = _CreateFoldedOp(
graph,
bn,
has_scaling=has_scaling,
freeze_batch_norm_delay=freeze_batch_norm_delay,
is_training=is_training)
activation = common.GetEndpointActivationOp(graph, bn)
if activation:
nodes_modified_count = common.RerouteTensor(
folded_op.outputs[0], original_op.outputs[0], can_modify=[activation])
if nodes_modified_count != 1:
raise ValueError('Unexpected inputs to op: %s' % activation.name)
continue
    # Treat consumer ops in bypass modules differently since they have Add
    # operations instead of Relu* above.
    # Make sure that the correct scope is selected for the bypass add: if the
    # batch norm scope is of the form str1/str2, the bypass add is at scope
    # str1. If the batch norm scope is just str1, the bypass add is at
    # scope ''. If there is no batch norm, then there is no bypass add.
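    # For example (matching the tests above): with bn = 'test/test2/' the
    # bypass add is looked up at 'test/AddV2'; with bn = 'test/' the regex
    # does not match and the lookup falls back to 'AddV2' at the root scope.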
add_bypass_ctx = ''
if bn:
try:
add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)
except AttributeError:
add_bypass_ctx = ''
if add_bypass_ctx:
add_bypass_ctx = add_bypass_ctx + '/'
add_bypass = graph.get_operation_by_name(add_bypass_ctx + 'AddV2')
nodes_modified_count = common.RerouteTensor(
folded_op.outputs[0], original_op.outputs[0], can_modify=[add_bypass])
if nodes_modified_count != 1:
raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)
def _IsValidUnfusedBatchNorm(graph, context):
"""Checks that the output of the unfused batch norm has consumers."""
add_shift = graph.get_operation_by_name(context +
'BatchNorm/batchnorm_1/add_1')
# Ensure that the output tensor of batch norm has consumers, otherwise this
# is a dangling node and not a match.
return bool(add_shift.outputs[0].consumers())
def _FindMatchingTensor(graph, match_pattern, scope):
"""Finds best match of ops matching match_pattern with scope.
  Example: _FindMatchingTensor(graph, '/BatchNorm/moments/Squeeze',
'MobilenetV1/MobilenetV1/Conv2d_0/') returns:
Tensor('MobilenetV1/Conv2d_0/BatchNorm/moments/Squeeze')
Args:
graph: Graph to inspect.
match_pattern: Part of the name of the op that we need to match, should
be present in the op's name
scope: The scope of the op. All the elements of the scope need not be
present in the op's name.
Returns:
Tensor from graph that provides the best match to the match_pattern and
scope
"""
oplist = graph.get_operations()
split_context = set(scope.split('/'))
match_dict = {}
for op in oplist:
if op.name.endswith(match_pattern):
split_name = op.name.split('/')
num_matches = len(set(split_name) & split_context)
if num_matches > 0 or not scope:
match_dict[op.name] = num_matches
# match_dict contains matching op names from graph with values being
# number of matches to scope. We pick the key with the most matches
if match_dict:
max_key = max(match_dict, key=match_dict.get)
return graph.get_tensor_by_name(max_key + ':0')
else:
return None
def _GetBatchNormParams(graph, context, has_scaling):
"""Extracts relevant tensors for folding batch norms.
Args:
graph: Graph to inspect.
context: The scope under which we look for batch norm params
has_scaling: Bool that specifies if scaling is done as part of batch norm.
Returns:
_BatchNormMatch containing all required batch norm parameters.
"""
gamma_tensor = None
batch_mean_tensor = None
batch_variance_tensor = None
moving_mean_tensor = None
moving_variance_tensor = None
batch_epsilon = None
bn_decay_mean_tensor = None
bn_decay_var_tensor = None
# TODO(raghuramank) This code relies on string matching and needs to be
# updated if unfused batch norm continues to be widely used
# Matching variable names is brittle and relies on scoping
# conventions. Fused batch norm folding is more robust. Support for unfused
# batch norms will be deprecated as we move forward. Fused batch norms allow
# for faster training and should be used whenever possible.
# context contains part of the names of the tensors we are interested in:
# For MobilenetV1, the context has repetitions:
# MobilenetV1/MobilenetV1/Conv2d_3_depthwise
# when the moving_mean tensor has the name:
# MobilenetV1/Conv2d_3_depthwise/BatchNorm/moving_mean/read
# To pick the correct variable name, it is necessary to ignore the repeating
# header.
# For MobilenetV2, this problem does not exist:
# The context is: MobilenetV2/expanded_conv_3/depthwise
# and the names of the tensors start with a single MobilenetV2
# The moving mean for example, has the name:
# MobilenetV2/expanded_conv_3/depthwise/BatchNorm/moving_mean/read
  # We identify the best match for an op by checking for
  # 1. The suffix of the op is exactly matched
  # 2. Maximum number of matches with the context. The matching
  #    score is given by the number of parts of context (split by /) that
  #    are present in the parts of the tensor name (again split by /).
  # For example: scope = MobilenetV2/MobilenetV2/expanded_conv_3 and
  # op.name = MobilenetV2/expanded_conv_3/depthwise/BatchNorm/moving_mean/read
  # will have 2 matches; a scope with a different conv layer will have one
  # match.
op_suffix_mean = 'BatchNorm/moments/Squeeze'
op_suffix_variance = 'BatchNorm/moments/Squeeze_1'
op_suffix_epsilon = 'BatchNorm/batchnorm_1/add/y'
op_suffix_bn_decay_mean = 'BatchNorm/AssignMovingAvg/decay'
op_suffix_bn_decay_var = 'BatchNorm/AssignMovingAvg_1/decay'
if variable_scope.get_variable_scope().use_resource:
op_suffix_gamma = 'BatchNorm/gamma/Read/ReadVariableOp'
op_suffix_moving_variance = (
'BatchNorm/moving_variance/Read/ReadVariableOp')
op_suffix_moving_mean = ('BatchNorm/moving_mean/Read/ReadVariableOp')
else:
op_suffix_gamma = 'BatchNorm/gamma'
op_suffix_moving_variance = 'BatchNorm/moving_variance/read'
op_suffix_moving_mean = 'BatchNorm/moving_mean/read'
# Parse through list of ops to find relevant ops
batch_mean_tensor = _FindMatchingTensor(graph, op_suffix_mean, context)
batch_variance_tensor = _FindMatchingTensor(graph, op_suffix_variance,
context)
moving_mean_tensor = _FindMatchingTensor(graph, op_suffix_moving_mean,
context)
moving_variance_tensor = _FindMatchingTensor(graph, op_suffix_moving_variance,
context)
batch_epsilon = _FindMatchingTensor(graph, op_suffix_epsilon, context)
bn_decay_mean_tensor = _FindMatchingTensor(graph, op_suffix_bn_decay_mean,
context)
bn_decay_var_tensor = _FindMatchingTensor(graph, op_suffix_bn_decay_var,
context)
if batch_mean_tensor is None and moving_mean_tensor is None:
    raise ValueError('Error folding unfused batch norms')
if has_scaling:
gamma_tensor = _FindMatchingTensor(graph, op_suffix_gamma, context)
if not has_scaling:
gamma_tensor = array_ops.ones(moving_mean_tensor.shape)
return _BatchNormMatch(
layer_op=None,
bn_op=None,
output_tensor=None,
input_tensor=None,
weight_tensor=None,
gamma_tensor=gamma_tensor,
beta_tensor=None,
mean_tensor=batch_mean_tensor,
variance_tensor=batch_variance_tensor,
moving_mean_tensor=moving_mean_tensor,
moving_variance_tensor=moving_variance_tensor,
bn_decay_mean_tensor=bn_decay_mean_tensor,
bn_decay_var_tensor=bn_decay_var_tensor,
batch_epsilon=batch_epsilon,
batch_to_space_op=None)
def _CreateFoldedOp(graph, context, has_scaling, freeze_batch_norm_delay,
is_training):
"""Folds in batch norm layer into preceding convolution or FC layer.
Creates 3 new nodes, connects their inputs and adds them to the graph:
mul is cloned into mul_fold, Conv2D or MatMul, or DepthwiseConv2d is cloned
into respective *_Fold, add is cloned into add_fold.
Args:
graph: Graph to modify.
context: String, batch norm context, i.e. node into which BatchNorm is
nested.
has_scaling: Whether the batch norm has scaling enabled.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization.
is_training: Bool, true if training.
Raises:
ValueError: When operation type is not supported, or input and output tensor
shapes mismatch for created operations: mul_fold, add_fold.
Returns:
A pair of Operations, the first is the original consumer node of the batch
norm (../BatchNorm/batchnorm_1/add_1), the second is the consumer node of
the folded graph (add_fold).
"""
mul_scale_name = 'mul_1' if has_scaling else 'mul'
mul_scale = graph.get_operation_by_name(context + 'BatchNorm/batchnorm_1/' +
mul_scale_name)
op_below = mul_scale.inputs[0].op
# Skip over the BatchToSpace operation in the case of atrous convolutions.
batch_to_space_op = None
if op_below.type == 'BatchToSpaceND':
batch_to_space_op = op_below
op_below = op_below.inputs[0].op
weights = op_below.inputs[1]
match = _GetBatchNormParams(
graph=graph, context=context, has_scaling=has_scaling)
correction_scale, correction_recip, correction_offset = None, None, None
if is_training:
correction_scale, correction_recip, correction_offset = (
_ComputeBatchNormCorrections(
context=context,
match=match,
freeze_batch_norm_delay=freeze_batch_norm_delay))
# Special handling for weights of depthwise convolution.
if op_below.type == 'DepthwiseConv2dNative':
new_shape = [
weights.get_shape().as_list()[2],
weights.get_shape().as_list()[3]
]
scale_name = 'mul' if has_scaling else 'Rsqrt'
scale = graph.get_operation_by_name(context + 'BatchNorm/batchnorm_1/' +
scale_name)
scale = array_ops.reshape(scale.outputs[0], new_shape,
context + 'scale_reshape')
if correction_scale is not None:
correction_scale = array_ops.reshape(correction_scale, new_shape,
context + 'correction_reshape')
with ops.device(mul_scale.device):
weights = math_ops.multiply(correction_scale, weights,
context + 'correction_mult')
mul_fold = _CloneOp(mul_scale, context + 'mul_fold', [(0, weights),
(1, scale)])
elif op_below.type in ['Conv2D', 'MatMul']:
if correction_scale is not None:
with ops.device(mul_scale.device):
weights = math_ops.multiply(correction_scale, weights,
context + 'correction_mult')
mul_fold = _CloneOp(mul_scale, context + 'mul_fold', [(0, weights)])
else:
raise ValueError('Cannot handle operation of type: %s' % op_below.type)
_AssertShapesMatch('mul_fold', mul_fold.inputs[0], mul_fold.outputs[0])
conv_or_fc_folded = _CloneOp(op_below, op_below.name + '_Fold',
[(1, mul_fold.outputs[0])])
add_shift = graph.get_operation_by_name(context +
'BatchNorm/batchnorm_1/add_1')
corrected_output = conv_or_fc_folded.outputs[0]
  # Copy the batch to space operation if we have an atrous convolution.
if batch_to_space_op:
corrected_output = array_ops.batch_to_space_nd(
corrected_output,
batch_to_space_op.inputs[1],
batch_to_space_op.inputs[2],
name=batch_to_space_op.name + '_Fold')
if correction_offset is not None:
with ops.device(conv_or_fc_folded.device):
corrected_output = math_ops.multiply(correction_recip, corrected_output,
context + 'post_conv_mul')
corrected_output = math_ops.add(corrected_output, (correction_offset),
context + 'correction_add')
add_fold = _CloneOp(add_shift, context + 'add_fold', [(0, corrected_output)])
_AssertShapesMatch('add_fold', add_fold.inputs[0], add_fold.outputs[0])
return add_shift, add_fold
def _CloneOp(op, new_name, new_inputs):
"""Clones a given op, replaces its name and some of its inputs.
Args:
op: Operation to modify.
new_name: String, a new name to set on cloned op.
new_inputs: A list of tuples (idx, tensor), each input with corresponding
index will be replaced by the given Tensor in the cloned op.
Returns:
Operation, the cloned op.
Raises:
TypeError: When Operation type is not supported.
ValueError: When input shapes are incompatible.
"""
inputs = list(op.inputs)
for new_input in new_inputs:
inputs[new_input[0]] = new_input[1]
return _OP_CLONER.Clone(op, inputs, new_name)
class _OpCloner(object):
"""Helper class that clones tf.Operations based on their type."""
def __init__(self):
self.op_type_to_action = {
'Mul': self._CloneMul,
'Add': self._CloneAdd,
'AddV2': self._CloneAdd,
'Conv2D': self._CloneConv2d,
'DepthwiseConv2dNative': self._CloneDepthwiseConv2d,
'MatMul': self._CloneMatMul,
}
def _CloneMul(self, op, inputs, new_name):
del op # Unused.
return math_ops.multiply(inputs[0], inputs[1], name=new_name).op
def _CloneAdd(self, op, inputs, new_name):
del op # Unused.
return math_ops.add(inputs[0], inputs[1], name=new_name).op
def _CloneConv2d(self, op, inputs, new_name):
input_tensor = inputs[0]
weights = inputs[1]
self._AssertConvShapes(op.name, input_tensor, weights)
return nn_ops.conv2d(
input_tensor,
weights,
strides=op.get_attr('strides'),
padding=op.get_attr('padding'),
use_cudnn_on_gpu=op.get_attr('use_cudnn_on_gpu'),
data_format=op.get_attr('data_format').decode(),
name=new_name).op
def _CloneDepthwiseConv2d(self, op, inputs, new_name):
input_tensor = inputs[0]
weights = inputs[1]
self._AssertConvShapes(op.name, input_tensor, weights)
return nn.depthwise_conv2d(
input_tensor,
weights,
strides=op.get_attr('strides'),
padding=op.get_attr('padding'),
name=new_name).op
def _CloneMatMul(self, op, inputs, new_name):
weights = inputs[0]
input_tensor = inputs[1]
self._AssertFCShapes(op.name, weights, input_tensor)
return math_ops.matmul(
weights,
input_tensor,
transpose_a=op.get_attr('transpose_a'),
transpose_b=op.get_attr('transpose_b'),
name=new_name).op
def Clone(self, op, inputs, new_name):
try:
return self.op_type_to_action[op.type](op, inputs, new_name)
except KeyError:
raise TypeError('Unsupported operation type: %s' % op.type)
def _AssertConvShapes(self, op_name, input_tensor, weights):
"""Makes sure that convolution inputs have compatible shapes.
Args:
op_name: Operation name, only used in error message.
input_tensor: Input that is convolved.
weights: Weights of the convolution filter.
Raises:
ValueError: When input shapes are incompatible.
"""
input_shape = input_tensor.get_shape()
weights_shape = weights.get_shape()
if (len(input_shape) != 4 or len(weights_shape) != 4 or
input_shape[3] != weights_shape[2]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %
(op_name, input_shape, weights_shape))
def _AssertFCShapes(self, op_name, weights, input_tensor):
"""Makes sure that FC layer inputs have compatible shapes.
Args:
op_name: Operation name, only used in error message.
weights: Weights used in FC layer.
input_tensor: Input into FC layer.
Raises:
ValueError: When input shapes are incompatible.
"""
weights_shape = weights.get_shape()
input_shape = input_tensor.get_shape()
if (len(weights_shape) != 2 or len(input_shape) != 2 or
weights_shape[1] != input_shape[0]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %
(op_name, weights_shape, input_shape))
_OP_CLONER = _OpCloner()
def _AssertShapesMatch(op_name, in_tensor, out_tensor):
"""Makes sure that shapes of input and output tensors are compatible.
Args:
op_name: String, operation name, only used in error message.
in_tensor: Tensor, input tensor.
out_tensor: Tensor, output tensor.
Raises:
ValueError: When input and output tensors have different shapes.
"""
in_shape = in_tensor.get_shape()
out_shape = out_tensor.get_shape()
if not in_shape.is_compatible_with(out_shape):
raise ValueError('%s should not change tensor shape: input %s, '
'output %s' % (op_name, in_shape, out_shape))
def _HasScaling(graph, input_to_ops_map, bn):
r"""Checks if batch norm has scaling enabled.
  Difference between batch norm with scaling and without is that with scaling:
    Rsqrt -> mul -> mul_1
                \-> mul_2
  where
  mul multiplies gamma by the inverse square root of the EMA of the batch
  variance,
  mul_1 multiplies the output of mul with the output from the base operation
  (convolution, FC or depthwise convolution),
  mul_2 multiplies the output of mul with the EMA of the batch mean,
  and without scaling:
    Rsqrt -> mul
         \-> mul_1
  where
  mul multiplies the inverse square root of the EMA of the batch variance
  with the output from the base operation,
  mul_1 multiplies the inverse square root of the EMA of the batch variance
  with the EMA of the batch mean.
Args:
graph: Graph to inspect.
input_to_ops_map: InputToOps object containing mapping from tensor's name
to ops that take it as input.
bn: Batch norm layer prefix string.
Returns:
A boolean indicating whether this batch norm layer has scaling enabled.
"""
rsqrt_op = graph.get_operation_by_name(bn + 'BatchNorm/batchnorm_1/Rsqrt')
rsqrt_consumers = input_to_ops_map.ConsumerOperations(rsqrt_op)
return sum(1 for op in rsqrt_consumers if op.type == 'Mul') == 1
class _BatchNormMatch(object):
"""Contains all information related to a found Fused/UnfusedBatchNorm."""
def __init__(self, layer_op, bn_op, output_tensor, input_tensor,
weight_tensor, gamma_tensor, beta_tensor, mean_tensor,
variance_tensor, moving_mean_tensor, moving_variance_tensor,
bn_decay_mean_tensor, bn_decay_var_tensor, batch_epsilon,
batch_to_space_op):
self._layer_op = layer_op
self._bn_op = bn_op
self._output_tensor = output_tensor
self._input_tensor = input_tensor
self._weight_tensor = weight_tensor
self._gamma_tensor = gamma_tensor
self._beta_tensor = beta_tensor
self._mean_tensor = mean_tensor
self._variance_tensor = variance_tensor
self._moving_mean_tensor = moving_mean_tensor
self._moving_variance_tensor = moving_variance_tensor
self._bn_decay_mean_tensor = bn_decay_mean_tensor
self._bn_decay_var_tensor = bn_decay_var_tensor
self._batch_epsilon = batch_epsilon
self._batch_to_space_op = batch_to_space_op
@property
def layer_op(self):
return self._layer_op
@property
def bn_op(self):
return self._bn_op
@property
def output_tensor(self):
return self._output_tensor
@property
def input_tensor(self):
return self._input_tensor
@property
def weight_tensor(self):
return self._weight_tensor
@property
def gamma_tensor(self):
return self._gamma_tensor
@property
def beta_tensor(self):
return self._beta_tensor
@property
def mean_tensor(self):
return self._mean_tensor
@property
def variance_tensor(self):
return self._variance_tensor
@property
def moving_mean_tensor(self):
return self._moving_mean_tensor
@property
def moving_variance_tensor(self):
return self._moving_variance_tensor
@property
def batch_epsilon(self):
return self._batch_epsilon
@property
def bn_decay_mean_tensor(self):
return self._bn_decay_mean_tensor
@property
def bn_decay_var_tensor(self):
return self._bn_decay_var_tensor
@property
def batch_to_space_op(self):
return self._batch_to_space_op
|
tensorflow-master
|
tensorflow/contrib/quantize/python/fold_batch_norms.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for quantizing a Tensorflow graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import quantize
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
conv2d = layers.conv2d
separable_conv2d = layers.separable_conv2d
class QuantizeTest(test_util.TensorFlowTestCase):
def _RunTestOverParameters(self, test_fn):
params = [True, False]
for is_training in params:
test_fn(is_training)
def testInsertQuantOpFailsWhenOpsNotConnected(self):
pass
def _TestInsertQuantOpFailsWhenOpsNotConnected(self, is_training):
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
conv = conv2d(inputs, 32, [5, 5], stride=2, padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None, scope='test')
relu = nn_ops.relu6(inputs)
# Inserting a quantization op between two unconnected ops should fail with
# ValueError.
with self.assertRaises(ValueError) as err:
quantize._InsertQuantOp('test', is_training, conv.op, [relu.op],
'FailingQuantOp')
self.assertEqual(
str(err.exception), 'Some inputs not quantized for ops: [Relu6]')
def testInsertQuantOpForAddAfterConv2d(self):
self._RunTestOverParameters(self._TestInsertQuantOpForAddAfterConv2d)
def _TestInsertQuantOpForAddAfterConv2d(self, is_training):
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
conv = conv2d(input1, 32, [5, 5], stride=2, padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None, scope='test/test')
node = math_ops.add(conv, input2, name='test/add')
node = nn_ops.relu6(node, name='test/relu6')
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
quantization_node_name = 'FakeQuantWithMinMaxVars'
conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
quantization_node_name)
self.assertEqual(conv_quant.type, quantization_node_name)
      # Scan through all FakeQuant operations, ensuring that the activation
      # isn't in the consumers of the operation. Since activations are folded
      # into the preceding operation during inference, the FakeQuant operation
      # after the activation is all that is needed.
for op in graph.get_operations():
if op.type == quantization_node_name:
quant_op = graph.get_operation_by_name(op.name)
consumers = []
for output in quant_op.outputs:
consumers.extend(output.consumers())
self.assertNotIn('test/relu6', [c.name for c in consumers])
def testInsertQuantOpForAddAfterSeparableConv2d(self):
self._RunTestOverParameters(
self._TestInsertQuantOpForAddAfterSeparableConv2d)
def _TestInsertQuantOpForAddAfterSeparableConv2d(self, is_training):
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
input2 = array_ops.zeros((batch_size, height / 2, width / 2, depth))
conv = separable_conv2d(input1, None, [5, 5], stride=2,
depth_multiplier=1.0, padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None, scope='test/test')
node = math_ops.add(conv, input2, name='test/add')
node = nn_ops.relu6(node, name='test/relu6')
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
# Check if output of bias add is quantized
quantization_node_name = 'FakeQuantWithMinMaxVars'
conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
quantization_node_name)
self.assertEqual(conv_quant.type, quantization_node_name)
for op in graph.get_operations():
if op.type == quantization_node_name:
quant_op = graph.get_operation_by_name(op.name)
# Scan through all FakeQuant operations, ensuring that the activation
# identity op isn't in the consumers of the operation.
consumers = []
for output in quant_op.outputs:
consumers.extend(output.consumers())
self.assertNotIn('test/relu6', [c.name for c in consumers])
def testInsertQuantOpInSeparableConv2d(self):
self._RunTestOverParameters(self._TestInsertQuantOpInSeparableConv2d)
def _TestInsertQuantOpInSeparableConv2d(self, is_training):
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
input2 = array_ops.zeros((batch_size, height / 2, width / 2, depth))
conv = separable_conv2d(
input1,
3, [5, 5],
stride=2,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
scope='test/test')
node = math_ops.add(conv, input2, name='test/add')
node = nn_ops.relu6(node, name='test/relu6')
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
# Check if output of bias add is quantized
quantization_node_name = 'FakeQuantWithMinMaxVars'
conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
quantization_node_name)
self.assertEqual(conv_quant.type, quantization_node_name)
      # Check if weights for both convs inside separable conv are quantized
pointwise_weight_quant = graph.get_operation_by_name(
'test/test/weights_quant/' + quantization_node_name)
self.assertEqual(pointwise_weight_quant.type, quantization_node_name)
depthwise_weight_quant = graph.get_operation_by_name(
'test/test/separable_conv2d/weights_quant/' + quantization_node_name)
self.assertEqual(depthwise_weight_quant.type, quantization_node_name)
# Check if activations after first depthwise conv are quantized.
depthwise_act_quant = graph.get_operation_by_name(
'test/test/separable_conv2d/act_quant/' + quantization_node_name)
self.assertEqual(depthwise_act_quant.type, quantization_node_name)
for op in graph.get_operations():
if op.type == quantization_node_name:
quant_op = graph.get_operation_by_name(op.name)
# Scan through all FakeQuant operations, ensuring that the activation
# identity op isn't in the consumers of the operation.
consumers = []
for output in quant_op.outputs:
consumers.extend(output.consumers())
self.assertNotIn('test/relu6', [c.name for c in consumers])
def testLayerActivationQuantized(self):
self._RunTestOverParameters(self._TestLayerActivationQuantized)
def _TestLayerActivationQuantized(self, is_training):
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
_ = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=nn_ops.relu6,
biases_initializer=None,
scope='test')
# Ensure that both weights and output of activations are quantized
# when we have a conv->relu6 with no bias add
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
activation_op = graph.get_operation_by_name('test/Relu6')
conv_op = graph.get_operation_by_name('test/Conv2D')
self.assertTrue('test/weights_quant/FakeQuantWithMinMaxVars:0' in
[tensor_in.name for tensor_in in conv_op.inputs])
self.assertTrue('FakeQuantWithMinMaxVars' in
[op.type for op in activation_op.outputs[0].consumers()])
def testFinalLayerQuantized(self):
self._RunTestOverParameters(self._TestFinalLayerQuantized)
def _TestFinalLayerQuantized(self, is_training):
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
_ = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
scope='test')
      # Ensure that a FakeQuant operation is in the outputs of the BiasAdd.
bias_add_op = graph.get_operation_by_name('test/BiasAdd')
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
self.assertTrue('FakeQuantWithMinMaxVars' in
[op.type for op in bias_add_op.outputs[0].consumers()])
def testPostActivationBypassQuantized(self):
self._RunTestOverParameters(self._TestPostActivationBypassQuantized)
def _TestPostActivationBypassQuantized(self, is_training):
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
conv = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=nn_ops.relu6,
scope='test/test')
bypass_tensor = math_ops.add(conv, input2, name='test/add')
      # The output of the post-activation bypass is consumed by another layer.
_ = conv2d(
bypass_tensor,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=nn_ops.relu6,
scope='test/unused')
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
      # Ensure that the bypass node is preceded by and followed by a
      # FakeQuantWithMinMaxVars operation, since the output of the Add isn't an
      # activation.
self.assertTrue('FakeQuantWithMinMaxVars' in
[c.type for c in bypass_tensor.consumers()])
self.assertTrue('FakeQuantWithMinMaxVars' in
[i.op.type for i in bypass_tensor.op.inputs])
def testOverlappingPostActivationBypassQuantized(self):
self._RunTestOverParameters(
self._TestOverlappingPostActivationBypassQuantized)
def _TestOverlappingPostActivationBypassQuantized(self, is_training):
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
conv_input = array_ops.zeros((batch_size, height, width, depth))
conv1 = conv2d(
conv_input,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=nn_ops.relu6,
scope='test/test1')
# The bypass of this conv is the post activation bypass of the previous
# conv.
conv2 = conv2d(
conv_input,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
scope='test/test2')
bypass_tensor = math_ops.add(conv1, conv2, name='test/add')
_ = nn_ops.relu6(bypass_tensor, name='test/output')
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
      # Ensure that the bypass node is preceded by a FakeQuantWithMinMaxVars
      # operation, and NOT followed by one.
self.assertTrue('FakeQuantWithMinMaxVars' not in
[c.type for c in bypass_tensor.consumers()])
self.assertTrue('FakeQuantWithMinMaxVars' in
[i.op.type for i in bypass_tensor.op.inputs])
# Ensure that all the convs and activations are quantized.
op_names = [op.name for op in graph.get_operations()]
self.assertTrue(
'test/test1/weights_quant/FakeQuantWithMinMaxVars' in op_names)
self.assertTrue(
'test/test2/weights_quant/FakeQuantWithMinMaxVars' in op_names)
self.assertTrue(
'test/test1/act_quant/FakeQuantWithMinMaxVars' in op_names)
self.assertTrue('test/act_quant/FakeQuantWithMinMaxVars' in op_names)
self.assertEqual(
'Relu6',
graph.get_operation_by_name(
'test/test1/act_quant/FakeQuantWithMinMaxVars').inputs[0].op.type)
self.assertEqual(
'Relu6',
graph.get_operation_by_name(
'test/act_quant/FakeQuantWithMinMaxVars').inputs[0].op.type)
def testWithNameScope(self):
self._RunTestOverParameters(self._TestWithNameScope)
def _TestWithNameScope(self, is_training):
graph = ops.Graph()
with graph.as_default():
with graph.name_scope('name_scope'):
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
_ = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
scope='test')
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
for op in graph.get_operations():
self.assertTrue(not op.name.startswith('name_scope/name_scope/'),
'Broken op: %s' % op.name)
def testWithNullNameScope(self):
self._RunTestOverParameters(self._TestWithNullNameScope)
def _TestWithNullNameScope(self, is_training):
graph = ops.Graph()
with graph.as_default():
with graph.name_scope(None):
batch_size, height, width, depth = 5, 128, 128, 32
input1 = array_ops.zeros((batch_size, height, width, depth))
_ = conv2d(
input1,
32, [5, 5],
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
scope='test')
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
# Passes if Quantize() does not crash.
def testWithNonMatchingNameScope(self):
self._RunTestOverParameters(self._testWithNonMatchingNameScope)
def _testWithNonMatchingNameScope(self, is_training):
graph = ops.Graph()
with graph.as_default():
with graph.name_scope('name_scope'):
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
_ = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
scope='test')
op_names_before_quantize = set([op.name for op in graph.get_operations()])
quantize.Quantize(
graph, is_training, weight_bits=8, activation_bits=8,
scope='NonExisting/')
op_names_after_quantize = set([op.name for op in graph.get_operations()])
# No ops should be inserted or removed.
self.assertEqual(op_names_before_quantize, op_names_after_quantize)
def testSinglePartitionedVariable(self):
self._RunTestOverParameters(self._testSinglePartitionedVariable)
def _testSinglePartitionedVariable(self, is_training):
    # When weights are partitioned into a single partition, the weights
    # variable is followed by an identity -> identity chain (an additional
    # identity node).
partitioner = partitioned_variables.fixed_size_partitioner(1)
graph = ops.Graph()
with graph.as_default():
with variable_scope.variable_scope('part', partitioner=partitioner):
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
conv = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
scope='test/test')
node = math_ops.add(conv, input2, name='test/add')
node = nn_ops.relu6(node, name='test/relu6')
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
# Check that the weight's quant node was added.
op_names = [op.name for op in graph.get_operations()]
self.assertTrue(
'part/test/test/weights_quant/FakeQuantWithMinMaxVars' in op_names)
def testMultiplePartitionedVariables(self):
self._RunTestOverParameters(self._testMultiplePartitionedVariables)
def _testMultiplePartitionedVariables(self, is_training):
    # When weights are partitioned into multiple partitions, the weights
    # variable is followed by an identity -> concat -> identity chain that
    # groups the partitions.
partitioner = partitioned_variables.fixed_size_partitioner(2)
graph = ops.Graph()
with graph.as_default():
with variable_scope.variable_scope('part', partitioner=partitioner):
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
conv = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
scope='test/test')
node = math_ops.add(conv, input2, name='test/add')
node = nn_ops.relu6(node, name='test/relu6')
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
# Check that the weight's quant node was added.
op_names = [op.name for op in graph.get_operations()]
self.assertTrue(
'part/test/test/weights_quant/FakeQuantWithMinMaxVars' in op_names)
def testSkipReshapeQuantization(self):
self._RunTestOverParameters(self._TestSkipReshapeQuantization)
def _TestSkipReshapeQuantization(self, is_training):
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
conv = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=nn_ops.relu6,
scope='test/test')
reshape = array_ops.reshape(
conv, (int(10), int(height / 2), int(width / 2), int(16)))
      # Insert a fake quant node after the reshape. We will check that one
      # isn't inserted before it.
array_ops.fake_quant_with_min_max_vars(reshape, -1, 1)
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
# Ensure that there isn't a FakeQuant added before the reshape.
self.assertFalse(
'FakeQuantWithMinMaxVars' in [i.op.type for i in reshape.op.inputs])
graph = ops.Graph()
with graph.as_default():
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
conv = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=nn_ops.relu6,
scope='test/test')
reshape = array_ops.reshape(
conv, (int(10), int(height / 2), int(width / 2), int(16)))
# If no fake quant is added after the reshape, a FakeQuant should be added
# before the reshape.
quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
      # Ensure that there is a FakeQuant added before the reshape.
self.assertTrue(
'FakeQuantWithMinMaxVars' in [i.op.type for i in reshape.op.inputs])
def testSeparableConvWithResourceVar(self):
graph = ops.Graph()
with graph.as_default():
with variable_scope.variable_scope('', use_resource=True):
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
kernel_size, depth_multiplier = 3, 1
depthwise_shape = [kernel_size, kernel_size, depth, depth_multiplier]
depthwise_weights = variables.model_variable(
'depthwise_weights', shape=depthwise_shape)
strides = [1, 1, 1, 1]
with variable_scope.variable_scope('depthwise_conv_1'):
conv1 = nn.depthwise_conv2d(
input1, depthwise_weights, strides, padding='SAME')
with variable_scope.variable_scope('depthwise_conv_2'):
conv2 = nn.depthwise_conv2d(
conv1, depthwise_weights, strides, padding='SAME')
math_ops.add(conv2, input1, name='add')
quantize.Quantize(graph, True)
# Test that the weights and activations of all convs have been quantized.
quant_node_name = 'FakeQuantWithMinMaxVars'
weights_quant = graph.get_operation_by_name(
'depthwise_conv_1/weights_quant/' + quant_node_name)
self.assertEqual(weights_quant.type, quant_node_name)
act_quant = graph.get_operation_by_name('depthwise_conv_1/act_quant/' +
quant_node_name)
self.assertEqual(act_quant.type, quant_node_name)
weights_quant = graph.get_operation_by_name(
'depthwise_conv_2/weights_quant/' + quant_node_name)
self.assertEqual(weights_quant.type, quant_node_name)
act_quant = graph.get_operation_by_name('depthwise_conv_2/act_quant/' +
quant_node_name)
self.assertEqual(act_quant.type, quant_node_name)
def _WeightInit(self, stddev):
"""Returns truncated normal variable initializer.
Function is defined purely to shorten the name so that it stops wrapping.
Args:
stddev: Standard deviation of normal variable.
Returns:
An initialized that initializes with a truncated normal variable.
"""
return init_ops.truncated_normal_initializer(stddev=stddev)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/quantize/python/quantize_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for third_party.tensorflow.contrib.quantize.python.quant_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.quantize.python import quant_ops
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
_MIN_MAX_VARS = 'min_max_vars'
_SYMMETRIC_RANGE_RATIO = 0.9921875 # 127 / 128
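# Added note: for num_bits = 8, the symmetric (non-narrow) quantizers under
# test use a min/max ratio of ((1 << 8) - 2) / (1 << 8) = 254 / 256 = 127 /
# 128, which is exactly the 0.9921875 constant above.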
class QuantOpsTest(googletest.TestCase):
def testLastValueQuantizeTrainingAssign(self):
min_value, max_value = self._GetMinMaxValues(quant_ops.LastValueQuantize,
[[-1, 1]])
self.assertEqual(min_value, -1.0)
self.assertEqual(max_value, 1.0)
def testLastValueSymmetricQuantizeTrainingAssign(self):
min_value, max_value = self._GetMinMaxValues(
quant_ops.LastValueQuantize,
[[-_SYMMETRIC_RANGE_RATIO, _SYMMETRIC_RANGE_RATIO]],
symmetric=True,
narrow_range=False)
self.assertEqual(min_value, -1.0)
self.assertEqual(max_value, _SYMMETRIC_RANGE_RATIO)
def testLastValueSymmetricQuantizeNarrowRangeTrainingAssign(self):
min_value, max_value = self._GetMinMaxValues(
quant_ops.LastValueQuantize, [[-1, 0.5]],
symmetric=True,
narrow_range=True)
self.assertEqual(min_value, -1.0)
self.assertEqual(max_value, 1)
def testMovingAvgQuantizeTrainingAssign(self):
min_value, max_value = self._GetMinMaxValues(quant_ops.MovingAvgQuantize,
[[-1, 1], [0, 0]])
self.assertAlmostEqual(min_value, -0.5, delta=1e-3)
self.assertAlmostEqual(max_value, 0.5, delta=1e-3)
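    # Note (added, illustrative): assign_moving_average zero-debiases by
    # default, so after the two updates above the tracked extrema land near
    # the plain means of the batch minima (-1 and 0) and maxima (1 and 0),
    # i.e. roughly -0.5 and 0.5 as asserted.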
def testMovingAvgQuantizeTrainingAssignNoShape(self):
min_value, max_value = self._GetMinMaxValues(
quant_ops.MovingAvgQuantize, [[-1, 1], [0, 0]], shape=None)
self.assertAlmostEqual(min_value, -0.5, delta=1e-3)
self.assertAlmostEqual(max_value, 0.5, delta=1e-3)
def testMovingAvgSymmetricQuantizeTrainingAssign(self):
min_value, max_value = self._GetMinMaxValues(
quant_ops.MovingAvgQuantize, [[-1, 0.5], [0, 0]], symmetric=True)
self.assertAlmostEqual(min_value, -0.5, delta=1e-3)
self.assertAlmostEqual(max_value, 0.5 * _SYMMETRIC_RANGE_RATIO, delta=1e-3)
self.assertAlmostEqual(max_value, min_value * -_SYMMETRIC_RANGE_RATIO)
def testMovingAvgSymmetricQuantizeNarrowRangeTrainingAssign(self):
min_value, max_value = self._GetMinMaxValues(
quant_ops.MovingAvgQuantize, [[-1, 0.5], [0, 0]],
symmetric=True,
narrow_range=True)
self.assertAlmostEqual(min_value, -0.5, delta=1e-3)
self.assertAlmostEqual(max_value, 0.5, delta=1e-3)
self.assertAlmostEqual(max_value, -min_value)
def testVariablesNotPartitioned_LastValue(self):
    # Variables added should not use a default partitioner since they are
    # scalar. A TensorFlow error would be thrown if the partitioner were
    # respected by the rewrite.
with ops.Graph().as_default():
with variable_scope.variable_scope(
'part', partitioner=partitioned_variables.fixed_size_partitioner(2)):
x = array_ops.placeholder(dtypes.float32, shape=[2])
_ = quant_ops.LastValueQuantize(
x,
init_min=0.0,
init_max=0.0,
is_training=True,
vars_collection=_MIN_MAX_VARS)
def testVariablesNotPartitioned_MovingAvg(self):
    # Variables added should not use a default partitioner since they are
    # scalar. A TensorFlow error would be thrown if the partitioner were
    # respected by the rewrite.
with ops.Graph().as_default():
with variable_scope.variable_scope(
'part', partitioner=partitioned_variables.fixed_size_partitioner(2)):
x = array_ops.placeholder(dtypes.float32, shape=[2])
_ = quant_ops.MovingAvgQuantize(
x,
init_min=0.0,
init_max=0.0,
is_training=True,
vars_collection=_MIN_MAX_VARS)
  def _GetMinMaxValues(self, quantize_fn, input_values, shape=(2,), **kwds):
g = ops.Graph()
with session.Session(graph=g) as sess:
x = array_ops.placeholder(dtypes.float32, shape=shape)
y = quantize_fn(
x,
init_min=0.0,
init_max=0.0,
is_training=True,
vars_collection=_MIN_MAX_VARS,
**kwds)
# Run the step.
sess.run(variables.global_variables_initializer())
for input_elem in input_values:
sess.run(y, feed_dict={x: input_elem})
# Now check that the min_max_vars were, in fact, updated.
min_max_vars = ops.get_collection(_MIN_MAX_VARS)
self.assertEqual(len(min_max_vars), 2)
min_idx = 0 if 'min' in min_max_vars[0].name else 1
max_idx = (min_idx + 1) % 2
min_var, max_var = min_max_vars[min_idx], min_max_vars[max_idx]
min_max_values = sess.run([min_var, max_var])
return min_max_values[0], min_max_values[1]
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/quantize/python/quant_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python support for quantization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import moving_averages
def FixedQuantize(inputs, init_min=-6.0, init_max=6.0, scope=None):
"""Adds a fake quantize layer with fixed quantization interval.
Args:
inputs: a tensor containing values to be quantized.
init_min: the lower end of quantization interval.
init_max: the upper end of quantization interval.
scope: Optional scope for name_scope.
Returns:
a tensor containing quantized values.
"""
with ops.name_scope(scope, 'FixedQuantize', values=[inputs]):
return array_ops.fake_quant_with_min_max_args(
inputs, min=init_min, max=init_max)
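# Example usage (added illustrative sketch, not part of the original module;
# assumes dtypes from tensorflow.python.framework is also imported):
#
#   images = array_ops.placeholder(dtypes.float32, [None, 224, 224, 3])
#   quant_images = FixedQuantize(images, init_min=0.0, init_max=1.0)
#
# fake_quant_with_min_max_args defaults to 8 bits, so values are clamped to
# [0.0, 1.0] and rounded to the nearest multiple of 1 / 255.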
def _ModelVariable(name,
shape=None,
initializer=None,
collections=None,
trainable=None):
collections = list(collections or [])
collections += [ops.GraphKeys.GLOBAL_VARIABLES]
return variable_scope.get_variable(
name,
shape=shape,
initializer=initializer,
collections=collections,
trainable=trainable,
aggregation=variable_scope.VariableAggregation.MEAN)
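# Added note: the min/max variables created through _ModelVariable are always
# added to GLOBAL_VARIABLES (plus any caller-supplied collections), so they
# are initialized and checkpointed with the model even when non-trainable;
# VariableAggregation.MEAN averages their updates across replicas if the
# graph is built under a distribution strategy.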
def LastValueQuantize(inputs,
per_channel=False,
init_min=-6.0,
init_max=6.0,
vars_collection=None,
name_prefix='LastValueQuant',
reuse=None,
is_training=True,
num_bits=8,
narrow_range=False,
symmetric=False):
"""Adds a layer that collects quantization ranges as last input ranges.
LastValueQuantize creates variables called 'min' and 'max', representing the
interval used for quantization and clamping.
Args:
inputs: a tensor containing values to be quantized.
per_channel: (Optional) a boolean specifying whether to use different
quantization ranges per output channel.
init_min: a float scalar, the initial value for variable min.
init_max: a float scalar, the initial value for variable max.
vars_collection: (Optional) collection where to store variables for
quantization interval ends.
name_prefix: name_prefix for created nodes.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer, its scope must be given.
is_training: Whether the op is applied to a training or eval graph.
num_bits: Number of bits to use for quantization, must be between 2 and 8.
narrow_range: Whether to use the narrow quantization range
[1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].
symmetric: If true, use symmetric quantization limits instead of training
the minimum and maximum of each quantization range separately.
Returns:
a tensor containing quantized values.
"""
with variable_scope.variable_scope(
None, default_name=name_prefix, values=[inputs], reuse=reuse) as scope:
scope.set_partitioner(None)
input_shape = inputs.get_shape()
input_dim = len(input_shape)
if per_channel:
# Only support quantizing 1-, 2- and 4-dimensional tensors.
      assert input_dim in [1, 2, 4], ('Expected 1D, 2D or 4D input, was: %s in '
                                      'scope: %s' % (input_shape, name_prefix))
min_max_shape = [input_shape[-1]]
else:
min_max_shape = []
vars_collections = [vars_collection] if vars_collection else []
min_var = _ModelVariable(
'min',
shape=min_max_shape,
initializer=init_ops.constant_initializer(init_min),
collections=vars_collections,
trainable=False)
max_var = _ModelVariable(
'max',
shape=min_max_shape,
initializer=init_ops.constant_initializer(init_max),
collections=vars_collections,
trainable=False)
if not is_training:
return _FakeQuantWithMinMaxVars(
inputs,
min_var,
max_var,
per_channel=per_channel,
num_bits=num_bits,
narrow_range=narrow_range)
if per_channel:
if input_dim == 2:
reduce_dims = [0]
elif input_dim == 4:
reduce_dims = [0, 1, 2]
if per_channel:
if input_dim >= 2:
batch_min = math_ops.reduce_min(
inputs, axis=reduce_dims, name='BatchMin')
else:
batch_min = inputs
else:
batch_min = math_ops.reduce_min(inputs, name='BatchMin')
if per_channel:
if input_dim >= 2:
batch_max = math_ops.reduce_max(
inputs, axis=reduce_dims, name='BatchMax')
else:
batch_max = inputs
else:
batch_max = math_ops.reduce_max(inputs, name='BatchMax')
if symmetric:
if narrow_range:
min_max_ratio = -1
else:
# In two's complement notation, the negative range is slightly larger
# than the positive range.
min_max_ratio = -((1 << num_bits) - 2) / (1 << num_bits)
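        # Added worked example: with num_bits = 8 this evaluates to
        # -254 / 256 = -0.9921875, so for a batch_max of 1.0 the range_min
        # computed below is widened to about -1.0079 rather than -1.0,
        # matching the asymmetric two's complement range.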
# TFLite requires that 0.0 is always in the [min; max] range. Because
# batch_min <= batch_max, it follows that range_min <= 0 <= range_max.
range_min = math_ops.minimum(batch_min, batch_max / min_max_ratio)
range_max = math_ops.maximum(batch_max, batch_min * min_max_ratio)
else:
# TFLite requires that 0.0 is always in the [min; max] range.
range_min = math_ops.minimum(batch_min, 0.0)
range_max = math_ops.maximum(batch_max, 0.0)
assign_min = state_ops.assign(min_var, range_min, name='AssignMinLast')
assign_max = state_ops.assign(max_var, range_max, name='AssignMaxLast')
return _FakeQuantWithMinMaxVars(
inputs,
assign_min,
assign_max,
per_channel=per_channel,
num_bits=num_bits,
narrow_range=narrow_range)
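# Example usage (added illustrative sketch, not part of the original module):
#
#   weights = variable_scope.get_variable('weights', shape=[3, 3, 16, 32])
#   quant_weights = LastValueQuantize(
#       weights, per_channel=True, is_training=True, num_bits=8)
#
# During training the min/max variables are overwritten with the range of the
# most recent input before quantizing; at eval time the stored range is
# reused unchanged.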
def MovingAvgQuantize(inputs,
per_channel=False,
init_min=-6.0,
init_max=6.0,
ema_decay=0.999,
vars_collection=ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
name_prefix='MovingAvgQuantize',
reuse=None,
is_training=True,
num_bits=8,
narrow_range=False,
symmetric=False):
"""Adds a layer that collects quantization ranges as EMAs of input ranges.
MovingAvgQuantize creates variables called 'min' and 'max', representing the
interval used for quantization and clamping.
Args:
inputs: a tensor containing values to be quantized.
per_channel: (default False) a boolean specifying whether to use different
quantization ranges per output channel.
init_min: a float scalar, the initial value for variable min.
init_max: a float scalar, the initial value for variable max.
ema_decay: EMA decay parameter.
vars_collection: (Optional) collection where to store variables for
quantization interval ends.
name_prefix: name_prefix for created nodes.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer, its scope must be given.
is_training: Whether the op is applied to a training or eval graph.
num_bits: Number of bits to use for quantization, must be between 2 and 8.
narrow_range: Whether to use the narrow quantization range
[1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].
symmetric: If true, use symmetric quantization limits instead of training
the minimum and maximum of each quantization range separately.
Returns:
a tensor containing quantized values.
"""
with variable_scope.variable_scope(
None, default_name=name_prefix, values=[inputs], reuse=reuse) as scope:
scope.set_partitioner(None)
input_shape = inputs.get_shape()
if per_channel:
input_dim = len(input_shape)
# Only support quantizing 1-, 2- and 4-dimensional tensors.
      assert input_dim in [1, 2, 4], ('Expected 1D, 2D or 4D input, was: %s in '
                                      'scope: %s' % (input_shape, name_prefix))
min_max_shape = [input_shape[-1]]
else:
min_max_shape = []
vars_collections = [vars_collection] if vars_collection else []
min_var = _ModelVariable(
'min',
shape=min_max_shape,
initializer=init_ops.constant_initializer(init_min),
collections=vars_collections,
trainable=False)
max_var = _ModelVariable(
'max',
shape=min_max_shape,
initializer=init_ops.constant_initializer(init_max),
collections=vars_collections,
trainable=False)
if not is_training:
return _FakeQuantWithMinMaxVars(
inputs,
min_var,
max_var,
per_channel=per_channel,
num_bits=num_bits,
narrow_range=narrow_range)
if per_channel:
if input_dim == 2:
reduce_dims = [0]
elif input_dim == 4:
reduce_dims = [0, 1, 2]
if per_channel:
if input_dim >= 2:
batch_min = math_ops.reduce_min(
inputs, axis=reduce_dims, name='BatchMin')
else:
batch_min = inputs
else:
batch_min = math_ops.reduce_min(inputs, name='BatchMin')
if per_channel:
if input_dim >= 2:
batch_max = math_ops.reduce_max(
inputs, axis=reduce_dims, name='BatchMax')
else:
batch_max = inputs
else:
batch_max = math_ops.reduce_max(inputs, name='BatchMax')
if symmetric:
if narrow_range:
min_max_ratio = -1
else:
# In two's complement notation, the negative range is slightly larger
# than the positive range.
min_max_ratio = -((1 << num_bits) - 2) / (1 << num_bits)
# TFLite requires that 0.0 is always in the [min; max] range. Because
# batch_min <= batch_max, it follows that range_min <= 0 <= range_max.
range_min = math_ops.minimum(batch_min, batch_max / min_max_ratio)
range_max = math_ops.maximum(batch_max, batch_min * min_max_ratio)
else:
# TFLite requires that 0.0 is always in the [min; max] range.
range_min = math_ops.minimum(batch_min, 0.0)
range_max = math_ops.maximum(batch_max, 0.0)
assign_min = moving_averages.assign_moving_average(
min_var, range_min, ema_decay, name='AssignMinEma')
assign_max = moving_averages.assign_moving_average(
max_var, range_max, ema_decay, name='AssignMaxEma')
return _FakeQuantWithMinMaxVars(
inputs,
assign_min,
assign_max,
per_channel=per_channel,
num_bits=num_bits,
narrow_range=narrow_range)
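# Added note: with the default ema_decay of 0.999, each update keeps 99.9% of
# the previous range, so the moving averages effectively summarize on the
# order of 1 / (1 - 0.999) = 1000 recent batches; ranges therefore adapt
# slowly and are robust to outlier batches, unlike LastValueQuantize above.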
def _FakeQuantWithMinMaxVars(inputs, min_var, max_var, per_channel, num_bits,
narrow_range):
"""Adds a fake quantization operation.
  Depending on the value of per_channel, this operation does either global
  quantization or per-channel quantization. min_var and max_var should have
  corresponding shapes: scalar ([]) when per_channel == False and [d] when
  per_channel == True.
Args:
inputs: a tensor containing values to be quantized.
min_var: a variable containing quantization range lower end(s).
max_var: a variable containing quantization range upper end(s).
per_channel: a boolean specifying whether to use per-channel quantization.
num_bits: Number of bits to use for quantization, must be between 2 and 8.
narrow_range: Whether to use the narrow quantization range
[1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].
Returns:
a tensor containing quantized values.
"""
if per_channel:
assert len(min_var.get_shape()) == 1
assert len(max_var.get_shape()) == 1
return array_ops.fake_quant_with_min_max_vars_per_channel(
inputs, min_var, max_var, num_bits=num_bits, narrow_range=narrow_range)
else:
assert min_var.get_shape() == [] # pylint: disable=g-explicit-bool-comparison
assert max_var.get_shape() == [] # pylint: disable=g-explicit-bool-comparison
return array_ops.fake_quant_with_min_max_vars(
inputs, min_var, max_var, num_bits=num_bits, narrow_range=narrow_range)
|
tensorflow-master
|
tensorflow/contrib/quantize/python/quant_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parameterized unit tests for quantizing a Tensorflow graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import fold_batch_norms
from tensorflow.contrib.quantize.python import quantize
from tensorflow.python.compat import compat
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
batch_norm = layers.batch_norm
conv2d = layers.conv2d
fully_connected = layers.fully_connected
separable_conv2d = layers.separable_conv2d
class QuantizeTest(test_util.TensorFlowTestCase):
def _RunWithoutBatchNormTestOverParameters(self, test_fn):
# TODO(suharshs): Use parameterized test once OSS TF supports it.
parameters_list = [
# (activation, activation_op_name, with_bypass, delay)
(nn_ops.relu6, 'Relu6', False, None),
(nn_ops.relu, 'Relu', False, None),
(array_ops.identity, 'Identity', False, None),
(nn_ops.relu6, 'Relu6', False, 5000),
(nn_ops.relu, 'Relu', False, 5000),
(array_ops.identity, 'Identity', False, 5000),
(nn_ops.relu6, 'Relu6', True, None),
(nn_ops.relu, 'Relu', True, None),
(array_ops.identity, 'Identity', True, None),
(nn_ops.relu6, 'Relu6', True, 5000),
(nn_ops.relu, 'Relu', True, 5000),
(array_ops.identity, 'Identity', True, 5000),
]
for params in parameters_list:
# Test everything with resource variables and normal variables.
test_fn(params[0], params[1], params[2], params[3], False, None)
test_fn(params[0], params[1], params[2], params[3], True, None)
# Test with both empty scope and an example scope
test_fn(params[0], params[1], params[2], params[3], False, 'test')
test_fn(params[0], params[1], params[2], params[3], True, 'test')
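    # Note (added): each parameter tuple therefore runs four times, crossing
    # resource vs. normal variables with scope=None vs. an explicit 'test'
    # scope, so the name-based assertions below cover every naming variant.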
def _AssertCorrectQuantizedGraphWithoutBatchNorm(
self, graph, scope, layer, activation_op_name, with_bypass, delay,
use_resource):
quantization_node_name = 'FakeQuantWithMinMaxVars'
conv_scope = self._GetConvScope(scope, with_bypass)
delim = '/' if conv_scope else ''
if scope:
scope = scope + '/'
weights_quant = graph.get_operation_by_name(
conv_scope + delim + 'weights_quant/' + quantization_node_name)
self.assertEqual(weights_quant.type, quantization_node_name)
# Assemble the expected inputs.
if use_resource:
expected_inputs = [
conv_scope + delim +
'weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
conv_scope + delim +
'weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
if layer == 'DepthwiseConv2dNative':
expected_inputs.append(conv_scope + delim + 'depthwise/ReadVariableOp')
else:
expected_inputs.append(conv_scope + delim + layer + '/ReadVariableOp')
else:
expected_inputs = [
conv_scope + delim + 'weights_quant/AssignMinLast',
conv_scope + delim + 'weights_quant/AssignMaxLast',
]
if layer == 'DepthwiseConv2dNative':
expected_inputs.append(conv_scope + delim + 'depthwise_weights/read')
else:
expected_inputs.append(conv_scope + delim + 'weights/read')
self._AssertInputOpsAre(weights_quant, expected_inputs)
if delay and delay > 0:
output_op_name = (
conv_scope + delim + 'weights_quant/delayed_quant/Switch_1')
else:
if layer == 'DepthwiseConv2dNative':
output_op_name = conv_scope + delim + 'depthwise'
else:
output_op_name = conv_scope + delim + layer
self._AssertOutputGoesToOps(weights_quant, graph, [output_op_name])
if with_bypass:
conv_quant = graph.get_operation_by_name(
conv_scope + delim + 'conv_quant/' + quantization_node_name)
self.assertEqual(conv_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
conv_scope + delim +
'conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
conv_scope + delim +
'conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
conv_scope + delim + 'BiasAdd',
]
else:
expected_inputs = [
conv_scope + delim + 'conv_quant/AssignMinEma',
conv_scope + delim + 'conv_quant/AssignMaxEma',
conv_scope + delim + 'BiasAdd'
]
self._AssertInputOpsAre(conv_quant, expected_inputs)
output_op_name = (
conv_scope + delim +
'conv_quant/delayed_quant/Switch_1' if delay else scope + 'AddV2')
self._AssertOutputGoesToOps(conv_quant, graph, [output_op_name])
act_quant = graph.get_operation_by_name(scope + 'act_quant/' +
quantization_node_name)
self.assertEqual(act_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
scope + 'act_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
scope + 'act_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
scope + activation_op_name,
]
else:
expected_inputs = [
scope + 'act_quant/AssignMinEma', scope + 'act_quant/AssignMaxEma',
scope + activation_op_name
]
self._AssertInputOpsAre(act_quant, expected_inputs)
output_op_name = (
scope + 'act_quant/delayed_quant/Switch_1'
if delay else 'control_dependency')
self._AssertOutputGoesToOps(act_quant, graph, [output_op_name])
self._AssertIdempotent(graph)
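    # Note (added): _AssertIdempotent, a helper defined elsewhere in this
    # test, verifies that quantization is idempotent: applying the rewrite to
    # an already-quantized graph should leave it unchanged.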
def testQuantize_Conv2dWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_Conv2dWithoutBatchNorm)
def _TestQuantize_Conv2dWithoutBatchNorm(self, activation, activation_op_name,
with_bypass, delay, use_resource,
scope):
"""Tests quantization: inputs -> Conv2d no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
out_depth = 3 if with_bypass else 32
activation_fn = None if with_bypass else activation
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
scope=conv_scope)
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
if conv_scope is None:
conv_scope = ''
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'Conv2D', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_FCWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_FCWithoutBatchNorm)
def _TestQuantize_FCWithoutBatchNorm(self, activation, activation_op_name,
with_bypass, delay, use_resource, scope):
"""Tests quantization: inputs -> FC no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, depth = 5, 256
inputs = array_ops.zeros((batch_size, depth))
out_depth = 256 if with_bypass else 128
activation_fn = None if with_bypass else activation
fc_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = fully_connected(
inputs,
out_depth,
weights_initializer=self._WeightInit(0.03),
activation_fn=activation_fn,
scope=fc_scope)
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'MatMul', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_DepthwiseConv2dWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_DepthwiseConv2dWithoutBatchNorm)
def _TestQuantize_DepthwiseConv2dWithoutBatchNorm(
self, activation, activation_op_name, with_bypass, delay, use_resource,
scope):
"""Tests quantization: inputs -> DWConv2d no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else activation
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = separable_conv2d(
inputs,
None, [5, 5],
stride=stride,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
scope=conv_scope)
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name, with_bypass,
delay, use_resource)
def testQuantize_AtrousConvWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_AtrousConvWithoutBatchNorm)
def _TestQuantize_AtrousConvWithoutBatchNorm(self, activation,
activation_op_name, with_bypass,
delay, use_resource, scope):
"""Tests quantization: inputs -> atrous conv no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
dilation_rate = 2
activation_fn = None if with_bypass else activation
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = separable_conv2d(
inputs,
None, [3, 3],
rate=dilation_rate,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
scope=conv_scope)
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name, with_bypass,
delay, use_resource)
def _RunBatchNormTestOverParameters(self, test_fn):
# TODO(suharshs): Use parameterized test once OSS TF supports it.
parameters_list = [
# (activation, activation_op_name, with_bypass, delay, fused_batch_norm)
(nn_ops.relu6, 'Relu6', False, None, False),
(nn_ops.relu, 'Relu', False, None, False),
(array_ops.identity, 'Identity', False, None, False),
(nn_ops.relu6, 'Relu6', False, 5000, False),
(nn_ops.relu, 'Relu', False, 5000, False),
(array_ops.identity, 'Identity', False, 5000, False),
(nn_ops.relu6, 'Relu6', True, None, False),
(nn_ops.relu, 'Relu', True, None, False),
(array_ops.identity, 'Identity', True, None, False),
(nn_ops.relu6, 'Relu6', True, 5000, False),
(nn_ops.relu, 'Relu', True, 5000, False),
(array_ops.identity, 'Identity', True, 5000, False),
(nn_ops.relu6, 'Relu6', False, None, True),
(nn_ops.relu, 'Relu', False, None, True),
(array_ops.identity, 'Identity', False, None, True),
(nn_ops.relu6, 'Relu6', False, 5000, True),
(nn_ops.relu, 'Relu', False, 5000, True),
(array_ops.identity, 'Identity', False, 5000, True),
(nn_ops.relu6, 'Relu6', True, None, True),
(nn_ops.relu, 'Relu', True, None, True),
(array_ops.identity, 'Identity', True, None, True),
(nn_ops.relu6, 'Relu6', True, 5000, True),
(nn_ops.relu, 'Relu', True, 5000, True),
(array_ops.identity, 'Identity', True, 5000, True)
]
for params in parameters_list:
# Test everything with resource variables and normal variables.
test_fn(params[0], params[1], params[2], params[3], params[4], False,
None)
test_fn(params[0], params[1], params[2], params[3], params[4], True, None)
test_fn(params[0], params[1], params[2], params[3], params[4], False,
'test')
test_fn(params[0], params[1], params[2], params[3], params[4], True,
'test')
def _AssertCorrectQuantizedGraphWithBatchNorm(self, graph, scope, layer,
activation_op_name, with_bypass,
delay, use_resource):
quantization_node_name = 'FakeQuantWithMinMaxVars'
conv_scope = self._GetConvScope(scope, with_bypass)
delim = '/' if conv_scope else ''
if scope:
scope = scope + '/'
weights_quant = graph.get_operation_by_name(
conv_scope + delim + 'weights_quant/' + quantization_node_name)
self.assertEqual(weights_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
conv_scope + delim +
'weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
conv_scope + delim +
'weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
else:
expected_inputs = [
conv_scope + delim + 'weights_quant/' + 'AssignMinLast',
conv_scope + delim + 'weights_quant/' + 'AssignMaxLast'
]
expected_inputs.append(conv_scope + delim + 'mul_fold')
self._AssertInputOpsAre(weights_quant, expected_inputs)
if layer == 'DepthwiseConv2dNative':
output_op_name = conv_scope + delim + (
'weights_quant/delayed_quant/Switch_1' if delay else 'depthwise_Fold')
else:
output_op_name = conv_scope + delim + (
'weights_quant/delayed_quant/Switch_1' if delay else layer + '_Fold')
self._AssertOutputGoesToOps(weights_quant, graph, [output_op_name])
if with_bypass:
conv_quant = graph.get_operation_by_name(
conv_scope + delim + 'conv_quant/' + quantization_node_name)
self.assertEqual(conv_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
conv_scope + delim +
'conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
conv_scope + delim +
'conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
else:
expected_inputs = [
conv_scope + delim + 'conv_quant/AssignMinEma',
conv_scope + delim + 'conv_quant/AssignMaxEma',
]
expected_inputs.append(conv_scope + delim + 'add_fold')
self._AssertInputOpsAre(conv_quant, expected_inputs)
output_op_name = (
conv_scope + delim +
'conv_quant/delayed_quant/Switch_1' if delay else scope + 'AddV2')
self._AssertOutputGoesToOps(conv_quant, graph, [output_op_name])
act_quant = graph.get_operation_by_name(scope + 'act_quant/' +
quantization_node_name)
self.assertEqual(act_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
scope + 'act_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
scope + 'act_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
else:
expected_inputs = [
scope + 'act_quant/AssignMinEma',
scope + 'act_quant/AssignMaxEma',
]
expected_inputs.append(scope + activation_op_name)
self._AssertInputOpsAre(act_quant, expected_inputs)
output_op_name = (
scope + 'act_quant/delayed_quant/Switch_1'
if delay else 'control_dependency')
self._AssertOutputGoesToOps(act_quant, graph, [output_op_name])
self._AssertIdempotent(graph)
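    # Note (added): unlike the no-batch-norm checker above, the quantized
    # inputs here come from the folded graph: weights_quant consumes
    # 'mul_fold' (the batch-norm-scaled weights) and conv_quant consumes
    # 'add_fold' (the folded bias add), because FoldBatchNorms rewrites the
    # graph before Quantize runs.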
def testQuantize_Conv2dWithBatchNorm(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunBatchNormTestOverParameters(
self._TestQuantize_Conv2dWithBatchNorm)
def _TestQuantize_Conv2dWithBatchNorm(self, activation, activation_op_name,
with_bypass, delay, fused_batch_norm,
use_resource, scope):
"""Tests quantization: inputs -> Conv2d with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
out_depth = 3 if with_bypass else 32
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=conv_scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'Conv2D', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_FCWithBatchNorm(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunBatchNormTestOverParameters(self._TestQuantize_FCWithBatchNorm)
def _TestQuantize_FCWithBatchNorm(self, activation, activation_op_name,
with_bypass, delay, fused_batch_norm,
use_resource, scope):
"""Tests quantization: inputs -> FC with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
scope: String, specifies top level scope for the graph
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, depth = 5, 256
inputs = array_ops.zeros((batch_size, depth))
out_depth = 256 if with_bypass else 128
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = fully_connected(
inputs,
out_depth,
weights_initializer=self._WeightInit(0.03),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=conv_scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'MatMul', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_DepthwiseConv2dWithBatchNorm(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunBatchNormTestOverParameters(
self._TestQuantize_DepthwiseConv2dWithBatchNorm)
def _TestQuantize_DepthwiseConv2dWithBatchNorm(
self, activation, activation_op_name, with_bypass, delay,
fused_batch_norm, use_resource, scope):
"""Tests quantization: inputs -> DWConv2d with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
      scope: String, specifies the top-level scope for the graph.
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = separable_conv2d(
inputs,
None, [5, 5],
stride=stride,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=conv_scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name,
with_bypass, delay, use_resource)
def testQuantize_AtrousConvWithBatchNorm(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
self._RunBatchNormTestOverParameters(
self._TestQuantize_AtrousConvWithBatchNorm)
def _TestQuantize_AtrousConvWithBatchNorm(
self, activation, activation_op_name, with_bypass, delay,
fused_batch_norm, use_resource, scope):
"""Tests quantization: inputs -> atrous conv with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
      scope: String, specifies the top-level scope for the graph.
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
dilation_rate = 2
conv_scope = self._GetConvScope(scope, with_bypass)
scope = '' if scope is None else scope
delim = '/' if scope else ''
node = separable_conv2d(
inputs,
None, [3, 3],
rate=dilation_rate,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=conv_scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name=scope + delim + 'AddV2')
node = activation(node, name=scope + delim + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name,
with_bypass, delay, use_resource)
def _AssertIdempotent(self, graph):
# Ensure that calling the rewrite again doesn't change the graph.
graph_def_before = str(graph.as_graph_def())
with graph.as_default():
# Ensuring that calling the rewrite again doesn't add more nodes.
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True)
graph_def_after = str(graph.as_graph_def())
self.assertEqual(graph_def_before, graph_def_after)
def testBatchNormForcedUpdates(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
parameter_list = [
# (activation, activation_op_name, fused_batch_norm)
(nn_ops.relu6, 'Relu6', False),
(nn_ops.relu, 'Relu', False),
(array_ops.identity, 'Identity', False),
(nn_ops.relu6, 'Relu6', True),
(nn_ops.relu, 'Relu', True),
(array_ops.identity, 'Identity', True),
]
for params in parameter_list:
self._TestBatchNormForcedUpdates(params[0], params[1], params[2], False)
self._TestBatchNormForcedUpdates(params[0], params[1], params[2], True)
def _TestBatchNormForcedUpdates(self, activation, activation_op_name,
fused_batch_norm, use_resource):
"""post_activation bypass quantization should happen with forced updates."""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
      # Setting updates_collections to None forces updates, which adds an
      # extra identity operation after each batch norm.
bn_params = self._BatchNormParams(
fused=fused_batch_norm, force_updates=True)
conv = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation,
normalizer_fn=batch_norm,
normalizer_params=bn_params,
scope='test/test')
bypass_tensor = math_ops.add(conv, input2, name='test/add')
# The output of the post_activation bypass will be another layer.
_ = conv2d(
bypass_tensor,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
normalizer_fn=batch_norm,
normalizer_params=bn_params,
activation_fn=activation,
scope='test/unused')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, is_training=True)
      # Ensure that the bypass node is preceded and followed by a
      # FakeQuantWithMinMaxVars operation, since the output of the Add isn't
      # an activation.
self.assertTrue('FakeQuantWithMinMaxVars' in
[c.type for c in bypass_tensor.consumers()])
self.assertTrue('FakeQuantWithMinMaxVars' in
[i.op.type for i in bypass_tensor.op.inputs])
with open('/tmp/bn_quant_test.pbtxt', 'w') as f:
f.write(str(graph.as_graph_def()))
def _GetConvScope(self, scope, with_bypass):
if scope is None:
scope = ''
delim = '/' if scope else ''
if with_bypass:
conv_scope = scope + delim + 'test2'
else:
conv_scope = scope
return conv_scope
def _BatchNormParams(self, fused=False, force_updates=False):
params = {
'center': True,
'scale': True,
'decay': 1.0 - 0.003,
'fused': fused
}
if force_updates:
params['updates_collections'] = None
return params
def _WeightInit(self, stddev):
"""Returns truncated normal variable initializer.
Function is defined purely to shorten the name so that it stops wrapping.
Args:
stddev: Standard deviation of normal variable.
Returns:
      An initializer that initializes with a truncated normal variable.
"""
return init_ops.truncated_normal_initializer(stddev=stddev)
def _AssertInputOpsAre(self, op, in_op_names):
"""Asserts that all inputs to op come from in_op_names (disregarding order).
Args:
op: Operation to check inputs for.
in_op_names: List of strings, operations where all op's inputs should
come from.
"""
expected_inputs = [in_op_name + ':0' for in_op_name in in_op_names]
self.assertItemsEqual([t.name for t in op.inputs], expected_inputs)
def _AssertOutputGoesToOps(self, op, graph, out_op_names):
"""Asserts that outputs from op go to out_op_names (and perhaps others).
Args:
op: Operation to check outputs for.
graph: Graph where output operations are located.
out_op_names: List of strings, operations where op's outputs should go.
"""
for out_op_name in out_op_names:
out_op = graph.get_operation_by_name(out_op_name)
self.assertIn(op.outputs[0].name, [str(t.name) for t in out_op.inputs])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/quantize/python/quantize_parameterized_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for InputToOps class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class InputToOpsTest(test_util.TensorFlowTestCase):
def testNoConsumerOperations(self):
graph = ops.Graph()
with graph.as_default():
input_tensor = array_ops.zeros((1, 2, 3, 4))
input_to_ops_map = input_to_ops.InputToOps(graph)
consumer_operations = input_to_ops_map.ConsumerOperations(input_tensor.op)
self.assertEqual(0, len(consumer_operations))
def testOneConsumerOperation(self):
graph = ops.Graph()
with graph.as_default():
input_tensor = array_ops.zeros((1, 2, 3, 4))
output_tensor = nn_ops.relu6(input_tensor)
input_to_ops_map = input_to_ops.InputToOps(graph)
consumer_operations = input_to_ops_map.ConsumerOperations(input_tensor.op)
self.assertEqual(consumer_operations, {output_tensor.op})
def testSeveralConsumerOperations(self):
graph = ops.Graph()
with graph.as_default():
input_tensor = array_ops.zeros((1, 2, 3, 4))
output_tensor_1 = nn_ops.relu6(input_tensor)
output_tensor_2 = input_tensor + output_tensor_1
output_tensor_3 = input_tensor * output_tensor_2
input_to_ops_map = input_to_ops.InputToOps(graph)
consumer_operations = input_to_ops_map.ConsumerOperations(input_tensor.op)
self.assertEqual(consumer_operations,
{output_tensor_1.op, output_tensor_2.op,
output_tensor_3.op})
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/quantize/python/input_to_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network losses.
See [Contrib Losses](https://tensorflow.org/api_guides/python/contrib.losses).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.losses.python import metric_learning
# pylint: disable=wildcard-import
from tensorflow.contrib.losses.python.losses import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'absolute_difference',
'add_loss',
'hinge_loss',
'compute_weighted_loss',
'cosine_distance',
'get_losses',
'get_regularization_losses',
'get_total_loss',
'log_loss',
'mean_pairwise_squared_error',
'mean_squared_error',
'sigmoid_cross_entropy',
'softmax_cross_entropy',
'sparse_softmax_cross_entropy',
'metric_learning'
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/losses/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss operations for use in neural networks.
Note: All the losses are added to the `GraphKeys.LOSSES` collection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
__all__ = [
"absolute_difference", "add_loss", "cosine_distance",
"compute_weighted_loss", "get_losses", "get_regularization_losses",
"get_total_loss", "hinge_loss", "log_loss", "mean_pairwise_squared_error",
"mean_squared_error", "sigmoid_cross_entropy", "softmax_cross_entropy",
"sparse_softmax_cross_entropy"
]
def _scale_losses(losses, weights):
"""Computes the scaled loss.
Args:
losses: A `Tensor` of size [batch_size, d1, ... dN].
weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
      The `losses` are reduced (tf.reduce_sum) until their rank matches
      that of `weights`, at which point the reduced `losses` are element-wise
multiplied by `weights` and a final reduce_sum is computed on the result.
Conceptually, this operation is equivalent to broadcasting (tiling)
`weights` to be the same size as `losses`, performing an element-wise
multiplication, and summing the result.
Returns:
A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
`losses`.
"""
# First, compute the sum of the losses over all elements:
start_index = max(0, weights.get_shape().ndims)
axis = list(range(start_index, losses.get_shape().ndims))
reduced_losses = math_ops.reduce_sum(losses, axis=axis)
reduced_losses = math_ops.multiply(reduced_losses, weights)
return math_ops.reduce_sum(reduced_losses)
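# NOTE: The following example helper is not part of the original module; it is
# a minimal sketch of how `_scale_losses` reduces and weights losses. The name
# `_example_scale_losses` and all tensor values are invented for illustration.
def _example_scale_losses():
  losses = ops.convert_to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
  weights = ops.convert_to_tensor([1.0, 0.0])  # rank 1, so reduce over axis 1
  # Per-sample sums are [6, 15]; weighting yields [6, 0]; the final sum is 6.
  return _scale_losses(losses, weights)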
def _safe_mean(losses, num_present):
"""Computes a safe mean of the losses.
Args:
losses: A tensor whose elements contain individual loss measurements.
num_present: The number of measurable losses in the tensor.
Returns:
A scalar representing the mean of the losses. If `num_present` is zero,
then zero is returned.
"""
total_loss = math_ops.reduce_sum(losses)
return math_ops.div_no_nan(total_loss, num_present, name="value")
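# NOTE: Illustrative sketch, not part of the original module. It shows the
# zero-safe behavior of `_safe_mean`; the helper name and values are invented.
def _example_safe_mean():
  losses = ops.convert_to_tensor([2.0, 4.0])
  # Sum 6.0 over 2 present elements -> 3.0. With num_present == 0,
  # div_no_nan returns 0.0 instead of NaN.
  return _safe_mean(losses, num_present=2.0)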
@deprecated("2016-12-30", "Use tf.losses.compute_weighted_loss instead.")
def compute_weighted_loss(losses, weights=1.0, scope=None):
"""Computes the weighted loss.
Args:
losses: A tensor of size [batch_size, d1, ... dN].
weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` that returns the weighted loss.
Raises:
ValueError: If `weights` is `None` or the shape is not compatible with
`losses`, or if the number of dimensions (rank) of either `losses` or
`weights` is missing.
"""
with ops.name_scope(scope, "weighted_loss", [losses, weights]):
losses = ops.convert_to_tensor(losses)
input_dtype = losses.dtype
losses = math_ops.cast(losses, dtypes.float32)
weights = math_ops.cast(ops.convert_to_tensor(weights), dtypes.float32)
if losses.get_shape().ndims is None:
raise ValueError("losses.get_shape().ndims cannot be None")
weights_shape = weights.get_shape()
if weights_shape.ndims is None:
raise ValueError("weights.get_shape().ndims cannot be None")
if weights_shape.ndims > 1 and weights_shape.dims[-1].is_compatible_with(1):
weights = array_ops.squeeze(weights, [-1])
total_loss = _scale_losses(losses, weights)
num_present = _num_present(losses, weights)
mean_loss = _safe_mean(total_loss, num_present)
# convert the result back to the input type
mean_loss = math_ops.cast(mean_loss, input_dtype)
add_loss(mean_loss)
return mean_loss
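# NOTE: Illustrative sketch, not part of the original module; a hedged usage
# example for `compute_weighted_loss` with invented tensor values.
def _example_compute_weighted_loss():
  losses = ops.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])  # [batch=2, d1=2]
  weights = ops.convert_to_tensor([1.0, 0.0])  # mask out the second sample
  # Weighted total is 1 + 2 = 3 over 2 present elements -> mean loss 1.5.
  return compute_weighted_loss(losses, weights)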
def _num_present(losses, weights, per_batch=False):
"""Computes the number of elements in the loss function induced by `weights`.
A given weights tensor induces different numbers of usable elements in the
`losses` tensor. The `weights` tensor is broadcast across `losses` for all
possible dimensions. For example, if `losses` is a tensor of dimension
[4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in
effect, tiled to match the size of `losses`. Following this effective tile,
the total number of present elements is the number of non-zero weights.
Args:
losses: A tensor of size [batch_size, d1, ... dN].
weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
per_batch: Whether to return the number of elements per batch or as a sum
total.
Returns:
The number of present (non-zero) elements in the losses tensor. If
`per_batch` is True, the value is returned as a tensor of size
[batch_size]. Otherwise, a single scalar tensor is returned.
"""
  # If weights is a scalar, it's easy to compute:
if weights.get_shape().ndims == 0:
batch_size = array_ops.reshape(
array_ops.slice(array_ops.shape(losses), [0], [1]), [])
num_per_batch = math_ops.div(
math_ops.cast(array_ops.size(losses), dtypes.float32),
math_ops.cast(batch_size, dtypes.float32))
num_per_batch = array_ops.where(
math_ops.equal(weights, 0), 0.0, num_per_batch)
num_per_batch = math_ops.multiply(
array_ops.ones(array_ops.reshape(batch_size, [1])), num_per_batch)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
# First, count the number of nonzero weights:
if weights.get_shape().ndims >= 1:
axis = list(range(1, weights.get_shape().ndims))
num_nonzero_per_batch = math_ops.reduce_sum(
math_ops.cast(math_ops.not_equal(weights, 0), dtypes.float32),
axis=axis)
# Next, determine the number of elements that weights would broadcast to:
broadcast_dims = array_ops.slice(
array_ops.shape(losses), [weights.get_shape().ndims], [-1])
num_to_broadcast = math_ops.cast(math_ops.reduce_prod(broadcast_dims),
dtypes.float32)
num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
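# NOTE: Illustrative sketch, not part of the original module, tracing the
# broadcast counting documented above; shapes and values are invented.
def _example_num_present():
  losses = array_ops.zeros((2, 3))
  weights = ops.convert_to_tensor([1.0, 0.0])
  # `weights` broadcasts over the trailing dimension of size 3, so the first
  # sample contributes 3 present elements and the second none -> 3.0 total.
  return _num_present(losses, weights)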
@deprecated("2016-12-30", "Use tf.losses.add_loss instead.")
@add_arg_scope
def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
"""Adds a externally defined loss to the collection of losses.
Args:
loss: A loss `Tensor`.
loss_collection: Optional collection to add the loss to.
"""
if loss_collection:
ops.add_to_collection(loss_collection, loss)
@deprecated("2016-12-30", "Use tf.losses.get_losses instead.")
def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
"""Gets the list of losses from the loss_collection.
Args:
scope: an optional scope for filtering the losses to return.
loss_collection: Optional losses collection.
Returns:
a list of loss tensors.
"""
return ops.get_collection(loss_collection, scope)
@deprecated("2016-12-30", "Use tf.losses.get_regularization_losses instead.")
def get_regularization_losses(scope=None):
"""Gets the regularization losses.
Args:
scope: an optional scope for filtering the losses to return.
Returns:
A list of regularization losses as Tensors.
"""
return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)
@deprecated("2016-12-30", "Use tf.losses.get_total_loss instead.")
def get_total_loss(add_regularization_losses=True, name="total_loss"):
"""Returns a tensor whose value represents the total loss.
Notice that the function adds the given losses to the regularization losses.
Args:
add_regularization_losses: A boolean indicating whether or not to use the
regularization losses in the sum.
name: The name of the returned tensor.
Returns:
A `Tensor` whose value represents the total loss.
Raises:
ValueError: if `losses` is not iterable.
"""
losses = get_losses()
if add_regularization_losses:
losses += get_regularization_losses()
return math_ops.add_n(losses, name=name)
@deprecated("2016-12-30", "Use tf.losses.absolute_difference instead.")
def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
"""Adds an Absolute Difference loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "absolute_difference",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.cast(predictions, dtypes.float32)
labels = math_ops.cast(labels, dtypes.float32)
losses = math_ops.abs(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope=scope)
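# NOTE: Illustrative sketch, not part of the original module: a minimal call
# to `absolute_difference` with invented values.
def _example_absolute_difference():
  predictions = ops.convert_to_tensor([1.0, 2.0, 3.0])
  labels = ops.convert_to_tensor([0.0, 2.0, 5.0])
  # Element-wise |p - l| is [1, 0, 2]; the unweighted mean is 1.0.
  return absolute_difference(predictions, labels)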
@deprecated("2016-12-30",
"Use tf.losses.sigmoid_cross_entropy instead. Note that the order "
"of the predictions and labels arguments has been changed.")
def sigmoid_cross_entropy(logits,
multi_class_labels,
weights=1.0,
label_smoothing=0,
scope=None):
"""Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
    logits: [batch_size, num_classes] logits outputs of the network.
multi_class_labels: [batch_size, num_classes] labels in (0, 1).
weights: Coefficients for the loss. The tensor must be a scalar, a tensor of
shape [batch_size] or shape [batch_size, num_classes].
label_smoothing: If greater than 0 then smooth the labels.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weights` is invalid, or if
`weights` is None.
"""
with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
[logits, multi_class_labels, weights]) as scope:
logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
if label_smoothing > 0:
multi_class_labels = (
multi_class_labels * (1 - label_smoothing) + 0.5 * label_smoothing)
losses = nn.sigmoid_cross_entropy_with_logits(
labels=multi_class_labels, logits=logits, name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
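# NOTE: Illustrative sketch, not part of the original module, making the
# label-smoothing rule above concrete; logits and labels are invented.
def _example_sigmoid_label_smoothing():
  logits = ops.convert_to_tensor([[10.0, -10.0]])
  multi_class_labels = ops.convert_to_tensor([[1.0, 0.0]])
  # With label_smoothing=0.2, the labels become 1*(1-0.2)+0.1 = 0.9 and
  # 0*(1-0.2)+0.1 = 0.1 before the sigmoid cross-entropy is computed.
  return sigmoid_cross_entropy(logits, multi_class_labels, label_smoothing=0.2)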
@deprecated("2016-12-30",
"Use tf.losses.softmax_cross_entropy instead. Note that the order "
"of the logits and labels arguments has been changed.")
def softmax_cross_entropy(logits,
onehot_labels,
weights=1.0,
label_smoothing=0,
scope=None):
"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
new_onehot_labels = onehot_labels * (1 - label_smoothing)
+ label_smoothing / num_classes
Args:
    logits: [batch_size, num_classes] logits outputs of the network.
onehot_labels: [batch_size, num_classes] one-hot-encoded labels.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size].
label_smoothing: If greater than 0 then smooth the labels.
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
or if the shape of `weights` is invalid or if `weights` is None.
"""
with ops.name_scope(scope, "softmax_cross_entropy_loss",
[logits, onehot_labels, weights]) as scope:
logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
if label_smoothing > 0:
num_classes = math_ops.cast(
array_ops.shape(onehot_labels)[1], logits.dtype)
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
onehot_labels = onehot_labels * smooth_positives + smooth_negatives
losses = nn.softmax_cross_entropy_with_logits(
labels=onehot_labels, logits=logits, name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
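# NOTE: Illustrative sketch, not part of the original module, showing the
# 1/num_classes smoothing rule above; logits and labels are invented.
def _example_softmax_label_smoothing():
  logits = ops.convert_to_tensor([[10.0, 0.0, 0.0]])
  onehot_labels = ops.convert_to_tensor([[1.0, 0.0, 0.0]])
  # With label_smoothing=0.3 and 3 classes, the one-hot 1 becomes
  # 1*(1-0.3) + 0.3/3 = 0.8 and each 0 becomes 0.3/3 = 0.1.
  return softmax_cross_entropy(logits, onehot_labels, label_smoothing=0.3)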
@deprecated("2016-12-30",
"Use tf.losses.sparse_softmax_cross_entropy instead. Note that "
"the order of the logits and labels arguments has been changed.")
def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None):
"""Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
Args:
    logits: [batch_size, num_classes] logits outputs of the network.
labels: [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64`
in the range `[0, num_classes)`.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size] or [batch_size, 1].
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shapes of `logits`, `labels`, and `weights` are
incompatible, or if `weights` is None.
"""
with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
[logits, labels, weights]) as scope:
labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]])
losses = nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.log_loss instead. Note that the order of the "
"predictions and labels arguments has been changed.")
def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
"""Adds a Log Loss term to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
epsilon: A small increment to add to avoid taking a log of zero.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "log_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.cast(predictions, dtypes.float32)
labels = math_ops.cast(labels, dtypes.float32)
losses = -math_ops.multiply(
labels, math_ops.log(predictions + epsilon)) - math_ops.multiply(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(losses, weights, scope=scope)
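# NOTE: Illustrative sketch, not part of the original module: the binary
# cross-entropy computed by `log_loss`, with invented values.
def _example_log_loss():
  predictions = ops.convert_to_tensor([0.9, 0.1])
  labels = ops.convert_to_tensor([1.0, 0.0])
  # Per element: -l*log(p + eps) - (1-l)*log(1 - p + eps), here roughly
  # [-log(0.9), -log(0.9)] ~= [0.105, 0.105], so the mean is ~0.105.
  return log_loss(predictions, labels)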
@deprecated("2016-12-30",
"Use tf.losses.hinge_loss instead. Note that the order of the "
"logits and labels arguments has been changed, and to stay "
"unweighted, reduction=Reduction.NONE")
def hinge_loss(logits, labels=None, scope=None):
"""Method that returns the loss tensor for hinge loss.
Args:
logits: The logits, a float tensor. Note that logits are assumed to be
unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive
(resp. negative) binary prediction.
labels: The ground truth output tensor. Its shape should match the shape of
logits. The values of the tensor are expected to be 0.0 or 1.0. Internally
the {0,1} labels are converted to {-1,1} when calculating the hinge loss.
scope: The scope for the operations performed in computing the loss.
Returns:
    An unweighted `Tensor` with the same shape as `logits` and `labels`,
    representing the loss values across the batch.
Raises:
ValueError: If the shapes of `logits` and `labels` don't match.
"""
with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
labels = math_ops.cast(labels, dtypes.float32)
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
return nn_ops.relu(
math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
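# NOTE: Illustrative sketch, not part of the original module, showing the
# {0, 1} -> {-1, 1} label conversion described above; values are invented.
def _example_hinge_loss():
  logits = ops.convert_to_tensor([0.5, -2.0])
  labels = ops.convert_to_tensor([1.0, 0.0])
  # Labels become [1, -1]; the per-element loss is max(0, 1 - y * x):
  # [max(0, 1 - 0.5), max(0, 1 - 2.0)] = [0.5, 0.0].
  return hinge_loss(logits, labels)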
@deprecated("2016-12-30", "Use tf.losses.mean_squared_error instead.")
def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
"""Adds a Sum-of-Squares loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.cast(predictions, dtypes.float32)
labels = math_ops.cast(labels, dtypes.float32)
losses = math_ops.squared_difference(predictions, labels)
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.mean_pairwise_squared_error instead. Note that the "
"order of the predictions and labels arguments has been changed.")
def mean_pairwise_squared_error(predictions,
labels=None,
weights=1.0,
scope=None):
"""Adds a pairwise-errors-squared loss to the training procedure.
Unlike `mean_squared_error`, which is a measure of the differences between
corresponding elements of `predictions` and `labels`,
`mean_pairwise_squared_error` is a measure of the differences between pairs of
corresponding elements of `predictions` and `labels`.
  For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
  three pairs of differences, which are summed to compute the loss:
loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
Note that since the inputs are of size [batch_size, d0, ... dN], the
corresponding pairs are computed within each batch sample but not across
samples within a batch. For example, if `predictions` represents a batch of
16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs
is drawn from each image, but not across images.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector.
Args:
predictions: The predicted outputs, a tensor of size [batch_size, d0, .. dN]
where N+1 is the total number of dimensions in `predictions`.
labels: The ground truth output tensor, whose shape must match the shape of
the `predictions` tensor.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_pairwise_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.cast(predictions, dtypes.float32)
labels = math_ops.cast(labels, dtypes.float32)
weights = math_ops.cast(ops.convert_to_tensor(weights), dtypes.float32)
diffs = math_ops.subtract(predictions, labels)
# Need to verify here since the function doesn't use compute_weighted_loss
if diffs.get_shape().ndims is None:
raise ValueError("diffs.get_shape().ndims cannot be None")
if weights.get_shape().ndims is None:
raise ValueError("weights.get_shape().ndims cannot be None")
axis = list(range(1, diffs.get_shape().ndims))
sum_squares_diff_per_batch = math_ops.reduce_sum(
math_ops.square(diffs), axis=axis)
num_present_per_batch = _num_present(diffs, weights, per_batch=True)
term1 = 2.0 * math_ops.div_no_nan(
sum_squares_diff_per_batch, num_present_per_batch, name="value")
sum_diff = math_ops.reduce_sum(diffs, axis=axis)
term2 = 2.0 * math_ops.div_no_nan(
math_ops.square(sum_diff),
math_ops.square(num_present_per_batch),
name="value")
loss = _scale_losses(term1 - term2, weights)
mean_loss = array_ops.where(
math_ops.reduce_sum(num_present_per_batch) > 0,
loss,
array_ops.zeros_like(loss),
name="value")
add_loss(mean_loss)
return mean_loss
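# NOTE: Illustrative sketch, not part of the original module, tracing the
# term1/term2 computation above with invented values.
def _example_mean_pairwise_squared_error():
  predictions = ops.convert_to_tensor([[1.0, 3.0, 5.0]])  # [batch=1, d0=3]
  labels = ops.convert_to_tensor([[1.0, 2.0, 3.0]])
  # diffs = [0, 1, 2], so term1 = 2*sum(d^2)/n = 10/3 and
  # term2 = 2*(sum(d))^2/n^2 = 2; the loss is term1 - term2 = 4/3, the mean
  # squared difference over all ordered index pairs.
  return mean_pairwise_squared_error(predictions, labels)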
@deprecated("2016-12-30", "Use tf.losses.cosine_distance instead.")
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def cosine_distance(predictions,
labels=None,
axis=None,
weights=1.0,
scope=None,
dim=None):
"""Adds a cosine-distance loss to the training procedure.
Note that the function assumes that `predictions` and `labels` are already
unit-normalized.
Args:
predictions: An arbitrary matrix.
    labels: A `Tensor` whose shape matches 'predictions'.
axis: The dimension along which the cosine distance is computed.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
dim: The old (deprecated) name for `axis`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
`weights` is `None`.
"""
axis = deprecated_argument_lookup(
"axis", axis, "dim", dim)
if axis is None:
raise ValueError("You must specify 'axis'.")
with ops.name_scope(scope, "cosine_distance_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.cast(predictions, dtypes.float32)
labels = math_ops.cast(labels, dtypes.float32)
radial_diffs = math_ops.multiply(predictions, labels)
losses = 1 - math_ops.reduce_sum(
radial_diffs, axis=[
axis,
])
return compute_weighted_loss(losses, weights, scope=scope)
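# NOTE: Illustrative sketch, not part of the original module: `cosine_distance`
# on already unit-normalized rows, with invented values.
def _example_cosine_distance():
  predictions = ops.convert_to_tensor([[1.0, 0.0]])
  labels = ops.convert_to_tensor([[0.0, 1.0]])
  # The rows are orthogonal unit vectors, so 1 - <p, l> = 1.0.
  return cosine_distance(predictions, labels, axis=1)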
|
tensorflow-master
|
tensorflow/contrib/losses/python/losses/loss_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network losses.
See [Contrib Losses](https://tensorflow.org/api_guides/python/contrib.losses).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.losses.python.losses.loss_ops import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/losses/python/losses/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.losses.python.losses.loss_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class AbsoluteDifferenceLossTest(test.TestCase):
def setUp(self):
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
loss_ops.absolute_difference(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.absolute_difference(self._predictions, self._predictions)
with self.cached_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = loss_ops.absolute_difference(self._predictions, self._labels)
with self.cached_session():
self.assertAlmostEqual(5.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.cached_session():
self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.absolute_difference(self._predictions, self._labels,
constant_op.constant(weights))
with self.cached_session():
self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 0.0], shape=[2,])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.cached_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.cached_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.cached_session():
self.assertAlmostEqual(16.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.cached_session():
self.assertAlmostEqual(6.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.cached_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class SoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
with self.cached_session():
with self.assertRaises(ValueError):
loss_ops.softmax_cross_entropy(logits, labels, weights=None)
def testAllCorrect(self):
with self.cached_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
loss = loss_ops.softmax_cross_entropy(logits, labels)
self.assertEquals('softmax_cross_entropy_loss/value', loss.op.name)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrong(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
with self.cached_session():
loss = loss_ops.softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = 2.3
with self.cached_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = 2.3
with self.cached_session():
loss = loss_ops.softmax_cross_entropy(logits, labels,
constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
with self.cached_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testAllWrongAllWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([0, 0, 0], shape=[3])
with self.cached_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testSomeWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([1.2, 0, 0], shape=[3])
with self.cached_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(12.0, loss.eval(), 3)
def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
weights = constant_op.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
with self.assertRaises(ValueError):
loss_ops.softmax_cross_entropy(logits, labels, weights=weights).eval()
def testSoftmaxLabelSmoothing(self):
with self.cached_session():
# Softmax Cross Entropy Loss is:
# -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
# For our activations, [100, -100, -100] the log partition function
# becomes \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# so our cross entropy loss is:
# -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
logits = constant_op.constant([[100.0, -100.0, -100.0]])
labels = constant_op.constant([[1, 0, 0]])
label_smoothing = 0.1
loss = loss_ops.softmax_cross_entropy(
logits, labels, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
def testLossWithDynamicallyShapedWeights1D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = [2.3, 2.4, 2.5]
weights_placeholder = array_ops.placeholder(dtypes.float32, shape=[None])
loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
with self.cached_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
def testLossWithDynamicallyShapedWeights2D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = [[2.3], [2.4], [2.5]]
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=[None, None])
loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
with self.cached_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]])
with self.cached_session():
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(logits, labels, weights=None)
def testAllCorrectInt32Labels(self):
with self.cached_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectInt64Labels(self):
with self.cached_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectNonColumnLabels(self):
with self.cached_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([0, 1, 2])
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrongInt32Labels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int32)
with self.cached_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongInt64Labels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int64)
with self.cached_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongNonColumnLabels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
with self.cached_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.cached_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.cached_session():
loss = loss_ops.sparse_softmax_cross_entropy(
logits, labels, constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
with self.cached_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testNonZeroLossWithColumnWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([[1.2], [3.4], [5.6]])
with self.cached_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testAllWrongAllWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([0, 0, 0], shape=[3])
with self.cached_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testSomeWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([1.2, 0, 0], shape=[3])
with self.cached_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(12.0, loss.eval(), 3)
def testMeasurementSpecificWeightsRaisesException(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2]])
weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentWeightSizeRaisesException(self):
"""The weight tensor has incorrect number of elements."""
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2]])
weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentLabelSizeRaisesException(self):
"""The label tensor has incorrect number of elements."""
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2], [3]])
weights = constant_op.constant([1.2, 3.4, 5.6])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentWeightShapeRaisesException(self):
"""The weight tensor has incorrect shape."""
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2], [3]])
weights = constant_op.constant([[1.2, 3.4], [5.6, 7.8]])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentLabelShapeRaisesException(self):
"""The label tensor has incorrect shape."""
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 1], [2, 3]])
weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testLossWithDynamicallyShapedWeights1D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
weights = [2.3, 2.4, 2.5]
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=[None])
loss = loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights_placeholder)
with self.cached_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
def testLossWithDynamicallyShapedWeights2D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
weights = [[2.3], [2.4], [2.5]]
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=[None, None])
loss = loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights_placeholder)
with self.cached_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SigmoidCrossEntropyLossTest(test.TestCase):
def testAllCorrectSigmoid(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 1))
labels = array_ops.placeholder(dtypes.float32, shape=(None, 1))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: np.ones((32, 1)),
labels: np.ones((32, 1)),
})
self.assertAlmostEqual(0.313, loss, 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 2))
labels = array_ops.placeholder(dtypes.float32, shape=(None, 2))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: np.ones((32, 2)),
labels: np.ones((32, 2)),
})
self.assertAlmostEqual(0.313, loss, 3)
def testAllWrongSigmoid(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)
def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(1700.0 / 7.0, loss.eval(), 3)
def testMultiCorrectSigmoid(self):
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0],
[-100.0, 100.0, 100.0]])
labels = constant_op.constant([[1, 0, 1],
[1, 1, 0],
[0, 1, 1]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels)
    self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
with self.cached_session():
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testSigmoidLabelSmoothingCorrect(self):
with self.cached_session():
logits = constant_op.constant([[100.0, -100.0, -100.0]])
labels = constant_op.constant([[1, 0, 1]])
# Sigmoid cross entropy loss is:
# max(x,0) - x*z + log(1 + exp(-abs(x)))
# The new labels are:
# z' = z * (1 - L) + 0.5 L
# 1 -> 1 - 0.5 L
# 0 -> 0.5 L
# here we expect:
# 1/3 * (100 - 100 * (1 - 0.5 L) + 0
# + 0 + 100 * (0.5 L) + 0
# + 0 + 100 * (1 - 0.5 L) + 0)
# = 1/3 * (100 + 50 L)
label_smoothing = 0.1
loss = loss_ops.sigmoid_cross_entropy(
logits, labels, label_smoothing=label_smoothing)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
with self.cached_session():
label_smoothing = 0.1
sigmoid_logits = constant_op.constant([[100.0, -100.0, -100.0]])
sigmoid_labels = constant_op.constant([[1, 0, 1]])
sigmoid_loss = loss_ops.sigmoid_cross_entropy(
sigmoid_logits, sigmoid_labels, label_smoothing=label_smoothing)
softmax_logits = constant_op.constant(
[[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
softmax_labels = constant_op.constant([[0, 1], [1, 0], [0, 1]])
softmax_loss = loss_ops.softmax_cross_entropy(
softmax_logits, softmax_labels, label_smoothing=label_smoothing)
self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3)
class LogLossTest(test.TestCase):
def setUp(self):
predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
labels = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
self._np_predictions = predictions
self._np_labels = labels
epsilon = 1e-7
self._expected_losses = np.multiply(
labels, np.log(predictions + epsilon)) + np.multiply(
1 - labels, np.log(1 - predictions + epsilon))
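    # Note: these are the per-element log-likelihood terms; the tests below
    # negate and average them, matching
    # log loss = -mean(z*log(p) + (1-z)*log(1-p)).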
self._predictions = constant_op.constant(predictions)
self._labels = constant_op.constant(labels)
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
loss_ops.log_loss(self._labels, self._labels, weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.log_loss(self._labels, self._labels)
with self.cached_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testAllCorrectNoLossWeightWithPlaceholder(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_labels.shape)
loss = loss_ops.log_loss(tf_predictions, self._labels)
with self.cached_session():
self.assertAlmostEqual(
0.0, loss.eval(feed_dict={tf_predictions: self._np_labels}), 3)
def testNonZeroLoss(self):
loss = loss_ops.log_loss(self._predictions, self._labels)
with self.cached_session():
self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.cached_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.log_loss(self._predictions, self._labels,
constant_op.constant(weights))
with self.cached_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_predictions.shape)
weights = 2.3
loss = loss_ops.log_loss(tf_predictions, self._labels,
constant_op.constant(weights))
with self.cached_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[None, None])
weights = 2.3
loss = loss_ops.log_loss(tf_predictions, self._labels,
constant_op.constant(weights))
with self.cached_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2])
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 6.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
weights = constant_op.constant([1.2, 0], shape=[2])
expected_losses = np.multiply(self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
(2, 3)))
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
weights = constant_op.constant([1.2, 0], shape=[2, 1])
expected_losses = np.multiply(self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
(2, 3)))
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)
def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
weights = constant_op.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
with self.cached_session():
with self.assertRaises(ValueError):
loss_ops.log_loss(self._predictions, self._labels, weights)
def testNonZeroLossWithMeasurementSpecificWeights(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
loss = loss_ops.log_loss(
self._predictions,
self._labels,
constant_op.constant(
weights, shape=(2, 3)))
with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3)
def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
loss = loss_ops.log_loss(
tf_predictions,
self._labels,
constant_op.constant(
weights, shape=(2, 3)))
with self.cached_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
loss = loss_ops.log_loss(
self._predictions,
self._labels,
constant_op.constant(
weights, shape=(2, 3)))
with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
tf_weights = constant_op.constant(weights, shape=(2, 3))
loss = loss_ops.log_loss(tf_predictions, self._labels, tf_weights)
with self.cached_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)
def testLossWithSampleSpecificWeightsAllZero(self):
tf_weights = array_ops.zeros(shape=(2, 3))
loss = loss_ops.log_loss(self._predictions, self._labels, tf_weights)
with self.cached_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class HingeLossTest(test.TestCase):
def testIncompatibleShapes(self):
with self.cached_session():
logits = constant_op.constant([[-1.0], [2.1]])
labels = constant_op.constant([0.0, 1.0])
with self.assertRaises(ValueError):
_ = loss_ops.hinge_loss(logits, labels).eval()
def testAllOutsideMargin(self):
with self.cached_session():
logits = constant_op.constant([1.2, -1.4, -1.0, 2.1])
labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
loss = loss_ops.hinge_loss(logits, labels)
self.assertAllClose(loss.eval(), [0.0, 0.0, 0.0, 0.0], atol=1e-3)
def testSomeInsideMargin(self):
with self.cached_session():
logits = constant_op.constant([[-0.7], [-1.4], [1.4], [0.6]])
labels = constant_op.constant([[0.0], [0.0], [1.0], [1.0]])
loss = loss_ops.hinge_loss(logits, labels)
# Examples 1 and 4 are on the correct side of the hyperplane but within
# the margin so they incur some (small) loss.
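      # Sketch of the arithmetic, with hinge = max(0, 1 - (2*label - 1) *
      # logit): example 1 gives max(0, 1 - (-1)*(-0.7)) = 0.3 and example 4
      # gives max(0, 1 - 1*0.6) = 0.4.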
self.assertAllClose(loss.eval(), [[0.3], [0.0], [0.0], [0.4]], atol=1e-3)
def testSomeMisclassified(self):
with self.cached_session():
logits = constant_op.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
labels = constant_op.constant([[[1.0], [0.0], [0.0], [1.0]]])
loss = loss_ops.hinge_loss(logits, labels)
# Examples 2 and 4 are on the wrong side of the hyperplane so they incur
# some (fairly large) loss.
self.assertAllClose(
loss.eval(), [[[0.0], [1.4], [0.0], [2.1]]], atol=1e-3)
class MeanSquaredErrorTest(test.TestCase):
def setUp(self):
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
loss_ops.mean_squared_error(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.mean_squared_error(self._predictions, self._predictions)
with self.cached_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = loss_ops.mean_squared_error(self._predictions, self._labels)
with self.cached_session():
self.assertAlmostEqual(49.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.cached_session():
self.assertAlmostEqual(49.5 * weights, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.mean_squared_error(self._predictions, self._labels,
constant_op.constant(weights))
with self.cached_session():
self.assertAlmostEqual(49.5 * weights, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2,])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.cached_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2, 1])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.cached_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.cached_session():
self.assertAlmostEqual(587 / 5.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.cached_session():
self.assertAlmostEqual(18.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.cached_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class MeanPairwiseSquaresErrorTest(test.TestCase):
def setUp(self):
self._predictions = np.array([[4, 8, 12], [8, 1, 3]])
self._labels = np.array([[1, 9, 2], [-5, -5, 7]])
batch_size, dims = self._labels.shape
# Compute the expected loss 'manually'.
total = np.zeros((batch_size, 1))
for b in range(batch_size):
for i in range(dims):
for j in range(dims):
x = self._predictions[b, i].item() - self._predictions[b, j].item()
y = self._labels[b, i].item() - self._labels[b, j].item()
tmp = (x - y) * (x - y)
total[b] += tmp
self._expected_losses = np.divide(total, 9.0)
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels))
with self.cached_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels))
with self.cached_session():
self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)
def testGradientWithZeroWeight(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
inputs = array_ops.ones((2, 3))
weights = variable_scope.get_variable(
'weights',
shape=[3, 4],
initializer=init_ops.truncated_normal_initializer())
predictions = math_ops.matmul(inputs, weights)
optimizer = momentum_lib.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
loss = loss_ops.mean_pairwise_squared_error(predictions, predictions, 0)
gradients_to_variables = optimizer.compute_gradients(loss)
init_op = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
for grad, _ in gradients_to_variables:
np_grad = sess.run(grad)
self.assertFalse(np.isnan(np_grad).any())
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=weights)
with self.cached_session():
self.assertAlmostEqual(weights * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(weights))
with self.cached_session():
self.assertAlmostEqual(weights * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarZeroWeight(self):
weights = 0
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(weights))
with self.cached_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
weights = 2.3
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._predictions.shape)
tf_labels = array_ops.placeholder(dtypes.float32, shape=self._labels.shape)
loss = loss_ops.mean_pairwise_squared_error(
predictions=tf_predictions,
labels=tf_labels,
weights=constant_op.constant(weights))
with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
tf_predictions: self._predictions,
tf_labels: self._labels,
})
self.assertAlmostEqual(weights * np.sum(self._expected_losses), loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = np.asarray([2.0, 1.0]).reshape((2, 1))
expected_losses = np.multiply(weights, self._expected_losses)
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(
weights, shape=[2]))
with self.cached_session():
self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3)
def testZeroLossWithOneDimBatchZeroWeights(self):
weights = np.asarray([0.0, 0.0]).reshape((2, 1))
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(
weights, shape=[2]))
with self.cached_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self):
weights = np.asarray([1.2, 3.4]).reshape((2, 1))
expected_losses = np.multiply(weights, self._expected_losses)
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._predictions.shape)
tf_labels = array_ops.placeholder(dtypes.int32, shape=self._labels.shape)
loss = loss_ops.mean_pairwise_squared_error(
predictions=tf_predictions,
labels=tf_labels,
weights=constant_op.constant(
weights, shape=[2]))
with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
tf_predictions: self._predictions,
tf_labels: self._labels,
})
self.assertAlmostEqual(np.sum(expected_losses), loss, 3)
def testLossWithAllZeroBatchSpecificWeights(self):
weights = np.zeros((2, 1))
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(
weights, shape=[2]))
with self.cached_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testLossIsAssociativeAcrossBatchElements(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
height = 3
width = 4
shape = (1, height, width, 1)
labels0 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
predictions0 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
labels1 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
predictions1 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
loss0 = loss_ops.mean_pairwise_squared_error(
predictions=predictions0,
labels=labels0)
loss1 = loss_ops.mean_pairwise_squared_error(
predictions=predictions1,
labels=labels1)
loss0_1 = loss_ops.mean_pairwise_squared_error(
predictions=array_ops.concat([predictions0, predictions1], 0),
labels=array_ops.concat([labels0, labels1], 0))
with self.cached_session() as session:
loss0, loss1, loss0_1 = session.run([loss0, loss1, loss0_1])
self.assertTrue(loss0 > 0)
self.assertTrue(loss1 > 0)
self.assertAlmostEqual(loss0 + loss1, loss0_1, 5)
class CosineDistanceLossTest(test.TestCase):
def setUp(self):
self._predictions = np.asarray([
[1, 0, 0], # Batch 1
[0, 0, -1],
[1, 0, 0], # Batch 2
[1, 0, 0],
[0, 0, -1], # Batch 3
[1, 0, 0]
]).reshape((3, 2, 3))
self._labels = np.asarray([[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0]]).reshape((3, 2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
loss_ops.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2,
weights=None)
def testAllCorrectNoWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2)
with self.cached_session():
self.assertAlmostEqual(0, loss.eval(), 5)
def testPartiallyCorrectWithIntegerValues(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2)
with self.cached_session():
self.assertAlmostEqual(1, loss.eval(), 5)
def testPartiallyCorrectFloatingPointValues(self):
predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
labels = np.matrix(('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
tf_preds = constant_op.constant(
predictions, shape=(3, 1, 3), dtype=dtypes.float32)
tf_labels = constant_op.constant(
labels, shape=(3, 1, 3), dtype=dtypes.float32)
loss = loss_ops.cosine_distance(tf_preds, tf_labels, dim=2)
with self.cached_session():
self.assertAlmostEqual(1.0, loss.eval(), 5)
def testSampleSpecificWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant([1, 0, 0]))
with self.cached_session():
self.assertEqual(1.0, loss.eval())
def testMeasurementSpecificWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.cached_session():
self.assertEqual(3.0 / 4.0, loss.eval())
def testValueErrorThrownWithShapelessPlaceholder(self):
tf_predictions = array_ops.placeholder(dtypes.float32)
with self.cached_session():
with self.assertRaises(ValueError):
loss_ops.cosine_distance(
predictions=tf_predictions,
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._labels.shape)
loss = loss_ops.cosine_distance(
predictions=tf_predictions,
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.cached_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
self.assertEqual(3.0 / 4.0, loss)
def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=array_ops.zeros((3,)))
with self.cached_session():
self.assertEqual(0, loss.eval())
def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=array_ops.zeros((3, 2)))
with self.cached_session():
self.assertEqual(0, loss.eval())
class ComputeWeightedLossTest(test.TestCase):
def testHingeLoss(self):
logits = constant_op.constant([1.2, 0.4, -1.0, -1.1])
labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
losses = loss_ops.hinge_loss(logits, labels)
self.assertFalse(loss_ops.get_losses())
loss = loss_ops.compute_weighted_loss(losses)
self.assertTrue(loss_ops.get_losses())
with self.cached_session():
self.assertAllClose(losses.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)
self.assertAllClose(loss.eval(), 3.5 / 4.0, atol=1e-3)
class AddLossTest(test.TestCase):
def testAddExternalLoss(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
losses = loss_ops.hinge_loss(logits, labels)
self.assertFalse(loss_ops.get_losses())
loss_ops.add_loss(math_ops.reduce_mean(losses))
self.assertTrue(loss_ops.get_losses())
total_loss = loss_ops.get_total_loss()
with self.cached_session():
self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)
self.assertAllClose(total_loss.eval(), 3.5 / 4.0, atol=1e-3)
def testNoneLossCollection(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
losses = loss_ops.hinge_loss(logits, labels)
self.assertFalse(loss_ops.get_losses())
loss_ops.add_loss(math_ops.reduce_mean(losses), loss_collection=None)
self.assertFalse(loss_ops.get_losses())
with self.cached_session():
self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)
def testNoCollectLosses(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
self.assertFalse(loss_ops.get_losses())
with arg_scope([loss_ops.add_loss], loss_collection=None):
loss_ops.absolute_difference(logits, labels)
loss_ops.log_loss(logits, labels)
loss_ops.mean_squared_error(logits, labels)
loss_ops.sigmoid_cross_entropy(logits, labels)
loss_ops.softmax_cross_entropy(logits, labels)
self.assertFalse(loss_ops.get_losses())
def testNoCollectLossesBatch2(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
self.assertFalse(loss_ops.get_losses())
with arg_scope([loss_ops.add_loss], loss_collection=None):
loss_ops.absolute_difference(logits, labels)
loss_ops.log_loss(logits, labels)
loss_ops.mean_squared_error(logits, labels)
loss_ops.sigmoid_cross_entropy(logits, labels)
loss_ops.softmax_cross_entropy(logits, labels)
self.assertFalse(loss_ops.get_losses())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/losses/python/losses/loss_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements various metric learning losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.summary import summary
try:
# pylint: disable=g-import-not-at-top
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance(feature, squared=False):
"""Computes the pairwise distance matrix with numerical stability.
output[i, j] = || feature[i, :] - feature[j, :] ||_2
Args:
feature: 2-D Tensor of size [number of data, feature dimension].
squared: Boolean, whether or not to square the pairwise distances.
Returns:
pairwise_distances: 2-D Tensor of size [number of data, number of data].
"""
pairwise_distances_squared = math_ops.add(
math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
math_ops.reduce_sum(
math_ops.square(array_ops.transpose(feature)),
axis=[0],
keepdims=True)) - 2.0 * math_ops.matmul(feature,
array_ops.transpose(feature))
# Deal with numerical inaccuracies. Set small negatives to zero.
pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
  # Get a mask marking the entries with exactly zero distance.
error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
# Optionally take the sqrt.
if squared:
pairwise_distances = pairwise_distances_squared
else:
pairwise_distances = math_ops.sqrt(
pairwise_distances_squared +
math_ops.cast(error_mask, dtypes.float32) * 1e-16)
# Undo conditionally adding 1e-16.
pairwise_distances = math_ops.multiply(
pairwise_distances,
math_ops.cast(math_ops.logical_not(error_mask), dtypes.float32))
num_data = array_ops.shape(feature)[0]
# Explicitly set diagonals to zero.
mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
array_ops.ones([num_data]))
pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
return pairwise_distances
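# Illustrative reference (a hypothetical helper, not part of the original
# module): a small NumPy mirror of pairwise_distance, handy when
# sanity-checking the numerical-stability handling above on tiny inputs.
def _pairwise_distance_np_reference(feature, squared=False):
  """NumPy sketch of pairwise_distance for small test arrays."""
  import numpy as np  # local import; this module itself does not use numpy
  squared_norms = np.sum(feature**2, axis=1)
  distances_squared = (squared_norms[:, None] + squared_norms[None, :]
                       - 2.0 * feature.dot(feature.T))
  # Clamp small negatives caused by floating-point round-off.
  distances_squared = np.maximum(distances_squared, 0.0)
  # The diagonal is exactly zero by definition.
  np.fill_diagonal(distances_squared, 0.0)
  return distances_squared if squared else np.sqrt(distances_squared)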
def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
margin=1.0):
"""Computes the contrastive loss.
  This loss encourages the embeddings to be close to each other for
  samples of the same label, and to be at least the margin constant apart
  for samples of different labels.
See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
binary labels indicating positive vs negative pair.
embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
images. Embeddings should be l2 normalized.
embeddings_positive: 2-D float `Tensor` of embedding vectors for the
positive images. Embeddings should be l2 normalized.
margin: margin term in the loss definition.
Returns:
contrastive_loss: tf.float32 scalar.
"""
# Get per pair distances
distances = math_ops.sqrt(
math_ops.reduce_sum(
math_ops.squared_difference(embeddings_anchor, embeddings_positive),
1))
# Add contrastive loss for the siamese network.
# label here is {0,1} for neg, pos.
return math_ops.reduce_mean(
math_ops.cast(labels, distances.dtype) * math_ops.square(distances) +
(1. - math_ops.cast(labels, distances.dtype)) *
math_ops.square(math_ops.maximum(margin - distances, 0.)),
name='contrastive_loss')
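# Worked example for the formula above (values illustrative only): with
# margin=1.0, a positive pair (label 1) at distance d contributes d**2 and
# a negative pair (label 0) contributes max(1.0 - d, 0.)**2, e.g.
#   label=1, d=0.3 -> 0.09
#   label=0, d=0.3 -> 0.49
#   label=0, d=1.5 -> 0.0  (already beyond the margin)
# and the returned loss is the mean of these per-pair terms.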
def masked_maximum(data, mask, dim=1):
"""Computes the axis wise maximum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the maximum.
Returns:
masked_maximums: N-D `Tensor`.
The maximized dimension is of size 1 after the operation.
"""
axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
masked_maximums = math_ops.reduce_max(
math_ops.multiply(data - axis_minimums, mask), dim,
keepdims=True) + axis_minimums
return masked_maximums
def masked_minimum(data, mask, dim=1):
"""Computes the axis wise minimum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the minimum.
Returns:
masked_minimums: N-D `Tensor`.
The minimized dimension is of size 1 after the operation.
"""
axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
masked_minimums = math_ops.reduce_min(
math_ops.multiply(data - axis_maximums, mask), dim,
keepdims=True) + axis_maximums
return masked_minimums
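# Note on the shift trick in masked_maximum/masked_minimum (an explanatory
# sketch): masked_minimum first subtracts the row maximum, so every entry
# is non-positive and the zeros introduced by the mask can never win the
# reduce_min; the row maximum is then added back. E.g. for
#   data = [[1., 5., 3.]], mask = [[1., 0., 1.]]
# masked_minimum returns [[1.]] instead of picking up 0. from the
# masked-out entry. masked_maximum uses the mirrored shift with the row
# minimum.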
def triplet_semihard_loss(labels, embeddings, margin=1.0):
"""Computes the triplet loss with semi-hard negative mining.
  The loss encourages the positive distances (between a pair of embeddings
  with the same labels) to be smaller, by at least the margin constant, than
  the minimum negative distance among negatives that are greater than the
  positive distance (the so-called semi-hard negatives) in the mini-batch.
  If no such negative exists, the largest negative distance is used instead.
See: https://arxiv.org/abs/1503.03832.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
triplet_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pdist_matrix = pairwise_distance(embeddings, squared=True)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
# Compute the mask.
pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
mask = math_ops.logical_and(
array_ops.tile(adjacency_not, [batch_size, 1]),
math_ops.greater(
pdist_matrix_tile, array_ops.reshape(
array_ops.transpose(pdist_matrix), [-1, 1])))
mask_final = array_ops.reshape(
math_ops.greater(
math_ops.reduce_sum(
math_ops.cast(mask, dtype=dtypes.float32), 1, keepdims=True),
0.0), [batch_size, batch_size])
mask_final = array_ops.transpose(mask_final)
adjacency_not = math_ops.cast(adjacency_not, dtype=dtypes.float32)
mask = math_ops.cast(mask, dtype=dtypes.float32)
# negatives_outside: smallest D_an where D_an > D_ap.
negatives_outside = array_ops.reshape(
masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
negatives_outside = array_ops.transpose(negatives_outside)
# negatives_inside: largest D_an.
negatives_inside = array_ops.tile(
masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
semi_hard_negatives = array_ops.where(
mask_final, negatives_outside, negatives_inside)
loss_mat = math_ops.add(margin, pdist_matrix - semi_hard_negatives)
mask_positives = math_ops.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
  # In lifted-struct, the authors multiply by 0.5 for the upper triangular;
  # in semihard, they take all positive pairs except the diagonal.
num_positives = math_ops.reduce_sum(mask_positives)
triplet_loss = math_ops.truediv(
math_ops.reduce_sum(
math_ops.maximum(
math_ops.multiply(loss_mat, mask_positives), 0.0)),
num_positives,
name='triplet_semihard_loss')
return triplet_loss
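# Usage sketch (illustrative; shapes and the l2 normalization are the
# caller's responsibility, as the docstring notes):
#   embeddings = nn.l2_normalize(raw_embeddings, 1)
#   loss = triplet_semihard_loss(labels, embeddings, margin=1.0)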
# pylint: disable=line-too-long
def npairs_loss(labels, embeddings_anchor, embeddings_positive,
reg_lambda=0.002, print_losses=False):
"""Computes the npairs loss.
  Npairs loss expects paired data where a pair is composed of samples from
  the same label and each pair in the minibatch has a different label. The loss
has two components. The first component is the L2 regularizer on the
embedding vectors. The second component is the sum of cross entropy loss
which takes each row of the pair-wise similarity matrix as logits and
the remapped one-hot labels as labels.
See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf
Args:
labels: 1-D tf.int32 `Tensor` of shape [batch_size/2].
embeddings_anchor: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
embedding vectors for the anchor images. Embeddings should not be
l2 normalized.
embeddings_positive: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
embedding vectors for the positive images. Embeddings should not be
l2 normalized.
reg_lambda: Float. L2 regularization term on the embedding vectors.
print_losses: Boolean. Option to print the xent and l2loss.
Returns:
npairs_loss: tf.float32 scalar.
"""
# pylint: enable=line-too-long
# Add the regularizer on the embedding.
reg_anchor = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
reg_positive = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
l2loss = math_ops.multiply(
0.25 * reg_lambda, reg_anchor + reg_positive, name='l2loss')
# Get per pair similarities.
similarity_matrix = math_ops.matmul(
embeddings_anchor, embeddings_positive, transpose_a=False,
transpose_b=True)
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
labels_remapped = math_ops.cast(
math_ops.equal(labels, array_ops.transpose(labels)), dtypes.float32)
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
logits=similarity_matrix, labels=labels_remapped)
xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
if print_losses:
xent_loss = logging_ops.Print(
xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
return l2loss + xent_loss
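# Label-remapping sketch (values illustrative): for labels = [0, 1, 0] the
# equality matrix labels == labels^T is
#   [[1, 0, 1],
#    [0, 1, 0],
#    [1, 0, 1]]
# and after row normalization row 0 becomes [0.5, 0, 0.5], so the softmax
# cross entropy spreads the target mass over all positives in each row.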
def _build_multilabel_adjacency(sparse_labels):
"""Builds multilabel adjacency matrix.
As of March 14th, 2017, there's no op for the dot product between
  two sparse tensors in TF. However, there is a `sparse_minimum` op which is
  equivalent to an AND op between two sparse boolean tensors.
This computes the dot product between two sparse boolean inputs.
Args:
sparse_labels: List of 1-D boolean sparse tensors.
Returns:
adjacency_matrix: 2-D dense `Tensor`.
"""
num_pairs = len(sparse_labels)
adjacency_matrix = array_ops.zeros([num_pairs, num_pairs])
for i in range(num_pairs):
for j in range(num_pairs):
sparse_dot_product = math_ops.cast(
sparse_ops.sparse_reduce_sum(sparse_ops.sparse_minimum(
sparse_labels[i], sparse_labels[j])),
dtypes.float32)
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 0)
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 1)
one_hot_matrix = array_ops.pad(sparse_dot_product,
[[i, num_pairs-i-1],
[j, num_pairs-j-1]], 'CONSTANT')
adjacency_matrix += one_hot_matrix
return adjacency_matrix
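# Dense-equivalent sketch (illustrative): if the multilabels formed a dense
# boolean matrix L of shape [num_pairs, num_classes], the loop above would
# compute L.dot(L.T), i.e. adjacency[i, j] = |labels(i) AND labels(j)|,
# assembled entry by entry because (as the docstring notes) no
# sparse-sparse dot product op was available.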
def npairs_loss_multilabel(sparse_labels, embeddings_anchor,
embeddings_positive, reg_lambda=0.002,
print_losses=False):
r"""Computes the npairs loss with multilabel data.
  Npairs loss expects paired data where a pair is composed of samples from
  the same label and each pair in the minibatch has a different label. The loss
has two components. The first component is the L2 regularizer on the
embedding vectors. The second component is the sum of cross entropy loss
which takes each row of the pair-wise similarity matrix as logits and
the remapped one-hot labels as labels. Here, the similarity is defined by the
dot product between two embedding vectors. S_{i,j} = f(x_i)^T f(x_j)
  To deal with multilabel inputs, we use the count of label intersection,
  i.e. L_{i,j} = | set_of_labels_for(i) \cap set_of_labels_for(j) |
  Then we normalize each row of the count-based label matrix so that each
  row sums to one.
Args:
sparse_labels: List of 1-D Boolean `SparseTensor` of dense_shape
[batch_size/2, num_classes] labels for the anchor-pos pairs.
embeddings_anchor: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
the embedding vectors for the anchor images. Embeddings should not be
l2 normalized.
embeddings_positive: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
the embedding vectors for the positive images. Embeddings should not be
l2 normalized.
reg_lambda: Float. L2 regularization term on the embedding vectors.
print_losses: Boolean. Option to print the xent and l2loss.
Returns:
npairs_loss: tf.float32 scalar.
Raises:
TypeError: When the specified sparse_labels is not a `SparseTensor`.
"""
  if not all(
      isinstance(l, sparse_tensor.SparseTensor) for l in sparse_labels):
raise TypeError(
'sparse_labels must be a list of SparseTensors, but got %s' % str(
sparse_labels))
with ops.name_scope('NpairsLossMultiLabel'):
# Add the regularizer on the embedding.
reg_anchor = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
reg_positive = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
l2loss = math_ops.multiply(0.25 * reg_lambda,
reg_anchor + reg_positive, name='l2loss')
# Get per pair similarities.
similarity_matrix = math_ops.matmul(
embeddings_anchor, embeddings_positive, transpose_a=False,
transpose_b=True)
    # TODO(coreylynch): need to check that the sparse values
    # are composed only of 0's and 1's.
multilabel_adjacency_matrix = _build_multilabel_adjacency(sparse_labels)
labels_remapped = math_ops.cast(multilabel_adjacency_matrix, dtypes.float32)
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
logits=similarity_matrix, labels=labels_remapped)
xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
if print_losses:
xent_loss = logging_ops.Print(
xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
return l2loss + xent_loss
def lifted_struct_loss(labels, embeddings, margin=1.0):
"""Computes the lifted structured loss.
The loss encourages the positive distances (between a pair of embeddings
with the same labels) to be smaller than any negative distances (between a
pair of embeddings with different labels) in the mini-batch in a way
that is differentiable with respect to the embedding vectors.
See: https://arxiv.org/abs/1511.06452.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should not
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
lifted_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pairwise_distances = pairwise_distance(embeddings)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
diff = margin - pairwise_distances
mask = math_ops.cast(adjacency_not, dtype=dtypes.float32)
# Safe maximum: Temporarily shift negative distances
# above zero before taking max.
  # This is to take the max only among negatives.
row_minimums = math_ops.reduce_min(diff, 1, keepdims=True)
row_negative_maximums = math_ops.reduce_max(
math_ops.multiply(diff - row_minimums, mask), 1,
keepdims=True) + row_minimums
# Compute the loss.
# Keep track of matrix of maximums where M_ij = max(m_i, m_j)
# where m_i is the max of alpha - negative D_i's.
# This matches the Caffe loss layer implementation at:
# https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp # pylint: disable=line-too-long
max_elements = math_ops.maximum(
row_negative_maximums, array_ops.transpose(row_negative_maximums))
diff_tiled = array_ops.tile(diff, [batch_size, 1])
mask_tiled = array_ops.tile(mask, [batch_size, 1])
max_elements_vect = array_ops.reshape(
array_ops.transpose(max_elements), [-1, 1])
loss_exp_left = array_ops.reshape(
math_ops.reduce_sum(
math_ops.multiply(
math_ops.exp(diff_tiled - max_elements_vect), mask_tiled),
1,
keepdims=True), [batch_size, batch_size])
loss_mat = max_elements + math_ops.log(
loss_exp_left + array_ops.transpose(loss_exp_left))
# Add the positive distance.
loss_mat += pairwise_distances
mask_positives = math_ops.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
# *0.5 for upper triangular, and another *0.5 for 1/2 factor for loss^2.
num_positives = math_ops.reduce_sum(mask_positives) / 2.0
lifted_loss = math_ops.truediv(
0.25 * math_ops.reduce_sum(
math_ops.square(
math_ops.maximum(
math_ops.multiply(loss_mat, mask_positives), 0.0))),
num_positives,
name='liftedstruct_loss')
return lifted_loss
def update_1d_tensor(y, index, value):
"""Updates 1d tensor y so that y[index] = value.
Args:
y: 1-D Tensor.
index: index of y to modify.
value: new value to write at y[index].
Returns:
y_mod: 1-D Tensor. Tensor y after the update.
"""
value = array_ops.squeeze(value)
  # Modify the 1-D tensor y at index with value, e.g.
  # chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, best_medoid).
y_before = array_ops.slice(y, [0], [index])
y_after = array_ops.slice(y, [index + 1], [-1])
y_mod = array_ops.concat([y_before, [value], y_after], 0)
return y_mod
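# Worked example of the slice/concat update (values illustrative): for
# y = [10, 20, 30, 40], index = 2, value = 99 the pieces are
#   y_before = [10, 20], [value] = [99], y_after = [40]
# so y_mod = [10, 20, 99, 40].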
def get_cluster_assignment(pairwise_distances, centroid_ids):
"""Assign data points to the neareset centroids.
Tensorflow has numerical instability and doesn't always choose
the data point with theoretically zero distance as it's nearest neighbor.
Thus, for each centroid in centroid_ids, explicitly assign
the centroid itself as the nearest centroid.
This is done through the mask tensor and the constraint_vect tensor.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
centroid_ids: 1-D Tensor of centroid indices.
Returns:
y_fixed: 1-D tensor of cluster assignment.
"""
predictions = math_ops.argmin(
array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
batch_size = array_ops.shape(pairwise_distances)[0]
# Deal with numerical instability
mask = math_ops.reduce_any(array_ops.one_hot(
centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
axis=0)
constraint_one_hot = math_ops.multiply(
array_ops.one_hot(centroid_ids,
batch_size,
array_ops.constant(1, dtype=dtypes.int64),
array_ops.constant(0, dtype=dtypes.int64),
axis=0,
dtype=dtypes.int64),
math_ops.cast(math_ops.range(array_ops.shape(centroid_ids)[0]),
dtypes.int64))
constraint_vect = math_ops.reduce_sum(
array_ops.transpose(constraint_one_hot), axis=0)
y_fixed = array_ops.where(mask, constraint_vect, predictions)
return y_fixed
def compute_facility_energy(pairwise_distances, centroid_ids):
"""Compute the average travel distance to the assigned centroid.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
centroid_ids: 1-D Tensor of indices.
Returns:
facility_energy: dtypes.float32 scalar.
"""
return -1.0 * math_ops.reduce_sum(
math_ops.reduce_min(
array_ops.gather(pairwise_distances, centroid_ids), axis=0))
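# Worked example (values illustrative): with centroid_ids = [0, 2] and
#   pairwise_distances = [[0., 4., 9.],
#                         [4., 0., 1.],
#                         [9., 1., 0.]]
# the gathered rows are [[0., 4., 9.], [9., 1., 0.]], the per-column minima
# are [0., 1., 0.], and the facility energy is -(0. + 1. + 0.) = -1.0.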
def compute_clustering_score(labels, predictions, margin_type):
"""Computes the clustering score via sklearn.metrics functions.
There are various ways to compute the clustering score. Intuitively,
we want to measure the agreement of two clustering assignments (labels vs
  predictions), ignoring permutations, and output a score from zero to one,
  where values close to one indicate significant agreement.
  This code supports the following scoring functions:
    nmi: normalized mutual information
    ami: adjusted mutual information
    ari: adjusted Rand index
vmeasure: v-measure
const: indicator checking whether the two clusterings are the same.
See http://scikit-learn.org/stable/modules/classes.html#clustering-metrics
for the detailed descriptions.
Args:
labels: 1-D Tensor. ground truth cluster assignment.
predictions: 1-D Tensor. predicted cluster assignment.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
clustering_score: dtypes.float32 scalar.
The possible valid values are from zero to one.
Zero means the worst clustering and one means the perfect clustering.
Raises:
ValueError: margin_type is not recognized.
"""
margin_type_to_func = {
'nmi': _compute_nmi_score,
'ami': _compute_ami_score,
'ari': _compute_ari_score,
'vmeasure': _compute_vmeasure_score,
'const': _compute_zeroone_score
}
if margin_type not in margin_type_to_func:
raise ValueError('Unrecognized margin_type: %s' % margin_type)
clustering_score_fn = margin_type_to_func[margin_type]
return array_ops.squeeze(clustering_score_fn(labels, predictions))
def _compute_nmi_score(labels, predictions):
return math_ops.cast(
script_ops.py_func(
metrics.normalized_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='nmi'),
dtypes.float32)
def _compute_ami_score(labels, predictions):
ami_score = math_ops.cast(
script_ops.py_func(
metrics.adjusted_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='ami'),
dtypes.float32)
return math_ops.maximum(0.0, ami_score)
def _compute_ari_score(labels, predictions):
ari_score = math_ops.cast(
script_ops.py_func(
metrics.adjusted_rand_score, [labels, predictions], [dtypes.float64],
name='ari'),
dtypes.float32)
# ari score can go below 0
# http://scikit-learn.org/stable/modules/clustering.html#adjusted-rand-score
return math_ops.maximum(0.0, ari_score)
def _compute_vmeasure_score(labels, predictions):
vmeasure_score = math_ops.cast(
script_ops.py_func(
metrics.v_measure_score, [labels, predictions], [dtypes.float64],
name='vmeasure'),
dtypes.float32)
return math_ops.maximum(0.0, vmeasure_score)
def _compute_zeroone_score(labels, predictions):
zeroone_score = math_ops.cast(
math_ops.equal(
math_ops.reduce_sum(
math_ops.cast(math_ops.equal(labels, predictions), dtypes.int32)),
array_ops.shape(labels)[0]),
dtypes.float32)
return zeroone_score
def _find_loss_augmented_facility_idx(pairwise_distances, labels, chosen_ids,
candidate_ids, margin_multiplier,
margin_type):
"""Find the next centroid that maximizes the loss augmented inference.
This function is a subroutine called from compute_augmented_facility_locations
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of current centroid indices.
candidate_ids: 1-D Tensor of candidate indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
integer index.
"""
num_candidates = array_ops.shape(candidate_ids)[0]
pairwise_distances_chosen = array_ops.gather(pairwise_distances, chosen_ids)
pairwise_distances_candidate = array_ops.gather(
pairwise_distances, candidate_ids)
pairwise_distances_chosen_tile = array_ops.tile(
pairwise_distances_chosen, [1, num_candidates])
candidate_scores = -1.0 * math_ops.reduce_sum(
array_ops.reshape(
math_ops.reduce_min(
array_ops.concat([
pairwise_distances_chosen_tile,
array_ops.reshape(pairwise_distances_candidate, [1, -1])
], 0),
axis=0,
keepdims=True), [num_candidates, -1]),
axis=1)
nmi_scores = array_ops.zeros([num_candidates])
iteration = array_ops.constant(0)
def func_cond(iteration, nmi_scores):
del nmi_scores # Unused in func_cond()
return iteration < num_candidates
def func_body(iteration, nmi_scores):
predictions = get_cluster_assignment(
pairwise_distances,
array_ops.concat([chosen_ids, [candidate_ids[iteration]]], 0))
nmi_score_i = compute_clustering_score(labels, predictions, margin_type)
pad_before = array_ops.zeros([iteration])
pad_after = array_ops.zeros([num_candidates - 1 - iteration])
    # Return 1 - NMI score as the structured loss,
    # because a higher NMI (in [0, 1]) is better.
return iteration + 1, nmi_scores + array_ops.concat(
[pad_before, [1.0 - nmi_score_i], pad_after], 0)
_, nmi_scores = control_flow_ops.while_loop(
func_cond, func_body, [iteration, nmi_scores])
candidate_scores = math_ops.add(
candidate_scores, margin_multiplier * nmi_scores)
argmax_index = math_ops.cast(
math_ops.argmax(candidate_scores, axis=0), dtypes.int32)
return candidate_ids[argmax_index]
def compute_augmented_facility_locations(pairwise_distances, labels, all_ids,
margin_multiplier, margin_type):
"""Computes the centroid locations.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
all_ids: 1-D Tensor of all data indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: 1-D Tensor of chosen centroid indices.
"""
def func_cond_augmented(iteration, chosen_ids):
del chosen_ids # Unused argument in func_cond_augmented.
return iteration < num_classes
def func_body_augmented(iteration, chosen_ids):
# find a new facility location to add
# based on the clustering score and the NMI score
candidate_ids = array_ops.setdiff1d(all_ids, chosen_ids)[0]
new_chosen_idx = _find_loss_augmented_facility_idx(pairwise_distances,
labels, chosen_ids,
candidate_ids,
margin_multiplier,
margin_type)
chosen_ids = array_ops.concat([chosen_ids, [new_chosen_idx]], 0)
return iteration + 1, chosen_ids
num_classes = array_ops.size(array_ops.unique(labels)[0])
chosen_ids = array_ops.constant(0, dtype=dtypes.int32, shape=[0])
  # num_classes gets determined at run time based on the sampled batch.
iteration = array_ops.constant(0)
_, chosen_ids = control_flow_ops.while_loop(
func_cond_augmented,
func_body_augmented, [iteration, chosen_ids],
shape_invariants=[iteration.get_shape(), tensor_shape.TensorShape(
[None])])
return chosen_ids
def update_medoid_per_cluster(pairwise_distances, pairwise_distances_subset,
labels, chosen_ids, cluster_member_ids,
cluster_idx, margin_multiplier, margin_type):
"""Updates the cluster medoid per cluster.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
pairwise_distances_subset: 2-D Tensor of pairwise distances for one cluster.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
cluster_member_ids: 1-D Tensor of cluster member indices for one cluster.
cluster_idx: Index of this one cluster.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
def func_cond(iteration, scores_margin):
del scores_margin # Unused variable scores_margin.
return iteration < num_candidates
def func_body(iteration, scores_margin):
# swap the current medoid with the candidate cluster member
candidate_medoid = math_ops.cast(cluster_member_ids[iteration], dtypes.int32)
tmp_chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, candidate_medoid)
predictions = get_cluster_assignment(pairwise_distances, tmp_chosen_ids)
metric_score = compute_clustering_score(labels, predictions, margin_type)
pad_before = array_ops.zeros([iteration])
pad_after = array_ops.zeros([num_candidates - 1 - iteration])
return iteration + 1, scores_margin + array_ops.concat(
[pad_before, [1.0 - metric_score], pad_after], 0)
  # pairwise_distances_subset is of size [p, 1, 1, p]; the intermediate
  # dummy dimensions at [1, 2] make this code work in the edge case where
  # p=1, i.e. when the cluster size is one.
scores_fac = -1.0 * math_ops.reduce_sum(
array_ops.squeeze(pairwise_distances_subset, [1, 2]), axis=0)
iteration = array_ops.constant(0)
num_candidates = array_ops.size(cluster_member_ids)
scores_margin = array_ops.zeros([num_candidates])
_, scores_margin = control_flow_ops.while_loop(func_cond, func_body,
[iteration, scores_margin])
candidate_scores = math_ops.add(scores_fac, margin_multiplier * scores_margin)
argmax_index = math_ops.cast(
math_ops.argmax(candidate_scores, axis=0), dtypes.int32)
best_medoid = math_ops.cast(cluster_member_ids[argmax_index], dtypes.int32)
chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, best_medoid)
return chosen_ids
def update_all_medoids(pairwise_distances, predictions, labels, chosen_ids,
margin_multiplier, margin_type):
"""Updates all cluster medoids a cluster at a time.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
predictions: 1-D Tensor of predicted cluster assignment.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
def func_cond_augmented_pam(iteration, chosen_ids):
del chosen_ids # Unused argument.
return iteration < num_classes
def func_body_augmented_pam(iteration, chosen_ids):
"""Call the update_medoid_per_cluster subroutine."""
mask = math_ops.equal(
math_ops.cast(predictions, dtypes.int64),
math_ops.cast(iteration, dtypes.int64))
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(
array_ops.gather(
array_ops.transpose(
array_ops.gather(pairwise_distances, this_cluster_ids)),
this_cluster_ids))
chosen_ids = update_medoid_per_cluster(pairwise_distances,
pairwise_distances_subset, labels,
chosen_ids, this_cluster_ids,
iteration, margin_multiplier,
margin_type)
return iteration + 1, chosen_ids
unique_class_ids = array_ops.unique(labels)[0]
num_classes = array_ops.size(unique_class_ids)
iteration = array_ops.constant(0)
_, chosen_ids = control_flow_ops.while_loop(
func_cond_augmented_pam, func_body_augmented_pam, [iteration, chosen_ids])
return chosen_ids
def compute_augmented_facility_locations_pam(pairwise_distances,
labels,
margin_multiplier,
margin_type,
chosen_ids,
pam_max_iter=5):
"""Refine the cluster centroids with PAM local search.
  For a fixed number of iterations, alternates between updating the cluster
  assignment and updating the cluster medoids.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
chosen_ids: 1-D Tensor of initial estimate of cluster centroids.
pam_max_iter: Number of refinement iterations.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
for _ in range(pam_max_iter):
# update the cluster assignment given the chosen_ids (S_pred)
predictions = get_cluster_assignment(pairwise_distances, chosen_ids)
    # Update the medoids for each cluster.
chosen_ids = update_all_medoids(pairwise_distances, predictions, labels,
chosen_ids, margin_multiplier, margin_type)
return chosen_ids
def compute_gt_cluster_score(pairwise_distances, labels):
"""Compute ground truth facility location score.
Loop over each unique classes and compute average travel distances.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
Returns:
gt_cluster_score: dtypes.float32 score.
"""
unique_class_ids = array_ops.unique(labels)[0]
num_classes = array_ops.size(unique_class_ids)
iteration = array_ops.constant(0)
gt_cluster_score = array_ops.constant(0.0, dtype=dtypes.float32)
def func_cond(iteration, gt_cluster_score):
del gt_cluster_score # Unused argument.
return iteration < num_classes
def func_body(iteration, gt_cluster_score):
"""Per each cluster, compute the average travel distance."""
mask = math_ops.equal(labels, unique_class_ids[iteration])
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(
array_ops.gather(
array_ops.transpose(
array_ops.gather(pairwise_distances, this_cluster_ids)),
this_cluster_ids))
this_cluster_score = -1.0 * math_ops.reduce_min(
math_ops.reduce_sum(
pairwise_distances_subset, axis=0))
return iteration + 1, gt_cluster_score + this_cluster_score
_, gt_cluster_score = control_flow_ops.while_loop(
func_cond, func_body, [iteration, gt_cluster_score])
return gt_cluster_score
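# Equivalently, the ground truth score computed above is
#   score_gt = -sum_c min_{j in c} sum_{i in c} d(x_i, x_j),
# i.e. each ground-truth cluster contributes the negated total travel
# distance to its best medoid.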
def cluster_loss(labels,
embeddings,
margin_multiplier,
enable_pam_finetuning=True,
margin_type='nmi',
print_losses=False):
"""Computes the clustering loss.
The following structured margins are supported:
nmi: normalized mutual information
ami: adjusted mutual information
    ari: adjusted Rand index
vmeasure: v-measure
const: indicator checking whether the two clusterings are the same.
Args:
    labels: 2-D Tensor of labels of shape [batch size, 1].
    embeddings: 2-D Tensor of embeddings of shape
      [batch size, embedding dimension]. Embeddings should be l2 normalized.
    margin_multiplier: float32 scalar. Multiplier on the structured margin
      term. See section 3.2 of the paper for discussion.
    enable_pam_finetuning: Boolean. Whether to run local PAM refinement.
      See section 3.4 of the paper for discussion.
    margin_type: Type of structured margin to use. See section 3.2 of the
      paper for discussion. Can be 'nmi', 'ami', 'ari', 'vmeasure', 'const'.
    print_losses: Boolean. Option to print the loss.
Paper: https://arxiv.org/abs/1612.01213.
Returns:
clustering_loss: A float32 scalar `Tensor`.
Raises:
ImportError: If sklearn dependency is not installed.
"""
if not HAS_SKLEARN:
raise ImportError('Cluster loss depends on sklearn.')
pairwise_distances = pairwise_distance(embeddings)
labels = array_ops.squeeze(labels)
all_ids = math_ops.range(array_ops.shape(embeddings)[0])
# Compute the loss augmented inference and get the cluster centroids.
chosen_ids = compute_augmented_facility_locations(pairwise_distances, labels,
all_ids, margin_multiplier,
margin_type)
# Given the predicted centroids, compute the clustering score.
score_pred = compute_facility_energy(pairwise_distances, chosen_ids)
# Branch whether to use PAM finetuning.
if enable_pam_finetuning:
# Initialize with augmented facility solution.
chosen_ids = compute_augmented_facility_locations_pam(pairwise_distances,
labels,
margin_multiplier,
margin_type,
chosen_ids)
score_pred = compute_facility_energy(pairwise_distances, chosen_ids)
# Given the predicted centroids, compute the cluster assignments.
predictions = get_cluster_assignment(pairwise_distances, chosen_ids)
  # Compute the clustering score (e.g. NMI) between the two assignments.
clustering_score_pred = compute_clustering_score(labels, predictions,
margin_type)
# Compute the clustering score from labels.
score_gt = compute_gt_cluster_score(pairwise_distances, labels)
# Compute the hinge loss.
clustering_loss = math_ops.maximum(
score_pred + margin_multiplier * (1.0 - clustering_score_pred) - score_gt,
0.0,
name='clustering_loss')
clustering_loss.set_shape([])
if print_losses:
clustering_loss = logging_ops.Print(
clustering_loss,
['clustering_loss: ', clustering_loss, array_ops.shape(
clustering_loss)])
# Clustering specific summary.
summary.scalar('losses/score_pred', score_pred)
summary.scalar('losses/' + margin_type, clustering_score_pred)
summary.scalar('losses/score_gt', score_gt)
return clustering_loss
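# A minimal usage sketch (illustrative; assumes graph mode, an sklearn
# install, and l2-normalized embeddings; the names below are hypothetical):
#
#   import tensorflow as tf
#   embeddings = tf.nn.l2_normalize(tf.random_normal([8, 4]), axis=1)
#   labels = tf.constant([[0], [0], [1], [1], [2], [2], [3], [3]])
#   loss = cluster_loss(labels, embeddings, margin_multiplier=1.0)
#   with tf.Session() as sess:
#     print(sess.run(loss))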
|
tensorflow-master
|
tensorflow/contrib/losses/python/metric_learning/metric_loss_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network losses.
See [Contrib Losses](https://tensorflow.org/api_guides/python/contrib.losses).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'contrastive_loss',
'cluster_loss',
'lifted_struct_loss',
'npairs_loss',
'npairs_loss_multilabel',
'triplet_semihard_loss',
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/losses/python/metric_learning/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for triplet_semihard_loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.losses.python import metric_learning as metric_loss_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance_np(feature, squared=False):
"""Computes the pairwise distance matrix in numpy.
Args:
feature: 2-D numpy array of size [number of data, feature dimension]
    squared: Boolean. If True, output is the pairwise squared Euclidean
      distance matrix; else, output is the pairwise Euclidean distance matrix.
Returns:
pairwise_distances: 2-D numpy array of size
[number of data, number of data].
"""
triu = np.triu_indices(feature.shape[0], 1)
upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
if squared:
upper_tri_pdists **= 2.
num_data = feature.shape[0]
pairwise_distances = np.zeros((num_data, num_data))
pairwise_distances[np.triu_indices(num_data, 1)] = upper_tri_pdists
# Make symmetrical.
pairwise_distances = pairwise_distances + pairwise_distances.T - np.diag(
pairwise_distances.diagonal())
return pairwise_distances
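# Illustrative example: for feat = np.array([[0., 0.], [3., 4.]]),
# pairwise_distance_np(feat) returns [[0., 5.], [5., 0.]] and
# pairwise_distance_np(feat, squared=True) returns [[0., 25.], [25., 0.]].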
class ContrastiveLossTest(test.TestCase):
def testContrastive(self):
with self.cached_session():
num_data = 10
feat_dim = 6
margin = 1.0
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, size=(num_data,)).astype(np.float32)
# Compute the loss in NP
dist = np.sqrt(
np.sum(np.square(embeddings_anchor - embeddings_positive), axis=1))
loss_np = np.mean(
labels * np.square(dist) +
(1.0 - labels) * np.square(np.maximum(margin - dist, 0.0)))
# Compute the loss with TF
loss_tf = metric_loss_ops.contrastive_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class TripletSemiHardLossTest(test.TestCase):
def testTripletSemiHard(self):
with self.cached_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP.
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding, squared=True)
loss_np = 0.0
num_positives = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
num_positives += 1.0
pos_distance = pdist_matrix[i][j]
neg_distances = []
for k in range(num_data):
if adjacency[i][k] == 0:
neg_distances.append(pdist_matrix[i][k])
# Sort by distance.
neg_distances.sort()
chosen_neg_distance = neg_distances[0]
for l in range(len(neg_distances)):
chosen_neg_distance = neg_distances[l]
if chosen_neg_distance > pos_distance:
break
loss_np += np.maximum(
0.0, margin - chosen_neg_distance + pos_distance)
loss_np /= num_positives
# Compute the loss in TF.
loss_tf = metric_loss_ops.triplet_semihard_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class LiftedStructLossTest(test.TestCase):
def testLiftedStruct(self):
with self.cached_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding)
loss_np = 0.0
num_constraints = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
d_pos = pdist_matrix[i][j]
negs = []
for k in range(num_data):
if not adjacency[i][k]:
negs.append(margin - pdist_matrix[i][k])
for l in range(num_data):
if not adjacency[j][l]:
negs.append(margin - pdist_matrix[j][l])
negs = np.array(negs)
max_elem = np.max(negs)
negs -= max_elem
negs = np.exp(negs)
soft_maximum = np.log(np.sum(negs)) + max_elem
num_constraints += 1.0
this_loss = max(soft_maximum + d_pos, 0)
loss_np += this_loss * this_loss
loss_np = loss_np / num_constraints / 2.0
# Compute the loss in TF
loss_tf = metric_loss_ops.lifted_struct_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def convert_to_list_of_sparse_tensor(np_matrix):
list_of_sparse_tensors = []
nrows, ncols = np_matrix.shape
for i in range(nrows):
sp_indices = []
for j in range(ncols):
if np_matrix[i][j] == 1:
sp_indices.append([j])
num_non_zeros = len(sp_indices)
list_of_sparse_tensors.append(sparse_tensor.SparseTensor(
indices=np.array(sp_indices),
values=np.ones((num_non_zeros,)),
dense_shape=np.array([ncols,])))
return list_of_sparse_tensors
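# For example, np_matrix = np.array([[1, 0, 1]]) yields one SparseTensor with
# indices [[0], [2]], values [1., 1.] and dense_shape [3].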
class NpairsLossTest(test.TestCase):
def testNpairs(self):
with self.cached_session():
num_data = 15
feat_dim = 6
num_classes = 5
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.equal(
labels_reshaped, labels_reshaped.T).astype(np.float32)
labels_remapped /= np.sum(labels_remapped, axis=1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class NpairsLossMultiLabelTest(test.TestCase):
def testNpairsMultiLabelLossWithSingleLabelEqualsNpairsLoss(self):
with self.cached_session():
num_data = 15
feat_dim = 6
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.arange(num_data)
labels = np.reshape(labels, -1)
      # Compute vanilla npairs loss.
loss_npairs = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
# Compute npairs multilabel loss.
labels_one_hot = np.identity(num_data)
loss_npairs_multilabel = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels_one_hot),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
self.assertAllClose(loss_npairs, loss_npairs_multilabel)
def testNpairsMultiLabel(self):
with self.cached_session():
num_data = 15
feat_dim = 6
num_classes = 10
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, (num_data, num_classes))
      # Set the last column to one so that each row has at least one bit set.
labels[:, -1] = 1
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
      labels_remapped = np.dot(labels, labels.T).astype(np.float64)
labels_remapped /= np.sum(labels_remapped, 1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def compute_ground_truth_cluster_score(feat, y):
y_unique = np.unique(y)
score_gt_np = 0.0
for c in y_unique:
feat_subset = feat[y == c, :]
pdist_subset = pairwise_distance_np(feat_subset)
score_gt_np += -1.0 * np.min(np.sum(pdist_subset, axis=0))
score_gt_np = score_gt_np.astype(np.float32)
return score_gt_np
def compute_cluster_loss_numpy(feat,
y,
margin_multiplier=1.0,
enable_pam_finetuning=True):
if enable_pam_finetuning:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).pam_augmented_fit(feat, y,
margin_multiplier)
else:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).loss_augmented_fit(feat, y,
margin_multiplier)
score_augmented = facility.score_aug_
score_gt = compute_ground_truth_cluster_score(feat, y)
return np.maximum(np.float32(0.0), score_augmented - score_gt)
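# In equation form, this numpy reference computes the structured hinge loss
#   loss = max(0, F_aug(S_pred) - F(S_gt)),
# where F_aug(S_pred) = F(S_pred) + margin_multiplier * (1 - NMI(y, y_pred))
# and F(.) is the negated facility-location energy exposed as score_.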
class ForwardGreedyFacility(object):
def __init__(self, n_clusters=8):
self.n_clusters = n_clusters
self.center_ics_ = None
def _check_init_args(self):
# Check n_clusters.
if (self.n_clusters is None or self.n_clusters <= 0 or
not isinstance(self.n_clusters, int)):
      raise ValueError('n_clusters has to be a positive integer.')
def loss_augmented_fit(self, feat, y, loss_mult):
"""Fit K-Medoids to the provided data."""
self._check_init_args()
    # Sanity-check the input array.
feat = self._check_array(feat)
# Apply distance metric to get the distance matrix.
pdists = pairwise_distance_np(feat)
num_data = feat.shape[0]
candidate_ids = list(range(num_data))
candidate_scores = np.zeros(num_data,)
subset = []
k = 0
while k < self.n_clusters:
candidate_scores = []
for i in candidate_ids:
# push i to subset.
subset.append(i)
marginal_cost = -1.0 * np.sum(np.min(pdists[:, subset], axis=1))
loss = 1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset))
candidate_scores.append(marginal_cost + loss_mult * loss)
# remove i from subset.
subset.pop()
# push i_star to subset.
i_star = candidate_ids[np.argmax(candidate_scores)]
subset.append(i_star)
# remove i_star from candidate indices.
candidate_ids.remove(i_star)
k += 1
# Expose labels_ which are the assignments of
# the training data to clusters.
self.labels_ = self._get_cluster_ics(pdists, subset)
# Expose cluster centers, i.e. medoids.
self.cluster_centers_ = feat.take(subset, axis=0)
# Expose indices of chosen cluster centers.
self.center_ics_ = subset
# Expose the score = -\sum_{i \in V} min_{j \in S} || x_i - x_j ||
self.score_ = np.float32(-1.0) * self._get_facility_distance(pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
# Expose the chosen cluster indices.
self.subset_ = subset
return self
def _augmented_update_medoid_ics_in_place(self, pdists, y_gt, cluster_ics,
medoid_ics, loss_mult):
for cluster_idx in range(self.n_clusters):
# y_pred = self._get_cluster_ics(D, medoid_ics)
# Don't prematurely do the assignment step.
# Do this after we've updated all cluster medoids.
y_pred = cluster_ics
if sum(y_pred == cluster_idx) == 0:
# Cluster is empty.
continue
curr_score = (
-1.0 * np.sum(
pdists[medoid_ics[cluster_idx], y_pred == cluster_idx]) +
loss_mult * (1.0 - metrics.normalized_mutual_info_score(
y_gt, y_pred)))
pdist_in = pdists[y_pred == cluster_idx, :]
pdist_in = pdist_in[:, y_pred == cluster_idx]
all_scores_fac = np.sum(-1.0 * pdist_in, axis=1)
all_scores_loss = []
for i in range(y_pred.size):
if y_pred[i] != cluster_idx:
continue
# remove this cluster's current centroid
medoid_ics_i = medoid_ics[:cluster_idx] + medoid_ics[cluster_idx + 1:]
# add this new candidate to the centroid list
medoid_ics_i += [i]
y_pred_i = self._get_cluster_ics(pdists, medoid_ics_i)
all_scores_loss.append(loss_mult * (
1.0 - metrics.normalized_mutual_info_score(y_gt, y_pred_i)))
all_scores = all_scores_fac + all_scores_loss
max_score_idx = np.argmax(all_scores)
max_score = all_scores[max_score_idx]
if max_score > curr_score:
medoid_ics[cluster_idx] = np.where(
y_pred == cluster_idx)[0][max_score_idx]
def pam_augmented_fit(self, feat, y, loss_mult):
pam_max_iter = 5
self._check_init_args()
feat = self._check_array(feat)
pdists = pairwise_distance_np(feat)
self.loss_augmented_fit(feat, y, loss_mult)
print('PAM -1 (before PAM): score: %f, score_aug: %f' % (
self.score_, self.score_aug_))
# Initialize from loss augmented facility location
subset = self.center_ics_
for iter_ in range(pam_max_iter):
# update the cluster assignment
cluster_ics = self._get_cluster_ics(pdists, subset)
      # Update the medoid for each cluster.
self._augmented_update_medoid_ics_in_place(pdists, y, cluster_ics, subset,
loss_mult)
self.score_ = np.float32(-1.0) * self._get_facility_distance(
pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
print('PAM iter: %d, score: %f, score_aug: %f' % (iter_, self.score_,
self.score_aug_))
self.center_ics_ = subset
self.labels_ = cluster_ics
return self
def _check_array(self, feat):
# Check that the number of clusters is less than or equal to
# the number of samples
if self.n_clusters > feat.shape[0]:
      raise ValueError('The number of medoids ({}) must not be larger than '
                       'the number of samples ({}).'.format(
                           self.n_clusters, feat.shape[0]))
return feat
def _get_cluster_ics(self, pdists, subset):
"""Returns cluster indices for pdist and current medoid indices."""
    # Assign each data point to the cluster whose medoid is closest.
cluster_ics = np.argmin(pdists[subset, :], axis=0)
return cluster_ics
def _get_facility_distance(self, pdists, subset):
return np.sum(np.min(pdists[subset, :], axis=0))
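# In equation form, _get_facility_distance returns
#   sum_i min_{j in subset} d(x_i, x_j),
# the total travel distance from every point to its nearest chosen medoid.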
class ClusterLossTest(test.TestCase):
def _genClusters(self, n_samples, n_clusters):
blobs = datasets.make_blobs(
n_samples=n_samples, centers=n_clusters)
embedding, labels = blobs
embedding = (embedding - embedding.mean(axis=0)) / embedding.std(axis=0)
embedding = embedding.astype(np.float32)
return embedding, labels
def testClusteringLossPAMOff(self):
if not HAS_SKLEARN:
return
with self.cached_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=False)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=False)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def testClusteringLossPAMOn(self):
if not HAS_SKLEARN:
return
with self.cached_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=True)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=True)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/losses/python/metric_learning/metric_loss_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.contrib.rate module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
_to_replace = re.compile("[^A-Za-z0-9.]")
class Rate(object):
"""Computes the rate of change since the last rate call."""
def __init__(self, name=None):
self._built = False
self._vars = []
self._initial_values = {}
name = name or self.__class__.__name__
# Replace things like spaces in name to create a valid scope name.
scope_name = _to_replace.sub("_", name)
# We create the variable scope now to get the unique name that will
# be used as a variable prefix when build() calls _add_variable().
with variable_scope.variable_scope(
scope_name, use_resource=True, reuse=False) as scope:
pos = scope.name.rfind(scope_name)
self._name = name + scope.name[pos + len(scope_name):]
self._scope = scope
# Ensures that if the user calls build directly we still set self._built to
# True to prevent variables from being recreated.
self._build = self.build
if context.executing_eagerly():
self._construction_scope = context.eager_mode
else:
# We make self.call() into a graph callable here, so that we can
# return a single op that performs all of the variable updates.
self._construction_scope = ops.get_default_graph().as_default
self.call = function.defun(self.call)
def build(self, values, denominator):
"""Method to create variables.
Called by `__call__()` before `call()` for the first time.
Args:
values: The numerator for rate.
      denominator: Value with respect to which the rate is taken.
"""
self.numer = self._add_variable(
name="numer", shape=values.get_shape(), dtype=dtypes.float64)
self.denom = self._add_variable(
name="denom", shape=denominator.get_shape(), dtype=dtypes.float64)
self.prev_values = self._add_variable(
name="prev_values", shape=values.get_shape(), dtype=dtypes.float64)
self.prev_denominator = self._add_variable(
name="prev_denominator",
shape=denominator.get_shape(),
dtype=dtypes.float64)
self._built = True
def __call__(self, *args, **kwargs):
"""Returns op to execute to update.
Returns None if eager execution is enabled.
Returns a graph-mode function if graph execution is enabled.
Args:
*args:
**kwargs: A mini-batch of inputs to Rate, passed on to `call()`.
"""
if not self._built:
with variable_scope.variable_scope(
self._scope), self._construction_scope():
self.build(*args, **kwargs)
self._built = True
return self.call(*args, **kwargs)
@property
def name(self):
return self._name
@property
def variables(self):
return self._vars
def _add_variable(self, name, shape=None, dtype=None):
"""Private method for adding variables to the graph."""
if self._built:
raise RuntimeError("Can't call add_variable() except in build().")
v = resource_variable_ops.ResourceVariable(
lambda: array_ops.zeros(shape, dtype),
trainable=False,
validate_shape=True,
name=name,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
    # Track the created variable so the `variables` property reflects it.
    self._vars.append(v)
    return v
def call(self, values, denominator):
"""Computes the rate since the last call.
Args:
values: Tensor with the per-example value.
denominator: Measure to take the rate with respect to.
Returns:
      The rate, or 0 if the denominator is unchanged since the last call.
"""
if denominator.dtype != dtypes.float64:
denominator = math_ops.cast(denominator, dtypes.float64)
if values.dtype != dtypes.float64:
values = math_ops.cast(values, dtypes.float64)
state_ops.assign(self.numer, math_ops.subtract(values, self.prev_values))
state_ops.assign(self.denom,
math_ops.subtract(denominator, self.prev_denominator))
state_ops.assign(self.prev_values, values)
state_ops.assign(self.prev_denominator, denominator)
return math_ops.div_no_nan(self.numer,
math_ops.maximum(self.denom, 0),
name="safe_rate")
|
tensorflow-master
|
tensorflow/contrib/rate/rate.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Rate."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rate import rate
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RateTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testBuildRate(self):
m = rate.Rate()
m.build(
constant_op.constant([1], dtype=dtypes.float32),
constant_op.constant([2], dtype=dtypes.float32))
old_numer = m.numer
m(
constant_op.constant([2], dtype=dtypes.float32),
constant_op.constant([2], dtype=dtypes.float32))
self.assertTrue(old_numer is m.numer)
@test_util.run_in_graph_and_eager_modes()
def testBasic(self):
with self.cached_session():
r_ = rate.Rate()
a = r_(array_ops.ones([1]), denominator=array_ops.ones([1]))
self.evaluate(variables.global_variables_initializer())
self.evaluate(variables.local_variables_initializer())
self.assertEqual([[1]], self.evaluate(a))
b = r_(constant_op.constant([2]), denominator=constant_op.constant([2]))
self.assertEqual([[1]], self.evaluate(b))
c = r_(constant_op.constant([4]), denominator=constant_op.constant([3]))
self.assertEqual([[2]], self.evaluate(c))
d = r_(constant_op.constant([16]), denominator=constant_op.constant([3]))
self.assertEqual([[0]], self.evaluate(d)) # divide by 0
def testNamesWithSpaces(self):
m1 = rate.Rate(name="has space")
m1(array_ops.ones([1]), array_ops.ones([1]))
self.assertEqual(m1.name, "has space")
self.assertEqual(m1.prev_values.name, "has_space_1/prev_values:0")
@test_util.run_in_graph_and_eager_modes()
def testWhileLoop(self):
with self.cached_session():
r_ = rate.Rate()
def body(value, denom, i, ret_rate):
i += 1
ret_rate = r_(value, denom)
with ops.control_dependencies([ret_rate]):
value = math_ops.add(value, 2)
denom = math_ops.add(denom, 1)
return [value, denom, i, ret_rate]
def condition(v, d, i, r):
del v, d, r # unused vars by condition
return math_ops.less(i, 100)
i = constant_op.constant(0)
value = constant_op.constant([1], dtype=dtypes.float64)
denom = constant_op.constant([1], dtype=dtypes.float64)
ret_rate = r_(value, denom)
self.evaluate(variables.global_variables_initializer())
self.evaluate(variables.local_variables_initializer())
loop = control_flow_ops.while_loop(condition, body,
[value, denom, i, ret_rate])
self.assertEqual([[2]], self.evaluate(loop[3]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/rate/rate_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GridRNN cells
## This package provides classes for GridRNN
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.grid_rnn.python.ops.grid_rnn_cell import *
# pylint: enable=unused-import,wildcard-import,line-too-long
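# A minimal usage sketch (illustrative; shapes and names are hypothetical):
#
#   from tensorflow.python.ops import array_ops
#   cell = Grid2LSTMCell(2)  # re-exported here via the wildcard import
#   x = array_ops.zeros([1, 3])
#   m = ((array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),
#        (array_ops.zeros([1, 2]), array_ops.zeros([1, 2])))
#   g, s = cell(x, m)  # output tuple and per-dimension (c, h) states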
|
tensorflow-master
|
tensorflow/contrib/grid_rnn/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/grid_rnn/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GridRNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.grid_rnn.python.ops import grid_rnn_cell
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class GridRNNCellTest(test.TestCase):
def testGrid2BasicLSTMCell(self):
with self.test_session(use_gpu=False) as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.2)) as root_scope:
x = array_ops.zeros([1, 3])
m = ((array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),
(array_ops.zeros([1, 2]), array_ops.zeros([1, 2])))
cell = grid_rnn_cell.Grid2BasicLSTMCell(2)
self.assertEqual(cell.state_size, ((2, 2), (2, 2)))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (1, 2))
self.assertEqual(s[0].c.get_shape(), (1, 2))
self.assertEqual(s[0].h.get_shape(), (1, 2))
self.assertEqual(s[1].c.get_shape(), (1, 2))
self.assertEqual(s[1].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x:
np.array([[1., 1., 1.]]),
m: ((np.array([[0.1, 0.2]]), np.array([[0.3, 0.4]])),
(np.array([[0.5, 0.6]]), np.array([[0.7, 0.8]])))
})
self.assertEqual(res_g[0].shape, (1, 2))
self.assertEqual(res_s[0].c.shape, (1, 2))
self.assertEqual(res_s[0].h.shape, (1, 2))
self.assertEqual(res_s[1].c.shape, (1, 2))
self.assertEqual(res_s[1].h.shape, (1, 2))
self.assertAllClose(res_g, ([[0.36617181, 0.36617181]],))
self.assertAllClose(
res_s, (([[0.71053141, 0.71053141]], [[0.36617181, 0.36617181]]),
([[0.72320831, 0.80555487]], [[0.39102408, 0.42150158]])))
# emulate a loop through the input sequence,
# where we call cell() multiple times
root_scope.reuse_variables()
g2, s2 = cell(x, m)
self.assertEqual(g2[0].get_shape(), (1, 2))
self.assertEqual(s2[0].c.get_shape(), (1, 2))
self.assertEqual(s2[0].h.get_shape(), (1, 2))
self.assertEqual(s2[1].c.get_shape(), (1, 2))
self.assertEqual(s2[1].h.get_shape(), (1, 2))
res_g2, res_s2 = sess.run([g2, s2],
{x: np.array([[2., 2., 2.]]),
m: res_s})
self.assertEqual(res_g2[0].shape, (1, 2))
self.assertEqual(res_s2[0].c.shape, (1, 2))
self.assertEqual(res_s2[0].h.shape, (1, 2))
self.assertEqual(res_s2[1].c.shape, (1, 2))
self.assertEqual(res_s2[1].h.shape, (1, 2))
self.assertAllClose(res_g2[0], [[0.58847463, 0.58847463]])
self.assertAllClose(
res_s2, (([[1.40469193, 1.40469193]], [[0.58847463, 0.58847463]]),
([[0.97726452, 1.04626071]], [[0.4927212, 0.51137757]])))
def testGrid2BasicLSTMCellTied(self):
with self.test_session(use_gpu=False) as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.2)):
x = array_ops.zeros([1, 3])
m = ((array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),
(array_ops.zeros([1, 2]), array_ops.zeros([1, 2])))
cell = grid_rnn_cell.Grid2BasicLSTMCell(2, tied=True)
self.assertEqual(cell.state_size, ((2, 2), (2, 2)))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (1, 2))
self.assertEqual(s[0].c.get_shape(), (1, 2))
self.assertEqual(s[0].h.get_shape(), (1, 2))
self.assertEqual(s[1].c.get_shape(), (1, 2))
self.assertEqual(s[1].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x:
np.array([[1., 1., 1.]]),
m: ((np.array([[0.1, 0.2]]), np.array([[0.3, 0.4]])),
(np.array([[0.5, 0.6]]), np.array([[0.7, 0.8]])))
})
self.assertEqual(res_g[0].shape, (1, 2))
self.assertEqual(res_s[0].c.shape, (1, 2))
self.assertEqual(res_s[0].h.shape, (1, 2))
self.assertEqual(res_s[1].c.shape, (1, 2))
self.assertEqual(res_s[1].h.shape, (1, 2))
self.assertAllClose(res_g[0], [[0.36617181, 0.36617181]])
self.assertAllClose(
res_s, (([[0.71053141, 0.71053141]], [[0.36617181, 0.36617181]]),
([[0.72320831, 0.80555487]], [[0.39102408, 0.42150158]])))
res_g, res_s = sess.run([g, s], {x: np.array([[1., 1., 1.]]), m: res_s})
self.assertEqual(res_g[0].shape, (1, 2))
self.assertAllClose(res_g[0], [[0.36703536, 0.36703536]])
self.assertAllClose(
res_s, (([[0.71200621, 0.71200621]], [[0.36703536, 0.36703536]]),
([[0.80941606, 0.87550586]], [[0.40108523, 0.42199609]])))
def testGrid2BasicLSTMCellWithRelu(self):
with self.test_session(use_gpu=False) as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.2)):
x = array_ops.zeros([1, 3])
m = ((array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),)
cell = grid_rnn_cell.Grid2BasicLSTMCell(
2, tied=False, non_recurrent_fn=nn_ops.relu)
self.assertEqual(cell.state_size, ((2, 2),))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (1, 2))
self.assertEqual(s[0].c.get_shape(), (1, 2))
self.assertEqual(s[0].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x: np.array([[1., 1., 1.]]),
m: ((np.array([[0.1, 0.2]]), np.array([[0.3, 0.4]])),)
})
self.assertEqual(res_g[0].shape, (1, 2))
self.assertAllClose(res_g[0], [[0.31667367, 0.31667367]])
self.assertAllClose(res_s, (([[0.29530135, 0.37520045]],
[[0.17044567, 0.21292259]]),))
"""LSTMCell
"""
def testGrid2LSTMCell(self):
with self.test_session(use_gpu=False) as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = ((array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),
(array_ops.zeros([1, 2]), array_ops.zeros([1, 2])))
cell = grid_rnn_cell.Grid2LSTMCell(2, use_peepholes=True)
self.assertEqual(cell.state_size, ((2, 2), (2, 2)))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (1, 2))
self.assertEqual(s[0].c.get_shape(), (1, 2))
self.assertEqual(s[0].h.get_shape(), (1, 2))
self.assertEqual(s[1].c.get_shape(), (1, 2))
self.assertEqual(s[1].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x:
np.array([[1., 1., 1.]]),
m: ((np.array([[0.1, 0.2]]), np.array([[0.3, 0.4]])),
(np.array([[0.5, 0.6]]), np.array([[0.7, 0.8]])))
})
self.assertEqual(res_g[0].shape, (1, 2))
self.assertEqual(res_s[0].c.shape, (1, 2))
self.assertEqual(res_s[0].h.shape, (1, 2))
self.assertEqual(res_s[1].c.shape, (1, 2))
self.assertEqual(res_s[1].h.shape, (1, 2))
self.assertAllClose(res_g[0], [[0.95686918, 0.95686918]])
self.assertAllClose(
res_s, (([[2.41515064, 2.41515064]], [[0.95686918, 0.95686918]]),
([[1.38917875, 1.49043763]], [[0.83884692, 0.86036491]])))
def testGrid2LSTMCellTied(self):
with self.test_session(use_gpu=False) as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = ((array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),
(array_ops.zeros([1, 2]), array_ops.zeros([1, 2])))
cell = grid_rnn_cell.Grid2LSTMCell(2, tied=True, use_peepholes=True)
self.assertEqual(cell.state_size, ((2, 2), (2, 2)))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (1, 2))
self.assertEqual(s[0].c.get_shape(), (1, 2))
self.assertEqual(s[0].h.get_shape(), (1, 2))
self.assertEqual(s[1].c.get_shape(), (1, 2))
self.assertEqual(s[1].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x:
np.array([[1., 1., 1.]]),
m: ((np.array([[0.1, 0.2]]), np.array([[0.3, 0.4]])),
(np.array([[0.5, 0.6]]), np.array([[0.7, 0.8]])))
})
self.assertEqual(res_g[0].shape, (1, 2))
self.assertEqual(res_s[0].c.shape, (1, 2))
self.assertEqual(res_s[0].h.shape, (1, 2))
self.assertEqual(res_s[1].c.shape, (1, 2))
self.assertEqual(res_s[1].h.shape, (1, 2))
self.assertAllClose(res_g[0], [[0.95686918, 0.95686918]])
self.assertAllClose(
res_s, (([[2.41515064, 2.41515064]], [[0.95686918, 0.95686918]]),
([[1.38917875, 1.49043763]], [[0.83884692, 0.86036491]])))
def testGrid2LSTMCellWithRelu(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = ((array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),)
cell = grid_rnn_cell.Grid2LSTMCell(
2, use_peepholes=True, non_recurrent_fn=nn_ops.relu)
self.assertEqual(cell.state_size, ((2, 2),))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (1, 2))
self.assertEqual(s[0].c.get_shape(), (1, 2))
self.assertEqual(s[0].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x: np.array([[1., 1., 1.]]),
m: ((np.array([[0.1, 0.2]]), np.array([[0.3, 0.4]])),)
})
self.assertEqual(res_g[0].shape, (1, 2))
self.assertAllClose(res_g[0], [[2.1831727, 2.1831727]])
self.assertAllClose(res_s, (([[0.92270052, 1.02325559]],
[[0.66159075, 0.70475441]]),))
"""RNNCell
"""
def testGrid2BasicRNNCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([2, 2])
m = (array_ops.zeros([2, 2]), array_ops.zeros([2, 2]))
cell = grid_rnn_cell.Grid2BasicRNNCell(2)
self.assertEqual(cell.state_size, (2, 2))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (2, 2))
self.assertEqual(s[0].get_shape(), (2, 2))
self.assertEqual(s[1].get_shape(), (2, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x:
np.array([[1., 1.], [2., 2.]]),
m: (np.array([[0.1, 0.1], [0.2, 0.2]]), np.array([[0.1, 0.1],
[0.2, 0.2]]))
})
self.assertEqual(res_g[0].shape, (2, 2))
self.assertEqual(res_s[0].shape, (2, 2))
self.assertEqual(res_s[1].shape, (2, 2))
self.assertAllClose(res_g, ([[0.94685763, 0.94685763],
[0.99480951, 0.99480951]],))
self.assertAllClose(
res_s, ([[0.94685763, 0.94685763], [0.99480951, 0.99480951]],
[[0.80049908, 0.80049908], [0.97574311, 0.97574311]]))
def testGrid2BasicRNNCellTied(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([2, 2])
m = (array_ops.zeros([2, 2]), array_ops.zeros([2, 2]))
cell = grid_rnn_cell.Grid2BasicRNNCell(2, tied=True)
self.assertEqual(cell.state_size, (2, 2))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (2, 2))
self.assertEqual(s[0].get_shape(), (2, 2))
self.assertEqual(s[1].get_shape(), (2, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x:
np.array([[1., 1.], [2., 2.]]),
m: (np.array([[0.1, 0.1], [0.2, 0.2]]), np.array([[0.1, 0.1],
[0.2, 0.2]]))
})
self.assertEqual(res_g[0].shape, (2, 2))
self.assertEqual(res_s[0].shape, (2, 2))
self.assertEqual(res_s[1].shape, (2, 2))
self.assertAllClose(res_g, ([[0.94685763, 0.94685763],
[0.99480951, 0.99480951]],))
self.assertAllClose(
res_s, ([[0.94685763, 0.94685763], [0.99480951, 0.99480951]],
[[0.80049908, 0.80049908], [0.97574311, 0.97574311]]))
def testGrid2BasicRNNCellWithRelu(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = (array_ops.zeros([1, 2]),)
cell = grid_rnn_cell.Grid2BasicRNNCell(2, non_recurrent_fn=nn_ops.relu)
self.assertEqual(cell.state_size, (2,))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (1, 2))
self.assertEqual(s[0].get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run(
[g, s], {x: np.array([[1., 1.]]),
m: np.array([[0.1, 0.1]])})
self.assertEqual(res_g[0].shape, (1, 2))
self.assertEqual(res_s[0].shape, (1, 2))
self.assertAllClose(res_g, ([[1.80049896, 1.80049896]],))
self.assertAllClose(res_s, ([[0.80049896, 0.80049896]],))
"""1-LSTM
"""
def testGrid1LSTMCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)) as root_scope:
x = array_ops.zeros([1, 3])
m = ((array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),)
cell = grid_rnn_cell.Grid1LSTMCell(2, use_peepholes=True)
self.assertEqual(cell.state_size, ((2, 2),))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (1, 2))
self.assertEqual(s[0].c.get_shape(), (1, 2))
self.assertEqual(s[0].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x: np.array([[1., 1., 1.]]),
m: ((np.array([[0.1, 0.2]]), np.array([[0.3, 0.4]])),)
})
self.assertEqual(res_g[0].shape, (1, 2))
self.assertEqual(res_s[0].c.shape, (1, 2))
self.assertEqual(res_s[0].h.shape, (1, 2))
self.assertAllClose(res_g, ([[0.91287315, 0.91287315]],))
self.assertAllClose(res_s, (([[2.26285243, 2.26285243]],
[[0.91287315, 0.91287315]]),))
root_scope.reuse_variables()
x2 = array_ops.zeros([0, 0])
g2, s2 = cell(x2, m)
self.assertEqual(g2[0].get_shape(), (1, 2))
self.assertEqual(s2[0].c.get_shape(), (1, 2))
self.assertEqual(s2[0].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g2, res_s2 = sess.run([g2, s2], {m: res_s})
self.assertEqual(res_g2[0].shape, (1, 2))
self.assertEqual(res_s2[0].c.shape, (1, 2))
self.assertEqual(res_s2[0].h.shape, (1, 2))
self.assertAllClose(res_g2, ([[0.9032144, 0.9032144]],))
self.assertAllClose(res_s2, (([[2.79966092, 2.79966092]],
[[0.9032144, 0.9032144]]),))
g3, s3 = cell(x2, m)
self.assertEqual(g3[0].get_shape(), (1, 2))
self.assertEqual(s3[0].c.get_shape(), (1, 2))
self.assertEqual(s3[0].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g3, res_s3 = sess.run([g3, s3], {m: res_s2})
self.assertEqual(res_g3[0].shape, (1, 2))
self.assertEqual(res_s3[0].c.shape, (1, 2))
self.assertEqual(res_s3[0].h.shape, (1, 2))
self.assertAllClose(res_g3, ([[0.92727238, 0.92727238]],))
self.assertAllClose(res_s3, (([[3.3529923, 3.3529923]],
[[0.92727238, 0.92727238]]),))
"""3-LSTM
"""
def testGrid3LSTMCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = ((array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),
(array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),
(array_ops.zeros([1, 2]), array_ops.zeros([1, 2])))
cell = grid_rnn_cell.Grid3LSTMCell(2, use_peepholes=True)
self.assertEqual(cell.state_size, ((2, 2), (2, 2), (2, 2)))
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (1, 2))
self.assertEqual(s[0].c.get_shape(), (1, 2))
self.assertEqual(s[0].h.get_shape(), (1, 2))
self.assertEqual(s[1].c.get_shape(), (1, 2))
self.assertEqual(s[1].h.get_shape(), (1, 2))
self.assertEqual(s[2].c.get_shape(), (1, 2))
self.assertEqual(s[2].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x:
np.array([[1., 1., 1.]]),
m: ((np.array([[0.1, 0.2]]), np.array([[0.3, 0.4]])),
(np.array([[0.5, 0.6]]), np.array([[0.7, 0.8]])), (np.array(
[[-0.1, -0.2]]), np.array([[-0.3, -0.4]])))
})
self.assertEqual(res_g[0].shape, (1, 2))
self.assertEqual(res_s[0].c.shape, (1, 2))
self.assertEqual(res_s[0].h.shape, (1, 2))
self.assertEqual(res_s[1].c.shape, (1, 2))
self.assertEqual(res_s[1].h.shape, (1, 2))
self.assertEqual(res_s[2].c.shape, (1, 2))
self.assertEqual(res_s[2].h.shape, (1, 2))
self.assertAllClose(res_g, ([[0.96892911, 0.96892911]],))
self.assertAllClose(
res_s, (([[2.45227885, 2.45227885]], [[0.96892911, 0.96892911]]),
([[1.33592629, 1.4373529]], [[0.80867189, 0.83247656]]),
([[0.7317788, 0.63205892]], [[0.56548983, 0.50446129]])))
"""Edge cases
"""
def testGridRNNEdgeCasesLikeRelu(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([3, 2])
m = ()
# this is equivalent to relu
cell = grid_rnn_cell.GridRNNCell(
num_units=2,
num_dims=1,
input_dims=0,
output_dims=0,
non_recurrent_dims=0,
non_recurrent_fn=nn_ops.relu)
g, s = cell(x, m)
self.assertEqual(g[0].get_shape(), (3, 2))
self.assertEqual(s, ())
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s],
{x: np.array([[1., -1.], [-2, 1], [2, -1]])})
self.assertEqual(res_g[0].shape, (3, 2))
self.assertEqual(res_s, ())
self.assertAllClose(res_g, ([[0, 0], [0, 0], [0.5, 0.5]],))
def testGridRNNEdgeCasesNoOutput(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = ((array_ops.zeros([1, 2]), array_ops.zeros([1, 2])),)
# This cell produces no output
cell = grid_rnn_cell.GridRNNCell(
num_units=2,
num_dims=2,
input_dims=0,
output_dims=None,
non_recurrent_dims=0,
non_recurrent_fn=nn_ops.relu)
g, s = cell(x, m)
self.assertEqual(g, ())
self.assertEqual(s[0].c.get_shape(), (1, 2))
self.assertEqual(s[0].h.get_shape(), (1, 2))
sess.run([variables.global_variables_initializer()])
res_g, res_s = sess.run([g, s], {
x: np.array([[1., 1.]]),
m: ((np.array([[0.1, 0.1]]), np.array([[0.1, 0.1]])),)
})
self.assertEqual(res_g, ())
self.assertEqual(res_s[0].c.shape, (1, 2))
self.assertEqual(res_s[0].h.shape, (1, 2))
"""Test with tf.nn.rnn
"""
def testGrid2LSTMCellWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
cell = grid_rnn_cell.Grid2LSTMCell(num_units=num_units)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size))
]
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state[0].c.get_shape(), (batch_size, 2))
self.assertEqual(state[0].h.get_shape(), (batch_size, 2))
self.assertEqual(state[1].c.get_shape(), (batch_size, 2))
self.assertEqual(state[1].h.get_shape(), (batch_size, 2))
for out, inp in zip(outputs, inputs):
self.assertEqual(len(out), 1)
self.assertEqual(out[0].get_shape()[0], inp.get_shape()[0])
self.assertEqual(out[0].get_shape()[1], num_units)
self.assertEqual(out[0].dtype, inp.dtype)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for tp in values[:-1]:
for v in tp:
self.assertTrue(np.all(np.isfinite(v)))
for tp in values[-1]:
for st in tp:
for v in st:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid2LSTMCellReLUWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
cell = grid_rnn_cell.Grid2LSTMCell(
num_units=num_units, non_recurrent_fn=nn_ops.relu)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state[0].c.get_shape(), (batch_size, 2))
self.assertEqual(state[0].h.get_shape(), (batch_size, 2))
for out, inp in zip(outputs, inputs):
self.assertEqual(len(out), 1)
self.assertEqual(out[0].get_shape()[0], inp.get_shape()[0])
self.assertEqual(out[0].get_shape()[1], num_units)
self.assertEqual(out[0].dtype, inp.dtype)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for tp in values[:-1]:
for v in tp:
self.assertTrue(np.all(np.isfinite(v)))
for tp in values[-1]:
for st in tp:
for v in st:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid3LSTMCellReLUWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
cell = grid_rnn_cell.Grid3LSTMCell(
num_units=num_units, non_recurrent_fn=nn_ops.relu)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state[0].c.get_shape(), (batch_size, 2))
self.assertEqual(state[0].h.get_shape(), (batch_size, 2))
self.assertEqual(state[1].c.get_shape(), (batch_size, 2))
self.assertEqual(state[1].h.get_shape(), (batch_size, 2))
for out, inp in zip(outputs, inputs):
self.assertEqual(len(out), 1)
self.assertEqual(out[0].get_shape()[0], inp.get_shape()[0])
self.assertEqual(out[0].get_shape()[1], num_units)
self.assertEqual(out[0].dtype, inp.dtype)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for tp in values[:-1]:
for v in tp:
self.assertTrue(np.all(np.isfinite(v)))
for tp in values[-1]:
for st in tp:
for v in st:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid1LSTMCellWithRNN(self):
batch_size = 3
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
cell = grid_rnn_cell.Grid1LSTMCell(num_units=num_units)
# for 1-LSTM, we only feed the first step
inputs = ([
array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size))
] + (max_length - 1) * [array_ops.zeros([batch_size, input_size])])
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
self.assertEqual(state[0].c.get_shape(), (batch_size, 2))
self.assertEqual(state[0].h.get_shape(), (batch_size, 2))
for out, inp in zip(outputs, inputs):
self.assertEqual(len(out), 1)
self.assertEqual(out[0].get_shape(), (3, num_units))
self.assertEqual(out[0].dtype, inp.dtype)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
input_value = np.ones((batch_size, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for tp in values[:-1]:
for v in tp:
self.assertTrue(np.all(np.isfinite(v)))
for tp in values[-1]:
for st in tp:
for v in st:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid2LSTMCellWithRNNAndDynamicBatchSize(self):
"""Test for #4296."""
input_size = 5
max_length = 6 # unrolled up to this length
num_units = 2
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
cell = grid_rnn_cell.Grid2LSTMCell(num_units=num_units)
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(len(out), 1)
self.assertTrue(out[0].get_shape().dims[0].value is None)
self.assertEqual(out[0].get_shape().dims[1], num_units)
self.assertEqual(out[0].dtype, inp.dtype)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
input_value = np.ones((3, input_size))
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
for tp in values[:-1]:
for v in tp:
self.assertTrue(np.all(np.isfinite(v)))
for tp in values[-1]:
for st in tp:
for v in st:
self.assertTrue(np.all(np.isfinite(v)))
def testGrid2LSTMCellLegacy(self):
"""Test for legacy case (when state_is_tuple=False)."""
with self.cached_session() as sess:
with variable_scope.variable_scope(
'root', initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 8])
cell = grid_rnn_cell.Grid2LSTMCell(
2, use_peepholes=True, state_is_tuple=False, output_is_tuple=False)
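        # With state_is_tuple=False, the flattened state concatenates (c, h)
        # for each of the two recurrent dimensions:
        # 2 dims * (2 + 2) units = 8.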
self.assertEqual(cell.state_size, 8)
g, s = cell(x, m)
self.assertEqual(g.get_shape(), (1, 2))
self.assertEqual(s.get_shape(), (1, 8))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])
})
self.assertEqual(res[0].shape, (1, 2))
self.assertEqual(res[1].shape, (1, 8))
self.assertAllClose(res[0], [[0.95686918, 0.95686918]])
self.assertAllClose(res[1], [[
2.41515064, 2.41515064, 0.95686918, 0.95686918, 1.38917875,
1.49043763, 0.83884692, 0.86036491
]])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/grid_rnn/python/kernel_tests/grid_rnn_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/grid_rnn/python/ops/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing GridRNN cells"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.contrib import layers
from tensorflow.contrib import rnn
class GridRNNCell(rnn.RNNCell):
"""Grid recurrent cell.
This implementation is based on:
http://arxiv.org/pdf/1507.01526v3.pdf
  This is the generic implementation of GridRNN. Users can specify an
  arbitrary number of dimensions, mark some of them as priority (section
  3.2) or non-recurrent (section 3.3), and select the input/output
  dimensions (section 3.4).
  Weight sharing can also be specified using the `tied` parameter.
  The type of recurrent unit can be specified via `cell_fn`.
"""
def __init__(self,
num_units,
num_dims=1,
input_dims=None,
output_dims=None,
priority_dims=None,
non_recurrent_dims=None,
tied=False,
cell_fn=None,
non_recurrent_fn=None,
state_is_tuple=True,
output_is_tuple=True):
"""Initialize the parameters of a Grid RNN cell
Args:
num_units: int, The number of units in all dimensions of this GridRNN cell
num_dims: int, Number of dimensions of this grid.
input_dims: int or list, List of dimensions which will receive input data.
output_dims: int or list, List of dimensions from which the output will be
recorded.
priority_dims: int or list, List of dimensions to be considered as
priority dimensions.
If None, no dimension is prioritized.
non_recurrent_dims: int or list, List of dimensions that are not
recurrent.
        The transfer function for non-recurrent dimensions is specified
        via `non_recurrent_fn`, which defaults to `tensorflow.nn.relu`.
tied: bool, Whether to share the weights among the dimensions of this
GridRNN cell.
If there are non-recurrent dimensions in the grid, weights are
shared between each group of recurrent and non-recurrent
dimensions.
cell_fn: function, a function which returns the recurrent cell object.
        It must have the following signature:
```
def cell_func(num_units):
# ...
```
and returns an object of type `RNNCell`. If None, LSTMCell with
default parameters will be used.
Note that if you use a custom RNNCell (with `cell_fn`), it is your
        responsibility to make sure the inner cell uses `state_is_tuple=True`.
non_recurrent_fn: a tensorflow Op that will be the transfer function of
the non-recurrent dimensions
state_is_tuple: If True, accepted and returned states are tuples of the
states of the recurrent dimensions. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
Note that if you use a custom RNNCell (with `cell_fn`), it is your
        responsibility to make sure the inner cell uses `state_is_tuple=True`.
output_is_tuple: If True, the output is a tuple of the outputs of the
recurrent dimensions. If False, they are concatenated along the
        column axis. The latter behavior will soon be deprecated.
Raises:
TypeError: if cell_fn does not return an RNNCell instance.
"""
if not state_is_tuple:
logging.warning('%s: Using a concatenated state is slower and will '
'soon be deprecated. Use state_is_tuple=True.', self)
if not output_is_tuple:
logging.warning('%s: Using a concatenated output is slower and will '
'soon be deprecated. Use output_is_tuple=True.', self)
if num_dims < 1:
raise ValueError('dims must be >= 1: {}'.format(num_dims))
self._config = _parse_rnn_config(num_dims, input_dims, output_dims,
priority_dims, non_recurrent_dims,
non_recurrent_fn or nn.relu, tied,
num_units)
self._state_is_tuple = state_is_tuple
self._output_is_tuple = output_is_tuple
if cell_fn is None:
my_cell_fn = functools.partial(
rnn.LSTMCell, num_units=num_units, state_is_tuple=state_is_tuple)
else:
my_cell_fn = lambda: cell_fn(num_units)
if tied:
self._cells = [my_cell_fn()] * num_dims
else:
self._cells = [my_cell_fn() for _ in range(num_dims)]
if not isinstance(self._cells[0], rnn.RNNCell):
raise TypeError('cell_fn must return an RNNCell instance, saw: %s' %
type(self._cells[0]))
if self._output_is_tuple:
self._output_size = tuple(self._cells[0].output_size
for _ in self._config.outputs)
else:
self._output_size = self._cells[0].output_size * len(self._config.outputs)
if self._state_is_tuple:
self._state_size = tuple(self._cells[0].state_size
for _ in self._config.recurrents)
else:
self._state_size = self._cell_state_size() * len(self._config.recurrents)
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def __call__(self, inputs, state, scope=None):
"""Run one step of GridRNN.
Args:
      inputs: input Tensor, 2D, batch x input_size, or None.
state: state Tensor, 2D, batch x state_size. Note that state_size =
cell_state_size * recurrent_dims
scope: VariableScope for the created subgraph; defaults to "GridRNNCell".
Returns:
A tuple containing:
- A 2D, batch x output_size, Tensor representing the output of the cell
after reading "inputs" when previous state was "state".
- A 2D, batch x state_size, Tensor representing the new state of the cell
after reading "inputs" when previous state was "state".
"""
conf = self._config
dtype = inputs.dtype
c_prev, m_prev, cell_output_size = self._extract_states(state)
new_output = [None] * conf.num_dims
new_state = [None] * conf.num_dims
with vs.variable_scope(scope or type(self).__name__): # GridRNNCell
# project input, populate c_prev and m_prev
self._project_input(inputs, c_prev, m_prev, cell_output_size > 0)
# propagate along dimensions, first for non-priority dimensions
# then priority dimensions
_propagate(conf.non_priority, conf, self._cells, c_prev, m_prev,
new_output, new_state, True)
_propagate(conf.priority, conf, self._cells,
c_prev, m_prev, new_output, new_state, False)
# collect outputs and states
output_tensors = [new_output[i] for i in self._config.outputs]
if self._output_is_tuple:
output = tuple(output_tensors)
else:
if output_tensors:
output = array_ops.concat(output_tensors, 1)
else:
output = array_ops.zeros([0, 0], dtype)
if self._state_is_tuple:
states = tuple(new_state[i] for i in self._config.recurrents)
else:
# concat each state first, then flatten the whole thing
state_tensors = [
x for i in self._config.recurrents for x in new_state[i]
]
if state_tensors:
states = array_ops.concat(state_tensors, 1)
else:
states = array_ops.zeros([0, 0], dtype)
return output, states
def _extract_states(self, state):
"""Extract the cell and previous output tensors from the given state.
Args:
state: The RNN state.
Returns:
Tuple of the cell value, previous output, and cell_output_size.
Raises:
ValueError: If len(self._config.recurrents) != len(state).
"""
conf = self._config
# c_prev is `m` (cell value), and
# m_prev is `h` (previous output) in the paper.
# Keeping c and m here for consistency with the codebase
c_prev = [None] * conf.num_dims
m_prev = [None] * conf.num_dims
# for LSTM : state = memory cell + output, hence cell_output_size > 0
# for GRU/RNN: state = output (whose size is equal to _num_units),
# hence cell_output_size = 0
total_cell_state_size = self._cell_state_size()
cell_output_size = total_cell_state_size - conf.num_units
if self._state_is_tuple:
if len(conf.recurrents) != len(state):
raise ValueError('Expected state as a tuple of {} '
                         'elements'.format(len(conf.recurrents)))
for recurrent_dim, recurrent_state in zip(conf.recurrents, state):
if cell_output_size > 0:
c_prev[recurrent_dim], m_prev[recurrent_dim] = recurrent_state
else:
m_prev[recurrent_dim] = recurrent_state
else:
for recurrent_dim, start_idx in zip(conf.recurrents,
range(0, self.state_size,
total_cell_state_size)):
if cell_output_size > 0:
c_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],
[-1, conf.num_units])
m_prev[recurrent_dim] = array_ops.slice(
state, [0, start_idx + conf.num_units], [-1, cell_output_size])
else:
m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],
[-1, conf.num_units])
return c_prev, m_prev, cell_output_size
def _project_input(self, inputs, c_prev, m_prev, with_c):
"""Fills in c_prev and m_prev with projected input, for input dimensions.
Args:
inputs: inputs tensor
c_prev: cell value
m_prev: previous output
with_c: boolean; whether to include project_c.
Raises:
      ValueError: if len(self._config.inputs) != len(inputs).
"""
conf = self._config
if (inputs is not None and
tensor_shape.dimension_value(inputs.shape.with_rank(2)[1]) > 0 and
conf.inputs):
if isinstance(inputs, tuple):
if len(conf.inputs) != len(inputs):
          raise ValueError('Expected inputs as a tuple of {} '
'tensors'.format(len(conf.inputs)))
input_splits = inputs
else:
input_splits = array_ops.split(
value=inputs, num_or_size_splits=len(conf.inputs), axis=1)
input_sz = tensor_shape.dimension_value(
input_splits[0].shape.with_rank(2)[1])
for i, j in enumerate(conf.inputs):
input_project_m = vs.get_variable(
'project_m_{}'.format(j), [input_sz, conf.num_units],
dtype=inputs.dtype)
m_prev[j] = math_ops.matmul(input_splits[i], input_project_m)
if with_c:
input_project_c = vs.get_variable(
'project_c_{}'.format(j), [input_sz, conf.num_units],
dtype=inputs.dtype)
c_prev[j] = math_ops.matmul(input_splits[i], input_project_c)
def _cell_state_size(self):
"""Total size of the state of the inner cell used in this grid.
Returns:
Total size of the state of the inner cell.
"""
state_sizes = self._cells[0].state_size
if isinstance(state_sizes, tuple):
return sum(state_sizes)
return state_sizes
"""Specialized cells, for convenience
"""
class Grid1BasicRNNCell(GridRNNCell):
"""1D BasicRNN cell"""
def __init__(self, num_units, state_is_tuple=True, output_is_tuple=True):
super(Grid1BasicRNNCell, self).__init__(
num_units=num_units,
num_dims=1,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=False,
cell_fn=lambda n: rnn.BasicRNNCell(num_units=n),
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid2BasicRNNCell(GridRNNCell):
"""2D BasicRNN cell
This creates a 2D cell which receives input and gives output in the first
dimension.
The first dimension can optionally be non-recurrent if `non_recurrent_fn` is
specified.
"""
def __init__(self,
num_units,
tied=False,
non_recurrent_fn=None,
state_is_tuple=True,
output_is_tuple=True):
super(Grid2BasicRNNCell, self).__init__(
num_units=num_units,
num_dims=2,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=tied,
non_recurrent_dims=None if non_recurrent_fn is None else 0,
cell_fn=lambda n: rnn.BasicRNNCell(num_units=n),
non_recurrent_fn=non_recurrent_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid1BasicLSTMCell(GridRNNCell):
"""1D BasicLSTM cell."""
def __init__(self,
num_units,
forget_bias=1,
state_is_tuple=True,
output_is_tuple=True):
def cell_fn(n):
return rnn.BasicLSTMCell(num_units=n, forget_bias=forget_bias)
super(Grid1BasicLSTMCell, self).__init__(
num_units=num_units,
num_dims=1,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=False,
cell_fn=cell_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid2BasicLSTMCell(GridRNNCell):
"""2D BasicLSTM cell.
This creates a 2D cell which receives input and gives output in the first
dimension.
The first dimension can optionally be non-recurrent if `non_recurrent_fn` is
specified.
"""
def __init__(self,
num_units,
tied=False,
non_recurrent_fn=None,
forget_bias=1,
state_is_tuple=True,
output_is_tuple=True):
def cell_fn(n):
return rnn.BasicLSTMCell(num_units=n, forget_bias=forget_bias)
super(Grid2BasicLSTMCell, self).__init__(
num_units=num_units,
num_dims=2,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=tied,
non_recurrent_dims=None if non_recurrent_fn is None else 0,
cell_fn=cell_fn,
non_recurrent_fn=non_recurrent_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid1LSTMCell(GridRNNCell):
"""1D LSTM cell.
  This is different from Grid1BasicLSTMCell because it offers options to
  specify the forget bias and to enable peepholes.
"""
def __init__(self,
num_units,
use_peepholes=False,
forget_bias=1.0,
state_is_tuple=True,
output_is_tuple=True):
def cell_fn(n):
return rnn.LSTMCell(
num_units=n, forget_bias=forget_bias, use_peepholes=use_peepholes)
super(Grid1LSTMCell, self).__init__(
num_units=num_units,
num_dims=1,
input_dims=0,
output_dims=0,
priority_dims=0,
cell_fn=cell_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid2LSTMCell(GridRNNCell):
"""2D LSTM cell.
This creates a 2D cell which receives input and gives output in the first
dimension.
The first dimension can optionally be non-recurrent if `non_recurrent_fn` is
specified.
"""
def __init__(self,
num_units,
tied=False,
non_recurrent_fn=None,
use_peepholes=False,
forget_bias=1.0,
state_is_tuple=True,
output_is_tuple=True):
def cell_fn(n):
return rnn.LSTMCell(
num_units=n, forget_bias=forget_bias, use_peepholes=use_peepholes)
super(Grid2LSTMCell, self).__init__(
num_units=num_units,
num_dims=2,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=tied,
non_recurrent_dims=None if non_recurrent_fn is None else 0,
cell_fn=cell_fn,
non_recurrent_fn=non_recurrent_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid3LSTMCell(GridRNNCell):
"""3D BasicLSTM cell.
This creates a 2D cell which receives input and gives output in the first
dimension.
The first dimension can optionally be non-recurrent if `non_recurrent_fn` is
specified.
The second and third dimensions are LSTM.
"""
def __init__(self,
num_units,
tied=False,
non_recurrent_fn=None,
use_peepholes=False,
forget_bias=1.0,
state_is_tuple=True,
output_is_tuple=True):
def cell_fn(n):
return rnn.LSTMCell(
num_units=n, forget_bias=forget_bias, use_peepholes=use_peepholes)
super(Grid3LSTMCell, self).__init__(
num_units=num_units,
num_dims=3,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=tied,
non_recurrent_dims=None if non_recurrent_fn is None else 0,
cell_fn=cell_fn,
non_recurrent_fn=non_recurrent_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
class Grid2GRUCell(GridRNNCell):
"""2D LSTM cell.
This creates a 2D cell which receives input and gives output in the first
dimension.
The first dimension can optionally be non-recurrent if `non_recurrent_fn` is
specified.
"""
def __init__(self,
num_units,
tied=False,
non_recurrent_fn=None,
state_is_tuple=True,
output_is_tuple=True):
super(Grid2GRUCell, self).__init__(
num_units=num_units,
num_dims=2,
input_dims=0,
output_dims=0,
priority_dims=0,
tied=tied,
non_recurrent_dims=None if non_recurrent_fn is None else 0,
cell_fn=lambda n: rnn.GRUCell(num_units=n),
non_recurrent_fn=non_recurrent_fn,
state_is_tuple=state_is_tuple,
output_is_tuple=output_is_tuple)
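# Usage sketch (illustrative, not part of the original module): build a 2D
# grid LSTM and run a single step. The batch/input sizes are arbitrary
# example values mirroring the unit tests in grid_rnn_test.py.
def _example_grid2_lstm_step(batch_size=3, input_size=5, num_units=2):
  from tensorflow.python.framework import dtypes  # local import for the sketch
  cell = Grid2LSTMCell(num_units=num_units)
  x = array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
  state = cell.zero_state(batch_size, dtypes.float32)
  # Returns (output, new_state); with output_is_tuple=True the output is a
  # 1-tuple holding a [batch_size, num_units] tensor.
  return cell(x, state)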
# Helpers
_GridRNNDimension = namedtuple('_GridRNNDimension', [
'idx', 'is_input', 'is_output', 'is_priority', 'non_recurrent_fn'
])
_GridRNNConfig = namedtuple('_GridRNNConfig',
['num_dims', 'dims', 'inputs', 'outputs',
'recurrents', 'priority', 'non_priority', 'tied',
'num_units'])
def _parse_rnn_config(num_dims, ls_input_dims, ls_output_dims, ls_priority_dims,
ls_non_recurrent_dims, non_recurrent_fn, tied, num_units):
def check_dim_list(ls):
if ls is None:
ls = []
if not isinstance(ls, (list, tuple)):
ls = [ls]
ls = sorted(set(ls))
if any(_ < 0 or _ >= num_dims for _ in ls):
raise ValueError('Invalid dims: {}. Must be in [0, {})'.format(ls,
num_dims))
return ls
input_dims = check_dim_list(ls_input_dims)
output_dims = check_dim_list(ls_output_dims)
priority_dims = check_dim_list(ls_priority_dims)
non_recurrent_dims = check_dim_list(ls_non_recurrent_dims)
rnn_dims = []
for i in range(num_dims):
rnn_dims.append(
_GridRNNDimension(
idx=i,
is_input=(i in input_dims),
is_output=(i in output_dims),
is_priority=(i in priority_dims),
non_recurrent_fn=non_recurrent_fn
if i in non_recurrent_dims else None))
return _GridRNNConfig(
num_dims=num_dims,
dims=rnn_dims,
inputs=input_dims,
outputs=output_dims,
recurrents=[x for x in range(num_dims) if x not in non_recurrent_dims],
priority=priority_dims,
non_priority=[x for x in range(num_dims) if x not in priority_dims],
tied=tied,
num_units=num_units)
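# Example (sketch): Grid2LSTMCell without a non_recurrent_fn ends up calling
#   _parse_rnn_config(2, 0, 0, 0, None, nn.relu, False, num_units)
# which yields inputs=[0], outputs=[0], recurrents=[0, 1], priority=[0] and
# non_priority=[1].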
def _propagate(dim_indices, conf, cells, c_prev, m_prev, new_output, new_state,
first_call):
"""Propagates through all the cells in dim_indices dimensions.
"""
if len(dim_indices) == 0:
return
# Because of the way RNNCells are implemented, we take the last dimension
# (H_{N-1}) out and feed it as the state of the RNN cell
# (in `last_dim_output`).
# The input of the cell (H_0 to H_{N-2}) are concatenated into `cell_inputs`
if conf.num_dims > 1:
ls_cell_inputs = [None] * (conf.num_dims - 1)
for d in conf.dims[:-1]:
if new_output[d.idx] is None:
ls_cell_inputs[d.idx] = m_prev[d.idx]
else:
ls_cell_inputs[d.idx] = new_output[d.idx]
cell_inputs = array_ops.concat(ls_cell_inputs, 1)
else:
cell_inputs = array_ops.zeros([m_prev[0].get_shape().as_list()[0], 0],
m_prev[0].dtype)
last_dim_output = (new_output[-1]
if new_output[-1] is not None else m_prev[-1])
for i in dim_indices:
d = conf.dims[i]
if d.non_recurrent_fn:
if conf.num_dims > 1:
linear_args = array_ops.concat([cell_inputs, last_dim_output], 1)
else:
linear_args = last_dim_output
with vs.variable_scope('non_recurrent' if conf.tied else
'non_recurrent/cell_{}'.format(i)):
if conf.tied and not (first_call and i == dim_indices[0]):
vs.get_variable_scope().reuse_variables()
new_output[d.idx] = layers.fully_connected(
linear_args,
num_outputs=conf.num_units,
activation_fn=d.non_recurrent_fn,
weights_initializer=(vs.get_variable_scope().initializer or
layers.initializers.xavier_initializer),
weights_regularizer=vs.get_variable_scope().regularizer)
else:
if c_prev[i] is not None:
cell_state = (c_prev[i], last_dim_output)
else:
# for GRU/RNN, the state is just the previous output
cell_state = last_dim_output
with vs.variable_scope('recurrent' if conf.tied else
'recurrent/cell_{}'.format(i)):
if conf.tied and not (first_call and i == dim_indices[0]):
vs.get_variable_scope().reuse_variables()
cell = cells[i]
new_output[d.idx], new_state[d.idx] = cell(cell_inputs, cell_state)
|
tensorflow-master
|
tensorflow/contrib/grid_rnn/python/ops/grid_rnn_cell.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for third_party.tensorflow.contrib.ffmpeg.decode_audio_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import six
from tensorflow.contrib import ffmpeg
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class DecodeAudioOpTest(test.TestCase):
def _loadFileAndTest(self, filename, file_format, duration_sec,
samples_per_second, channel_count,
samples_per_second_tensor=None, feed_dict=None,
stream=None):
"""Loads an audio file and validates the output tensor.
Args:
filename: The filename of the input file.
file_format: The format of the input file.
duration_sec: The duration of the audio contained in the file in seconds.
samples_per_second: The desired sample rate in the output tensor.
channel_count: The desired channel count in the output tensor.
samples_per_second_tensor: The value to pass to the corresponding
parameter in the instantiated `decode_audio` op. If not
provided, will default to a constant value of
`samples_per_second`. Useful for providing a placeholder.
feed_dict: Used when evaluating the `decode_audio` op. If not
provided, will be empty. Useful when providing a placeholder for
`samples_per_second_tensor`.
stream: A string specifying which stream from the content file
should be decoded. The default value is '' which leaves the
decision to ffmpeg.
"""
if samples_per_second_tensor is None:
samples_per_second_tensor = samples_per_second
with self.cached_session():
path = os.path.join(resource_loader.get_data_files_path(), 'testdata',
filename)
with open(path, 'rb') as f:
contents = f.read()
audio_op = ffmpeg.decode_audio(
contents,
file_format=file_format,
samples_per_second=samples_per_second_tensor,
channel_count=channel_count, stream=stream)
audio = audio_op.eval(feed_dict=feed_dict or {})
self.assertEqual(len(audio.shape), 2)
self.assertNear(
duration_sec * samples_per_second,
audio.shape[0],
# Duration should be specified within 10%:
0.1 * audio.shape[0])
self.assertEqual(audio.shape[1], channel_count)
def testStreamIdentifier(self):
# mono_16khz_mp3_32khz_aac.mp4 was generated from:
# ffmpeg -i tensorflow/contrib/ffmpeg/testdata/mono_16khz_mp3.mp4 \
# -i tensorflow/contrib/ffmpeg/testdata/mono_32khz_aac.mp4 \
# -strict -2 -map 0:a -map 1:a \
# tensorflow/contrib/ffmpeg/testdata/mono_16khz_mp3_32khz_aac.mp4
self._loadFileAndTest('mono_16khz_mp3_32khz_aac.mp4', 'mp4', 2.77, 20000,
1, stream='0')
self._loadFileAndTest('mono_16khz_mp3_32khz_aac.mp4', 'mp4', 2.77, 20000,
1, stream='1')
def testMonoMp3(self):
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000, 1)
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000, 2)
def testMonoMp4Mp3Codec(self):
# mp3 compressed audio streams in mp4 container.
self._loadFileAndTest('mono_16khz_mp3.mp4', 'mp4', 2.77, 20000, 1)
self._loadFileAndTest('mono_16khz_mp3.mp4', 'mp4', 2.77, 20000, 2)
def testMonoMp4AacCodec(self):
# aac compressed audio streams in mp4 container.
self._loadFileAndTest('mono_32khz_aac.mp4', 'mp4', 2.77, 20000, 1)
self._loadFileAndTest('mono_32khz_aac.mp4', 'mp4', 2.77, 20000, 2)
def testStereoMp3(self):
self._loadFileAndTest('stereo_48khz.mp3', 'mp3', 0.79, 50000, 1)
self._loadFileAndTest('stereo_48khz.mp3', 'mp3', 0.79, 20000, 2)
def testStereoMp4Mp3Codec(self):
# mp3 compressed audio streams in mp4 container.
self._loadFileAndTest('stereo_48khz_mp3.mp4', 'mp4', 0.79, 50000, 1)
self._loadFileAndTest('stereo_48khz_mp3.mp4', 'mp4', 0.79, 20000, 2)
def testStereoMp4AacCodec(self):
# aac compressed audio streams in mp4 container.
self._loadFileAndTest('stereo_48khz_aac.mp4', 'mp4', 0.79, 50000, 1)
self._loadFileAndTest('stereo_48khz_aac.mp4', 'mp4', 0.79, 20000, 2)
def testMonoWav(self):
self._loadFileAndTest('mono_10khz.wav', 'wav', 0.57, 5000, 1)
self._loadFileAndTest('mono_10khz.wav', 'wav', 0.57, 10000, 4)
def testOgg(self):
self._loadFileAndTest('mono_10khz.ogg', 'ogg', 0.57, 10000, 1)
def testInvalidFile(self):
with self.cached_session():
contents = 'invalid file'
audio_op = ffmpeg.decode_audio(
contents,
file_format='wav',
samples_per_second=10000,
channel_count=2)
audio = audio_op.eval()
self.assertEqual(audio.shape, (0, 0))
def testSampleRatePlaceholder(self):
placeholder = array_ops.placeholder(dtypes.int32)
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000, 1,
samples_per_second_tensor=placeholder,
feed_dict={placeholder: 20000})
def testSampleRateBadType(self):
placeholder = array_ops.placeholder(dtypes.float32)
with self.assertRaises(TypeError):
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000.0, 1,
samples_per_second_tensor=placeholder,
feed_dict={placeholder: 20000.0})
def testSampleRateBadValue_Zero(self):
placeholder = array_ops.placeholder(dtypes.int32)
with six.assertRaisesRegex(self, Exception,
r'samples_per_second must be positive'):
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000.0, 1,
samples_per_second_tensor=placeholder,
feed_dict={placeholder: 0})
def testSampleRateBadValue_Negative(self):
placeholder = array_ops.placeholder(dtypes.int32)
with six.assertRaisesRegex(self, Exception,
r'samples_per_second must be positive'):
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000.0, 1,
samples_per_second_tensor=placeholder,
feed_dict={placeholder: -2})
def testInvalidFileFormat(self):
with six.assertRaisesRegex(self, Exception,
r'file_format must be one of'):
self._loadFileAndTest('mono_16khz.mp3', 'docx', 0.57, 20000, 1)
def testStaticShapeInference_ConstantChannelCount(self):
with self.cached_session():
audio_op = ffmpeg.decode_audio(b'~~~ wave ~~~',
file_format='wav',
samples_per_second=44100,
channel_count=2)
self.assertEqual([None, 2], audio_op.shape.as_list())
def testStaticShapeInference_NonConstantChannelCount(self):
with self.cached_session():
channel_count = array_ops.placeholder(dtypes.int32)
audio_op = ffmpeg.decode_audio(b'~~~ wave ~~~',
file_format='wav',
samples_per_second=44100,
channel_count=channel_count)
self.assertEqual([None, None], audio_op.shape.as_list())
def testStaticShapeInference_ZeroChannelCountInvalid(self):
with self.cached_session():
with six.assertRaisesRegex(self, Exception,
r'channel_count must be positive'):
ffmpeg.decode_audio(b'~~~ wave ~~~',
file_format='wav',
samples_per_second=44100,
channel_count=0)
def testStaticShapeInference_NegativeChannelCountInvalid(self):
with self.cached_session():
with six.assertRaisesRegex(self, Exception,
r'channel_count must be positive'):
ffmpeg.decode_audio(b'~~~ wave ~~~',
file_format='wav',
samples_per_second=44100,
channel_count=-2)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/ffmpeg/decode_audio_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Working with audio using FFmpeg.
See the [FFMPEG](https://tensorflow.org/api_guides/python/contrib.ffmpeg) guide.
@@decode_audio
@@encode_audio
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.ffmpeg.ffmpeg_ops import decode_audio
from tensorflow.contrib.ffmpeg.ffmpeg_ops import decode_video
from tensorflow.contrib.ffmpeg.ffmpeg_ops import encode_audio
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['decode_audio', 'encode_audio', 'decode_video']
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/ffmpeg/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Encoding and decoding audio using FFmpeg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.ffmpeg.ops import gen_decode_audio_op_py
from tensorflow.contrib.ffmpeg.ops import gen_decode_video_op_py
from tensorflow.contrib.ffmpeg.ops import gen_encode_audio_op_py
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.util.deprecation import deprecated
_ffmpeg_so = loader.load_op_library(
resource_loader.get_path_to_datafile('ffmpeg.so'))
@deprecated('2018-09-04',
'tf.contrib.ffmpeg will be removed in 2.0, the support for video '
'and audio will continue to be provided in tensorflow-io: '
'https://github.com/tensorflow/io')
def decode_audio(contents, file_format=None, samples_per_second=None,
channel_count=None, stream=None):
"""Create an op that decodes the contents of an audio file.
Note that ffmpeg is free to select the "best" audio track from an mp4.
https://trac.ffmpeg.org/wiki/Map
Args:
contents: The binary contents of the audio file to decode. This is a
scalar.
file_format: A string or scalar string tensor specifying which
format the contents will conform to. This can be mp3, mp4, ogg,
or wav.
samples_per_second: The number of samples per second that is
assumed, as an `int` or scalar `int32` tensor. In some cases,
resampling will occur to generate the correct sample rate.
channel_count: The number of channels that should be created from the
audio contents, as an `int` or scalar `int32` tensor. If the
`contents` have more than this number, then some channels will
be merged or dropped. If `contents` has fewer than this, then
additional channels will be created from the existing ones.
stream: A string specifying which stream from the content file
should be decoded, e.g., '0' means the 0-th stream.
The default value is '' which leaves the decision to ffmpeg.
Returns:
A rank-2 tensor that has time along dimension 0 and channels along
dimension 1. Dimension 0 will be `samples_per_second *
length_in_seconds` wide, and dimension 1 will be `channel_count`
wide. If ffmpeg fails to decode the audio then an empty tensor will
be returned.
"""
return gen_decode_audio_op_py.decode_audio_v2(
contents, file_format=file_format, samples_per_second=samples_per_second,
channel_count=channel_count, stream=stream)
ops.NotDifferentiable('DecodeAudio')
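# Usage sketch (illustrative, not part of the original module). 'example.wav'
# is a hypothetical local file; the graph-mode style matches this module.
def _example_decode_wav(path='example.wav'):
  """Builds an op decoding a WAV file into a [samples, 1] float32 tensor."""
  with open(path, 'rb') as f:
    contents = f.read()
  return decode_audio(
      contents, file_format='wav', samples_per_second=16000, channel_count=1)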
@deprecated('2018-09-04',
'tf.contrib.ffmpeg will be removed in 2.0, the support for video '
'and audio will continue to be provided in tensorflow-io: '
'https://github.com/tensorflow/io')
def encode_audio(audio, file_format=None, samples_per_second=None):
"""Creates an op that encodes an audio file using sampled audio from a tensor.
Args:
audio: A rank-2 `Tensor` that has time along dimension 0 and
channels along dimension 1. Dimension 0 is `samples_per_second *
length_in_seconds` long.
file_format: The type of file to encode, as a string or rank-0
string tensor. "wav" is the only supported format.
samples_per_second: The number of samples in the audio tensor per
second of audio, as an `int` or rank-0 `int32` tensor.
Returns:
A scalar tensor that contains the encoded audio in the specified file
format.
"""
return gen_encode_audio_op_py.encode_audio_v2(
audio,
file_format=file_format,
samples_per_second=samples_per_second,
bits_per_second=192000) # not used by WAV
ops.NotDifferentiable('EncodeAudio')
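# Usage sketch (illustrative, not part of the original module): encode a
# short mono waveform as WAV. The two-sample tensor is an arbitrary example,
# following the shapes used in encode_audio_op_test.py.
def _example_encode_wav():
  return encode_audio(
      [[0.0], [1.0]], file_format='wav', samples_per_second=16000)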
@deprecated('2018-09-04',
'tf.contrib.ffmpeg will be removed in 2.0, the support for video '
'and audio will continue to be provided in tensorflow-io: '
'https://github.com/tensorflow/io')
def decode_video(contents):
"""Create an op that decodes the contents of a video file.
Args:
contents: The binary contents of the video file to decode. This is a scalar.
Returns:
A rank-4 `Tensor` that has `[frames, height, width, 3]` RGB as output.
"""
return gen_decode_video_op_py.decode_video(contents)
ops.NotDifferentiable('DecodeVideo')
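# Usage sketch (illustrative, not part of the original module). 'example.mp4'
# is a hypothetical local file; the result is a [frames, height, width, 3]
# RGB tensor.
def _example_decode_video(path='example.mp4'):
  with open(path, 'rb') as f:
    contents = f.read()
  return decode_video(contents)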
|
tensorflow-master
|
tensorflow/contrib/ffmpeg/ffmpeg_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for third_party.tensorflow.contrib.ffmpeg.decode_video_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import six # pylint: disable=unused-import
from tensorflow.contrib import ffmpeg
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class DecodeVideoOpTest(test.TestCase):
def _loadFileAndTest(self, filename, width, height, frames, bmp_filename,
index):
"""Loads an video file and validates the output tensor.
Args:
filename: The filename of the input file.
width: The width of the video.
height: The height of the video.
      frames: The number of frames in the video.
bmp_filename: The filename for the bmp file.
      index: The frame index inside the video to compare against the BMP image.
"""
with self.cached_session():
path = os.path.join(resource_loader.get_data_files_path(), 'testdata',
filename)
with open(path, 'rb') as f:
contents = f.read()
bmp_path = os.path.join(resource_loader.get_data_files_path(), 'testdata',
bmp_filename)
with open(bmp_path, 'rb') as f:
bmp_contents = f.read()
image_op = image_ops.decode_bmp(bmp_contents)
image = image_op.eval()
self.assertEqual(image.shape, (height, width, 3))
video_op = ffmpeg.decode_video(contents)
video = video_op.eval()
self.assertEqual(video.shape, (frames, height, width, 3))
      # ffmpeg produces results where channel values can be off by 1.
self.assertAllClose(video[index, :, :, :], image, atol=1)
def testMp4(self):
self._loadFileAndTest('small.mp4', 560, 320, 166, 'small_100.bmp', 99)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/ffmpeg/decode_video_op_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for third_party.tensorflow.contrib.ffmpeg.encode_audio_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import six
from tensorflow.contrib import ffmpeg
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class EncodeAudioOpTest(test.TestCase):
def setUp(self):
super(EncodeAudioOpTest, self).setUp()
path = os.path.join(resource_loader.get_data_files_path(),
'testdata/mono_10khz.wav')
with open(path, 'rb') as f:
self._contents = f.read()
def _compareWavFiles(self, original, encoded):
"""Compares the important bits of two WAV files.
    Some encoders create a slightly different header for the WAV file.
    This compares only the important parts of the header, as well as the
    contents.
Args:
original: Contents of the original .wav file.
encoded: Contents of the new, encoded .wav file.
"""
self.assertLess(44, len(original))
self.assertLess(44, len(encoded))
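    # Canonical 44-byte WAV header layout (for reference): bytes 0-3 'RIFF',
    # 4-7 file size, 8-11 'WAVE', 12-15 'fmt ', 16-19 fmt-chunk size,
    # 20-35 format fields (codec, channels, rates), then the 'data' chunk.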
self.assertEqual(original[:4], encoded[:4])
# Skip file size
self.assertEqual(original[8:16], encoded[8:16])
# Skip header size
self.assertEqual(original[20:36], encoded[20:36])
# Skip extra bits inserted by ffmpeg.
self.assertEqual(original[original.find(b'data'):],
encoded[encoded.find(b'data'):])
def testRoundTrip(self):
"""Reads a wav file, writes it, and compares them."""
with self.cached_session():
audio_op = ffmpeg.decode_audio(
self._contents,
file_format='wav',
samples_per_second=10000,
channel_count=1)
encode_op = ffmpeg.encode_audio(
audio_op, file_format='wav', samples_per_second=10000)
encoded_contents = encode_op.eval()
self._compareWavFiles(self._contents, encoded_contents)
def testRoundTripWithPlaceholderSampleRate(self):
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.int32)
audio_op = ffmpeg.decode_audio(
self._contents,
file_format='wav',
samples_per_second=placeholder,
channel_count=1)
encode_op = ffmpeg.encode_audio(
audio_op, file_format='wav', samples_per_second=placeholder)
encoded_contents = encode_op.eval(feed_dict={placeholder: 10000})
self._compareWavFiles(self._contents, encoded_contents)
def testFloatingPointSampleRateInvalid(self):
with self.cached_session():
with self.assertRaises(TypeError):
ffmpeg.encode_audio(
[[0.0], [1.0]],
file_format='wav',
samples_per_second=12345.678)
def testZeroSampleRateInvalid(self):
with self.cached_session() as sess:
encode_op = ffmpeg.encode_audio(
[[0.0], [1.0]],
file_format='wav',
samples_per_second=0)
with six.assertRaisesRegex(self, Exception, 'must be positive'):
sess.run(encode_op)
def testNegativeSampleRateInvalid(self):
with self.cached_session() as sess:
encode_op = ffmpeg.encode_audio(
[[0.0], [1.0]],
file_format='wav',
samples_per_second=-2)
with six.assertRaisesRegex(self, Exception, 'must be positive'):
sess.run(encode_op)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/ffmpeg/encode_audio_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to fused_conv2d_bias_activation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.fused_conv.python.ops.fused_conv2d_bias_activation_op import *
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, ['fused_conv2d_bias_activation'])
|
tensorflow-master
|
tensorflow/contrib/fused_conv/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/fused_conv/python/__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow op performing fused conv2d bias_add and relu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.fused_conv.ops import gen_fused_conv2d_bias_activation_op
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_fused_conv2d_bias_activation_op_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_fused_conv2d_bias_activation_op.so"))
# pylint: disable=redefined-builtin
def fused_conv2d_bias_activation(conv_input,
filter,
bias,
strides=None,
padding=None,
conv_input_scale=1.0,
side_input_scale=0.0,
side_input=None,
activation_mode="Relu",
data_format=None,
filter_format=None,
name=None):
"""Fused 2D conv, bias and activation with optional side input.
Computes a fused 2-D convolution scaled by conv_input_scale,
adds an optional side input scaled by side_input_scale, adds biases,
and applies ReLU. As an equation:
output = ReLU(conv_input_scale * Conv(conv_input, filter) +
side_input_scale * side_input + bias)
  Note: In int8 mode, the ReLU will clip the output to the range [0..127].
Args:
conv_input: A `Tensor` of the format specified by `data_format`.
filter: A `Tensor` whose format depends on `data_format`:
if `data_format` is "NCHW_VECT_C", filter should be "OIHW_VECT_I"
otherwise, it should be "HWIO" format.
bias: A 1-D `Tensor` of type `float32`, and dimensions equal to the
number of output channels.
strides: A list of 4 `ints` specifying convolution strides.
if `data_format` is "NCHW" or "NCHW_VECT_C", the order should be NCHW.
if `data_format` is "NHWC", the order should be NHWC.
padding: A `string` from: `"SAME", "VALID"`.
conv_input_scale: A scalar `float32` that will be multiplied by conv_input.
This is optional and defaults to 1. However it should be set to
specify the quantization scale when `data_format` is "NCHW_VECT_C".
side_input_scale: A scalar `float32` that will be multiplied by side_input.
This is optional and defaults to 0.
side_input: A `Tensor` of the format specified by `data_format`.
This is useful for implementing ResNet blocks.
activation_mode: (optional) currently supports the default "Relu", or
"None" activation function.
Note: in qint8 mode, "None" actually clips to the range [-128, 127],
while "Relu" clips to the range [0, 127].
data_format: Specifies the data format.
Possible values are:
"NHWC" float [batch, height, width, channels]
"NCHW" float [batch, channels, height, width]
"NCHW_VECT_C" qint8 [batch, channels / 4, height, width, channels % 4]
Defaults to `"NHWC"`.
Performance is worst for `"NHWC"` and best for `"NCHW_VECT_C"`.
filter_format: Specifies the filter format.
Possible values are:
"HWIO" float [kernel_height, kernel_width, input_channels,
output_channels ]
"OIHW" float [output_channels, input_channels, kernel_height,
kernel_width ]
"OIHW_VECT_I" qint8 [ output_channels, input_channels / 4,
kernel_height, kernel_width, input_channels % 4 ]
Defaults to `"HWIO"`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the format specified by `data_format`.
"""
if strides is None:
strides = [1, 1, 1, 1]
if side_input is None:
side_input = []
return gen_fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
conv_input,
filter,
bias,
side_input,
conv_input_scale,
side_input_scale,
padding=padding,
strides=strides,
activation_mode=activation_mode,
data_format=data_format,
filter_format=filter_format,
name=name)
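# Usage sketch (illustrative, not part of the original module): a plain
# float32 NHWC invocation. The argument shapes in the docstring are example
# assumptions, not requirements beyond those documented above.
def _example_fused_conv(images, filters, biases):
  """images: [N, H, W, C] float32; filters: [kH, kW, C, K]; biases: [K]."""
  return fused_conv2d_bias_activation(
      images, filters, biases,
      strides=[1, 1, 1, 1], padding="SAME",
      data_format="NHWC", filter_format="HWIO",
      activation_mode="Relu")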
|
tensorflow-master
|
tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for fused conv2d bias and activation op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.fused_conv.python.ops import fused_conv2d_bias_activation_op
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def build_conv_bias_relu_graph(device, input_shape, filter_shape, strides,
padding, num_iters, data_format):
"""builds a graph containing a sequence of conv2d operations.
Args:
device: String, the device to run on.
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter tensor.
strides: A list of ints. 1-D of length 4. The stride of sliding
window for each dimension of input.
padding: A string from: "SAME", "VALID". The type of padding
algorithm to use.
num_iters: number of iterations to run conv2d.
data_format: data format string of input, 'NHWC' and 'NCHW' are
supported.
Returns:
    An op that groups the constructed ops, suitable for session.run().
"""
if data_format == "NCHW":
input_shape = [
input_shape[0], input_shape[3], input_shape[1], input_shape[2]
]
with ops.device("/%s:0" % device):
inp = variables.Variable(random_ops.truncated_normal(input_shape))
filt = variables.Variable(random_ops.truncated_normal(filter_shape))
bias_shape = [filter_shape[-1]]
bias = variables.Variable(random_ops.truncated_normal(bias_shape))
outputs = []
conv2d_out = nn_ops.conv2d(
inp, filt, strides, padding, data_format=data_format)
bias_out = nn_ops.bias_add(conv2d_out, bias, data_format=data_format)
relu_out = nn_ops.relu(bias_out)
outputs.append(relu_out)
for _ in range(1, num_iters):
with ops.control_dependencies([relu_out]):
conv2d_out = nn_ops.conv2d(
inp, filt, strides, padding, data_format=data_format)
bias_out = nn_ops.bias_add(conv2d_out, bias, data_format=data_format)
relu_out = nn_ops.relu(bias_out)
outputs.append(relu_out)
return control_flow_ops.group(*outputs)
def build_fused_conv_bias_relu_graph(device, input_shape, filter_shape, strides,
padding, num_iters, data_format):
"""builds a graph containing a sequence of conv2d operations.
Args:
device: String, the device to run on.
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter tensor.
strides: A list of ints. 1-D of length 4. The stride of sliding
window for each dimension of input.
padding: A string from: "SAME", "VALID". The type of padding
algorithm to use.
num_iters: number of iterations to run conv2d.
data_format: data format string of input, 'NHWC' and 'NCHW' are
supported.
Returns:
    An op that groups the constructed ops, suitable for session.run().
"""
if data_format == "NCHW":
input_shape = [
input_shape[0], input_shape[3], input_shape[1], input_shape[2]
]
with ops.device("/%s:0" % device):
inp = variables.Variable(random_ops.truncated_normal(input_shape))
filt = variables.Variable(random_ops.truncated_normal(filter_shape))
bias_shape = [filter_shape[-1]]
bias = variables.Variable(random_ops.truncated_normal(bias_shape))
outputs = []
fused_out = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
inp,
filt,
bias,
strides,
padding,
data_format=data_format,
activation_mode="Relu")
outputs.append(fused_out)
for _ in range(1, num_iters):
with ops.control_dependencies([fused_out]):
        fused_out = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(  # pylint: disable=line-too-long
inp,
filt,
bias,
strides,
padding,
data_format=data_format,
activation_mode="Relu")
outputs.append(fused_out)
return control_flow_ops.group(*outputs)
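# Note: the two builders above construct equivalent computations; the fused
# variant issues a single fused op per iteration instead of three separate
# ops (conv2d, bias_add, relu), which is the difference this benchmark
# measures.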
class FusedConv2DBiasActivationBenchmark(test.Benchmark):
"""Benchmark conv2d!"""
def _run_graph(self, device, input_shape, filter_shape, strides, padding,
num_iters, data_format):
"""runs the graph and print its execution time.
Args:
device: String, the device to run on.
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter tensor.
strides: A list of ints. 1-D of length 4. The stride of sliding
window for each dimension of input.
      padding: A string from: "SAME", "VALID". The type of padding
        algorithm to use.
num_iters: number of iterations to run conv2d.
data_format: data format string of input, 'NHWC' and 'NCHW' are
supported.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
outputs = build_fused_conv_bias_relu_graph(device, input_shape,
filter_shape, strides, padding,
num_iters, data_format)
with session_lib.Session(graph=graph) as session:
variables.global_variables_initializer().run()
# warmup runs
session.run(outputs)
start_time = time.time()
session.run(outputs)
duration = (time.time() - start_time) / num_iters
print("%s inputshape:%s filtershape:%s strides:%s padding:%s "
"%d iters: %.8f sec" % (device, str(input_shape).replace(" ", ""),
str(filter_shape).replace(" ", ""),
str(strides).replace(" ", ""), padding,
num_iters, duration))
name_template = (
"conv2d_{device}_input_shape_{inputshape}_filter_shape_{filtershape}_"
"strides_{strides}_padding_{padding}")
self.report_benchmark(
name=name_template.format(
device=device,
inputshape=str(input_shape).replace(" ", ""),
filtershape=str(filter_shape).replace(" ", ""),
strides=str(strides).replace(" ", ""),
padding=padding).replace(" ", ""),
iters=num_iters,
wall_time=duration)
return duration
def benchmark_fused_conv2d_bias_activation(self):
stride = [1, 1, 1, 1]
paddings = ["VALID", "SAME"]
data_formats = ["NHWC", "NCHW"]
    resnet50_input_shapes = [
        [64, 14, 14, 256], [64, 14, 14, 256], [64, 14, 14, 1024],
        [64, 55, 55, 64], [64, 28, 28, 128], [64, 28, 28, 128],
        [64, 55, 55, 64], [64, 7, 7, 512], [64, 7, 7, 512],
        [64, 28, 28, 512], [64, 55, 55, 256], [64, 7, 7, 2048]]
    resnet50_filter_shapes = [
        [1, 1, 256, 1024], [3, 3, 256, 256], [1, 1, 1024, 256],
        [1, 1, 64, 256], [1, 1, 128, 512], [3, 3, 128, 128],
        [3, 3, 64, 64], [3, 3, 512, 512], [1, 1, 512, 2048],
        [1, 1, 512, 128], [1, 1, 256, 64], [1, 1, 2048, 512]]
    inception3_input_shapes = [
        [64, 17, 17, 768], [64, 35, 35, 96], [64, 35, 35, 288],
        [64, 8, 8, 384], [64, 8, 8, 384], [64, 17, 17, 192],
        [64, 35, 35, 64], [64, 17, 17, 192], [64, 17, 17, 160],
        [64, 17, 17, 160], [64, 17, 17, 768], [64, 35, 35, 256],
        [64, 35, 35, 48], [64, 35, 35, 192], [64, 17, 17, 128],
        [64, 17, 17, 160], [64, 8, 8, 448], [64, 17, 17, 128],
        [64, 17, 17, 768], [64, 17, 17, 160]]
    inception3_filter_shapes = [
        [1, 1, 768, 192], [3, 3, 96, 96], [1, 1, 288, 64],
        [1, 3, 384, 384], [3, 1, 384, 384], [7, 1, 192, 192],
        [3, 3, 64, 96], [1, 7, 192, 192], [7, 1, 160, 160],
        [1, 7, 160, 160], [1, 1, 768, 160], [1, 1, 256, 64],
        [5, 5, 48, 64], [1, 1, 192, 64], [1, 7, 128, 128],
        [1, 7, 160, 192], [3, 3, 448, 384], [7, 1, 128, 128],
        [1, 1, 768, 128], [7, 1, 160, 192]]
print("fused conv2d bias activation benchmark using resnet50's shapes:")
for ishape, fshape in zip(resnet50_input_shapes, resnet50_filter_shapes):
for padding in paddings:
for data_format in data_formats:
self._run_graph("gpu", ishape, fshape, stride, padding, 80,
data_format)
print("fused conv2d bias activation benchmark using inception3's shapes:")
for ishape, fshape in zip(inception3_input_shapes,
inception3_filter_shapes):
for padding in paddings:
for data_format in data_formats:
self._run_graph("gpu", ishape, fshape, stride, padding, 80,
data_format)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_benchmark.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fused convolutions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.fused_conv.python.ops import fused_conv2d_bias_activation_op_test_base as test_base
from tensorflow.python.platform import test
# Instantiate three test suites from test_base, mixing in test.TestCase as
# the test framework.
class FusedConv2DBiasActivationTest(test_base.FusedConv2DBiasActivationTest,
test.TestCase):
pass
class FusedConvInt8CPUTests(test_base.FusedConvInt8CPUTests, test.TestCase):
pass
class FusedConvInt8CorrespondenceTests(
test_base.FusedConvInt8CorrespondenceTests, test.TestCase):
pass
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides test suites that can be run to test fused convolutions.
Each of the two test suites in this module, FusedConv2DBiasActivationTest and
FusedConvInt8Tests, should be "instantiated" by declaring a class which inherits
from the FusedConv test and a class that provides the standard test.TestCase
API.
See e.g. fused_conv2d_bias_activation_op_test.py in this folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.contrib.fused_conv.python.ops import fused_conv2d_bias_activation_op
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
Relative to inception, each depth value is `depth // shrink`.
Args:
shrink: Factor to shrink each depth value by relative to Inception.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the convolution
parameters of Inception layers.
"""
  input_sizes = [
      [4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 2048],
      [4, 8, 8, 448], [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 2048],
      [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
      [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248], [4, 17, 17, 128],
      [4, 17, 17, 1248], [4, 17, 17, 224], [4, 17, 17, 192], [4, 17, 17, 192],
      [4, 17, 17, 1216], [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
      [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152], [4, 17, 17, 192],
      [4, 17, 17, 160], [4, 17, 17, 1152], [4, 17, 17, 1024], [4, 17, 17, 128],
      [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
      [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 768],
      [4, 17, 17, 768], [4, 35, 35, 96], [4, 35, 35, 288], [4, 35, 35, 64],
      [4, 35, 35, 288], [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
      [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192], [4, 35, 35, 192],
      [4, 73, 73, 64], [4, 73, 73, 64], [4, 147, 147, 24]
  ]
  filter_sizes = [
      [1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
      [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
      [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
      [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
      [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
      [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
      [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
      [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
      [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
      [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
      [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
      [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
      [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
      [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
      [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
      [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
      [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
      [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
      [1, 1, 24, 64]
  ]
  out_sizes = [
      [4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 192],
      [4, 8, 8, 384], [4, 8, 8, 320], [4, 8, 8, 448], [4, 8, 8, 384],
      [4, 8, 8, 384], [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
      [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192], [4, 8, 8, 320],
      [4, 17, 17, 128], [4, 17, 17, 224], [4, 17, 17, 256], [4, 17, 17, 256],
      [4, 17, 17, 192], [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
      [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 192],
      [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 128], [4, 17, 17, 192],
      [4, 17, 17, 160], [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
      [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 128],
      [4, 17, 17, 320], [4, 17, 17, 96], [4, 17, 17, 384], [4, 35, 35, 96],
      [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
      [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64], [4, 35, 35, 48],
      [4, 71, 71, 192], [4, 73, 73, 64], [4, 147, 147, 64]
  ]
strides = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1
]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
for f in filter_sizes:
f[2] //= shrink
f[3] //= shrink
for o in out_sizes:
o[3] //= shrink
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, VALID, VALID
]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
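# Illustrative, hypothetical helper (added for exposition; not part of the
# original suite): one way a caller might consume the generator above.
def _CountShrunkInceptionShapes(shrink=10):
  """Returns the number of (input, filter, out, stride, padding) tuples."""
  return sum(1 for _ in _GetShrunkInceptionShapes(shrink))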
def _GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NCHW", True), ("NHWC", True)]
return test_configs
def _IotaNdF32Constant(dim_sizes):
def MakeList(dims):
if len(dims) == 1:
return [float(1 + f) for f in range(dims[0])]
return [MakeList(dims[1:]) for _ in range(dims[0])]
return constant_op.constant(MakeList(dim_sizes), dtype=dtypes.float32)
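# For exposition: _IotaNdF32Constant([2, 3]) evaluates to
# [[1., 2., 3.], [1., 2., 3.]]; each innermost row counts up from 1.0, which
# keeps hand-computed expected convolution outputs simple.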
def _GetInceptionFwdTest(input_size,
filter_size,
stride,
padding,
gpu_only=True):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionFwd %s",
(input_size, filter_size, stride, padding))
return
tf_logging.info("Testing InceptionFwd %s",
(input_size, filter_size, stride, padding))
self.CompareFwdValues(input_size, filter_size, [stride, stride], padding)
return Test
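# Note: _GetInceptionFwdTest is a test factory. Each closure it returns is
# attached to FusedConv2DBiasActivationTest via setattr further below, yielding
# one named forward test per Inception shape configuration.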
class FusedConv2DBiasActivationTest(object):
@contextlib.contextmanager
def test_scope(self): # pylint: disable=invalid-name
"""Can be overridden in base classes to provide a test scope."""
yield
def _DtypesToTest(self, use_gpu):
return [dtypes.float32]
def _FilterFormatsToTest(self, use_gpu):
return ["HWIO", "OIHW"]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, bias,
strides, padding, activation_mode, data_format,
filter_format, dtype):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
bias: 1-D bias tensor of length output_depth.
      strides: Strides as [row_stride, col_stride].
padding: Padding type.
activation_mode: Activation mode.
data_format: Format of the data tensors.
filter_format: Filter format to use for the fused convolution.
dtype: Data type for inputs and outputs.
Returns:
Symbolic tensor value and reference value that can be used to
execute the computation and verify the results.
"""
input_size = np.prod(tensor_in_sizes)
filter_size = np.prod(filter_in_sizes)
    bias_size = filter_in_sizes[-1]  # equal to the output depth
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, input_size + 1)]
x2 = [f * 1.0 for f in range(1, filter_size + 1)]
# This is to guarantee that there are always negative values after
# bias add so that we can test whether relu works correctly.
x3 = bias
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
fused_t2 = t2
if filter_format == "OIHW":
fused_t2 = _HwioToOihw(t2)
t3 = constant_op.constant(x3, shape=[bias_size], dtype=dtype)
strides = [1] + strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
t1,
fused_t2,
t3,
strides=strides,
padding=padding,
data_format=data_format,
filter_format=filter_format,
activation_mode=activation_mode)
ref_conv_output = nn_ops.conv2d(
t1, t2, strides=strides, padding=padding, data_format=data_format)
ref_bias_output = nn_ops.bias_add(
ref_conv_output, t3, data_format=data_format)
ref_output = nn_ops.relu(ref_bias_output)
if data_format == "NCHW":
output = test_util.NCHWToNHWC(output)
ref_output = test_util.NCHWToNHWC(ref_output)
return output, ref_output
def CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that CPU and GPU produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
      conv_strides: [row_stride, col_stride] for the convolution.
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
x3 = np.random.rand(*[filter_in_sizes[-1]]).astype(np.float32)
def _SetupVal(data_format, use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
t3 = constant_op.constant(x3, shape=[filter_in_sizes[-1]])
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
t1,
t2,
t3,
strides=strides,
padding=padding,
data_format=data_format,
activation_mode="Relu")
if data_format == "NCHW":
output = test_util.NCHWToNHWC(output)
return output
with self.session() as sess, self.test_scope():
tensors = []
for (data_format, use_gpu) in _GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
values = sess.run(tensors)
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, bias, strides,
padding):
with self.session() as sess, self.test_scope():
tensors = []
ref_tensors = []
for (data_format, use_gpu) in _GetTestConfigs():
with ops.device("/gpu:0" if use_gpu else "/cpu:0"):
for dtype in self._DtypesToTest(use_gpu):
for filter_format in self._FilterFormatsToTest(use_gpu):
result, expected = self._SetupValuesForDevice(
tensor_in_sizes, filter_in_sizes, bias, strides, padding,
"Relu", data_format, filter_format, dtype)
tensors.append(result)
ref_tensors.append(expected)
values = sess.run(tensors)
ref_values = sess.run(ref_tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
ref_value = ref_values[i]
tf_logging.info("expected = %s", ref_value)
tf_logging.info("actual = %s", value)
tol = 1e-5
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(
np.ravel(ref_value), np.ravel(value), atol=tol, rtol=tol)
self.assertShapeEqual(value, conv)
def testConv2D1x1Filter(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D1x1Filter test.")
return
# expected_output = [
# 0.0, 0.0, 0.0, 21.0, 0.0, 0.0, 57.0, 0.0, 0.0, 93.0, 41.0, 0.0, 129.0,
# 86.0, 43.0, 165.0, 131.0, 97.0
# ]
medians = [-45.0, -130.0, -215.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
bias=medians,
strides=[1, 1],
padding="VALID")
def testConv2DEmpty(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DEmpty test.")
return
# expected_output = []
self._VerifyValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
bias=[0.0, 0.0, 0.0],
strides=[1, 1],
padding="VALID")
def testConv2D2x2Filter(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2Filter test.")
return
# expected_output = [0.0, 0.0, 0.0, 401.0, 533.0, 665.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
bias=[-2500.0, -2500.0, -2500.0],
strides=[1, 1],
padding="VALID")
def testConv2D1x2Filter(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D1x2Filter test.")
return
# expected_output = [
# 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 190.0, 265.0, 340.0, 343.0, 436.0, 529.0
# ]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
bias=[-500.0, -500.0, -500.0],
strides=[1, 1],
padding="VALID")
def testConv2D2x2FilterStride2(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2FilterStride2 test.")
return
# expected_output = [0.0, 67.0, 163.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
bias=[-2300.0, -2300.0, -2300.0],
strides=[2, 2],
padding="VALID")
def testConv2D2x2FilterStride2Same(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2FilterStride2Same test.")
return
# expected_output = [0.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
bias=[-2300.0, -1000.0, -1000.0],
strides=[2, 2],
padding="SAME")
def testConv2D2x2FilterStride1x2(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2FilterStride1x2 test.")
return
# expected_output = [0.0, 0.0, 8.0, 28.0, 48.0, 68.0]
self._VerifyValues(
tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-90.0],
strides=[1, 2],
padding="VALID")
def testConv2DKernelSmallerThanStrideValid(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DKernelSmallerThanStrideValid test.")
return
# expected_output = [0, 0, 175, 205]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-100.0],
strides=[3, 3],
padding="VALID")
def testConv2DKernelSmallerThanStrideSame(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DKernelSmallerThanStrideSame test.")
return
# expected = [0, 0, 2, 4]
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
bias=[-5.0],
strides=[2, 2],
padding="SAME")
# expected = [0, 0, 4, 6]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
bias=[-5.0],
strides=[2, 2],
padding="SAME")
# expected = [4, 0, 1, 0]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-40.0],
strides=[3, 3],
padding="SAME")
def testConv2DKernelSizeMatchesInputSize(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DKernelSizeMatchesInputSize test.")
return
# expected = [0, 5]
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
bias=[-50.0, -55.0],
strides=[1, 1],
padding="VALID")
# expected = [0, 2, 282, 322]
self._VerifyValues(
tensor_in_sizes=[1, 8, 8, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-200.0],
strides=[4, 4],
padding="SAME")
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
# Incorrect filter shape.
with self.assertRaises(ValueError):
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
# Depth mismatch.
with self.assertRaises(ValueError):
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 2]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
def testOpEdgeCases(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping OpEdgeCases tests.")
return
with self.session() as sess, self.test_scope():
# Illegal strides.
with self.assertRaisesRegexp(
errors_impl.UnimplementedError,
".*strides.*in the batch and depth dimensions"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1]),
strides=[2, 1, 1, 1],
padding="SAME",
activation_mode="Relu"))
with self.assertRaisesRegexp(
errors_impl.UnimplementedError,
".*strides.*in the batch and depth dimensions"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1]),
strides=[1, 1, 1, 2],
padding="SAME",
activation_mode="Relu"))
# Illegal activation mode.
with self.assertRaisesRegexp(ValueError,
"Op passed string 'Tanh' not in:"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1]),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Tanh"))
# Filter larger than input.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([32, 20, 20, 3]),
_IotaNdF32Constant([20, 21, 3, 2]),
_IotaNdF32Constant([2]),
strides=[1, 1, 1, 1],
padding="VALID",
activation_mode="Relu"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([32, 20, 20, 3]),
_IotaNdF32Constant([21, 20, 3, 2]),
_IotaNdF32Constant([2]),
strides=[1, 1, 1, 1],
padding="VALID",
activation_mode="Relu"))
# Add InceptionFwd tests to FusedConv2DBiasActivationTest.
for index, (input_size_, filter_size_, output_size_, stride_,
padding_) in enumerate(_GetShrunkInceptionShapes()):
setattr(FusedConv2DBiasActivationTest, "testInceptionFwd_" + str(index),
_GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_))
# TODO(b/35359731)
# Add Fwd, BackInput, and BackFilter tests to check that, for certain input
# parameter sets, the winograd nonfused algorithm is excluded from conv
# autotune. If the winograd nonfused algorithm were offered as an autotune
# option while the cuDNN version is below 7, the following tests would fail.
ishape = [1, 400, 400, 1]
fshape = [1, 1, 1, 256]
oshape = [1, 400, 400, 256]
setattr(FusedConv2DBiasActivationTest, "testInceptionFwd_No_Winograd_Nonfused",
_GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True))
def _CalculateConvolvedOutputDim(input_dim, filter_dim, stride, padding_type):
"""Calculates the size of an output dimension of a strided convolution.
Given the sizes of the corresponding dimension of the input and filter shapes,
  and the stride and padding_type, calculates the size of the output dimension.
This function can be called separately for each input dimension.
Args:
input_dim: An `int` specifying the size of the input dimension.
filter_dim: An `int` specifying the size of the filter dimension.
stride: An `int` specifying the step size of the convolution along the
input dimension.
padding_type: either 'VALID' or 'SAME'.
Returns:
The size of the output dimension.
"""
if padding_type == "VALID":
return (input_dim - filter_dim + stride) // stride
else: # padding_type == 'SAME'
return (input_dim + stride - 1) // stride
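# Worked example of the padding arithmetic above (illustrative only): for
# input_dim=9, filter_dim=3, stride=2,
#   VALID: (9 - 3 + 2) // 2 == 4   (only windows fully inside the input)
#   SAME:  (9 + 2 - 1) // 2 == 5   (equivalent to ceil(input_dim / stride))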
def _GetFusedConvInt8TestParams():
"""Returns test parameters shared by all Int8 FusedConv tests."""
_test_params = [
{
"batch_size": 4,
"input_channels": 256,
"output_channels": 256,
"input_height": 228,
"input_width": 228,
"filter_height": 6,
"filter_width": 6,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.00002,
"side_input_scale": 0.2,
"bias_scale": 1.0,
"padding_type": "SAME"
},
{
"batch_size": 1,
"input_channels": 4,
"output_channels": 4,
"input_height": 8,
"input_width": 8,
"filter_height": 6,
"filter_width": 6,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 1,
"input_channels": 4,
"output_channels": 4,
"input_height": 6,
"input_width": 6,
"filter_height": 6,
"filter_width": 6,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 2,
"input_channels": 8,
"output_channels": 16,
"input_height": 8,
"input_width": 8,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "VALID"
},
{
"batch_size": 2,
"input_channels": 8,
"output_channels": 16,
"input_height": 8,
"input_width": 8,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 2,
"input_channels": 8,
"output_channels": 16,
"input_height": 8,
"input_width": 8,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "VALID"
},
{
"batch_size": 2,
"input_channels": 16,
"output_channels": 16,
"input_height": 9,
"input_width": 9,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.001,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 3,
"input_channels": 8,
"output_channels": 8,
"input_height": 9,
"input_width": 9,
"filter_height": 5,
"filter_width": 5,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.001,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 3,
"input_channels": 8,
"output_channels": 8,
"input_height": 9,
"input_width": 9,
"filter_height": 7,
"filter_width": 1,
"vertical_stride": 2,
"horizontal_stride": 1,
"conv_input_scale": 0.002,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 3,
"input_channels": 8,
"output_channels": 8,
"input_height": 9,
"input_width": 9,
"filter_height": 1,
"filter_width": 7,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.002,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
]
return _test_params
def _Int8Roundtrip(fn, tensor):
return array_ops.bitcast(
fn(array_ops.bitcast(tensor, dtypes.int8)), dtypes.qint8)
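# _Int8Roundtrip bitcasts qint8 to plain int8, applies `fn` (one of the layout
# helpers below), and bitcasts back. Presumably this is needed because the
# shape-manipulating kernels are registered for int8 but not qint8; the
# bitcasts are byte-preserving and do not rescale any values.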
def _NchwVectCToNchw(in_tensor):
# [N, C / 4, H, W, 4] => [N, C / 4, 4, H, W] == [N, C, H, W]
t = array_ops.transpose(in_tensor, [0, 1, 4, 2, 3])
n = in_tensor.shape.dims[0].value
c = in_tensor.shape.dims[1].value * in_tensor.shape.dims[4].value
h = in_tensor.shape.dims[2].value
w = in_tensor.shape.dims[3].value
return array_ops.reshape(t, [n, c, h, w])
def _NchwVectCToNhwc(in_tensor):
# [N, C / 4, H, W, 4] => [N, H, W, C / 4, 4] == [N, H, W, C]
t = array_ops.transpose(in_tensor, [0, 2, 3, 1, 4])
n = in_tensor.shape.dims[0].value
h = in_tensor.shape.dims[2].value
w = in_tensor.shape.dims[3].value
c = in_tensor.shape.dims[1].value * in_tensor.shape.dims[4].value
return array_ops.reshape(t, [n, h, w, c])
def _OihwVectIToHwio(in_tensor):
  # [O, I / 4, H, W, 4] => [H, W, I / 4, 4, O] == [H, W, I, O]
t = array_ops.transpose(in_tensor, [2, 3, 1, 4, 0])
o = in_tensor.shape.dims[0].value
i = in_tensor.shape.dims[1].value * in_tensor.shape.dims[4].value
h = in_tensor.shape.dims[2].value
w = in_tensor.shape.dims[3].value
return array_ops.reshape(t, [h, w, i, o])
def _NchwToNchwVectC(in_tensor):
n, c, h, w = in_tensor.shape.as_list()
assert c % 4 == 0
t = array_ops.reshape(in_tensor, [n, c // 4, 4, h, w])
return array_ops.transpose(t, [0, 1, 3, 4, 2])
def _NhwcToNchwVectC(in_tensor):
  # [N, H, W, C] => [N, H, W, C // 4, 4] => [N, C / 4, H, W, 4]
n, h, w, c = in_tensor.shape.as_list()
assert c % 4 == 0
t = array_ops.reshape(in_tensor, [n, h, w, c // 4, 4])
return array_ops.transpose(t, [0, 3, 1, 2, 4])
def _HwioToOihw(in_tensor):
return array_ops.transpose(in_tensor, [3, 2, 0, 1])
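# Hedged illustration (added for exposition; _ExampleVectCRoundTrip is not used
# by the test suites): the packing helpers above invert each other, so sending
# a statically shaped NCHW tensor through NCHW_VECT_C and back is the identity
# whenever its channel count is divisible by 4.
def _ExampleVectCRoundTrip(nchw_tensor):
  """Returns `nchw_tensor` unchanged, via an NCHW_VECT_C round trip."""
  return _NchwVectCToNchw(_NchwToNchwVectC(nchw_tensor))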
def _SimulateFusedConv2dBiasActivationInt8OnCpu(conv_input_scale, conv_input,
kernel, padding, strides,
side_input_scale, side_input,
biases, apply_relu):
"""Simulates the int8 fused 2-D convolution op using separate float ops.
The arguments and return values have the same format, meanings and
restrictions as the actual op.
Args:
conv_input_scale: A scalar 'float'.
conv_input: A `Tensor` of type `qint8` in NHWC layout.
kernel: A `Tensor` of type `qint8` in HWIO layout.
padding: A `string` from: `"SAME", "VALID"`.
strides: A list of `ints`.
side_input_scale: A scalar 'float'.
side_input: A `Tensor` of type `qint8` in NHWC layout.
biases: A `Tensor` of type `float32` in NHWC layout.
    apply_relu: A boolean. If True, applies the "Relu" activation, which clips
      outputs to the range [0, 127]; otherwise applies the "None" activation,
      which clips outputs to the range [-128, 127].
Returns:
A `Tensor` of type `qint8` in NHWC layout.
"""
conv_result = nn_ops.conv2d(
math_ops.cast(conv_input, dtypes.float32),
math_ops.cast(kernel, dtypes.float32),
strides=strides,
padding=padding,
data_format="NHWC") * conv_input_scale
conv_and_side_inputs = conv_result + side_input_scale * math_ops.cast(
side_input, dtypes.float32)
output = nn_ops.bias_add(conv_and_side_inputs, biases, data_format="NHWC")
if apply_relu:
output = nn_ops.relu(output)
# In this case quantization is identical to clipping and casting.
result, _, _ = gen_array_ops.quantize_v2(output, -128, 127, dtypes.qint8)
return result
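# In scalar form, the simulation above computes, for each output element:
#   acc = conv_input_scale * conv(x, w) + side_input_scale * side + bias
#   out = quantize(relu(acc)) if apply_relu else quantize(acc)
# where quantize rounds half to even and saturates to the int8 range.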
# FusedConv2DBiasActivation on CPU supports only NHWC/HWIO data format.
class FusedConvInt8CPUTests(object):
"""Verify quantization with CPU kernel."""
_test_params = _GetFusedConvInt8TestParams()
@contextlib.contextmanager
def test_scope(self): # pylint: disable=invalid-name
"""Can be overridden in base classes to provide a test scope."""
yield
def runTest(self, test_param, apply_relu):
"""Runs tests for dimensions configured in test_param."""
batch_size = test_param["batch_size"]
input_channels = test_param["input_channels"]
output_channels = test_param["output_channels"]
input_height = test_param["input_height"]
input_width = test_param["input_width"]
filter_height = test_param["filter_height"]
filter_width = test_param["filter_width"]
vertical_stride = test_param["vertical_stride"]
horizontal_stride = test_param["horizontal_stride"]
conv_input_scale = test_param["conv_input_scale"]
side_input_scale = test_param["side_input_scale"]
bias_scale = test_param["bias_scale"]
padding_type = test_param["padding_type"]
with self.session() as sess, self.test_scope():
conv_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[batch_size, input_height, input_width, input_channels],
              minval=0.0,
maxval=1.0,
dtype=dtypes.float32),
-1.0,
1.0,
dtypes.qint8,
mode="SCALED")
self.assertTrue(
sess.run(
math_ops.reduce_all(
math_ops.greater_equal(
array_ops.bitcast(conv_input, dtypes.int8), 0))))
kernel, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[filter_height, filter_width, input_channels, output_channels],
minval=-1.0,
maxval=1.0,
dtype=dtypes.float32),
-1.0,
1.0,
dtypes.qint8,
mode="SCALED")
output_height = _CalculateConvolvedOutputDim(input_height, filter_height,
vertical_stride,
padding_type)
output_width = _CalculateConvolvedOutputDim(input_width, filter_width,
horizontal_stride,
padding_type)
tf_logging.info("output_height=%s, output_width=%s", output_height,
output_width)
side_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[batch_size, output_height, output_width, output_channels],
minval=0.0,
maxval=1.0,
dtype=dtypes.float32),
-1.0,
1.0,
dtypes.qint8,
mode="SCALED")
biases = random_ops.random_uniform([output_channels],
minval=-10 * bias_scale,
maxval=20 * bias_scale,
dtype=dtypes.float32)
strides = [1, vertical_stride, horizontal_stride, 1]
actual = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
conv_input,
kernel,
biases,
strides=strides,
padding=padding_type,
conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale,
side_input=(None if side_input_scale == 0.0 else side_input),
activation_mode="Relu" if apply_relu else "None",
data_format="NHWC",
filter_format="HWIO")
expected = _SimulateFusedConv2dBiasActivationInt8OnCpu(
conv_input_scale, conv_input, kernel, padding_type, strides,
side_input_scale, side_input, biases, apply_relu)
actual_y, expected_y = sess.run([actual, expected])
self.assertAllClose(actual_y, expected_y, rtol=0, atol=1)
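      # rtol=0, atol=1 tolerates at most one quantization level of difference
      # between the fused int8 kernel and the float simulation above.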
def testFusedConvInt8(self):
for apply_relu in [True, False]:
for test_param in self._test_params:
self.runTest(test_param, apply_relu)
def testRoundingMode(self):
"""Verify the fused convolution op uses half-to-even rounding mode."""
batches = 1
input_size = 2
input_channels = 1
output_channels = 1
conv_input = np.array([1, 2, 3, 4]).reshape(
(batches, input_size, input_size, input_channels)).astype(np.int8)
kernel = np.array([1]).reshape(
(1, 1, input_channels, output_channels)).astype(np.int8)
biases = np.zeros((output_channels)).astype(np.float32)
with self.session() as sess, self.test_scope():
actual = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
math_ops.cast(conv_input, dtypes.qint8),
math_ops.cast(kernel, dtypes.qint8),
biases,
strides=[1, 1, 1, 1],
padding="SAME",
conv_input_scale=0.5,
side_input_scale=0.0,
activation_mode="None",
data_format="NHWC",
filter_format="HWIO")
actual_value = sess.run(actual)
      # The scaled convolution output is [0.5, 1.0, 1.5, 2.0]. After rounding
      # half to even, the final output is [0, 1, 2, 2].
self.assertTrue(
np.array_equal(actual_value.flatten(),
np.array([0, 1, 2, 2]).astype(np.int8)))
# Test that GPU and CPU kernels produce identical results for QInt8 data type.
class FusedConvInt8CorrespondenceTests(object):
"""Verify quantization with CPU kernel."""
_test_params = _GetFusedConvInt8TestParams()
@contextlib.contextmanager
def test_scope(self): # pylint: disable=invalid-name
"""Can be overridden in base classes to provide a test scope."""
yield
def runTest(self, test_param, apply_relu):
"""Runs tests for dimensions configured in test_param."""
batch_size = test_param["batch_size"]
input_channels = test_param["input_channels"]
output_channels = test_param["output_channels"]
input_height = test_param["input_height"]
input_width = test_param["input_width"]
filter_height = test_param["filter_height"]
filter_width = test_param["filter_width"]
vertical_stride = test_param["vertical_stride"]
horizontal_stride = test_param["horizontal_stride"]
conv_input_scale = test_param["conv_input_scale"]
side_input_scale = test_param["side_input_scale"]
bias_scale = test_param["bias_scale"]
padding_type = test_param["padding_type"]
with self.session() as sess, self.test_scope():
conv_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[batch_size, input_channels // 4, input_height, input_width, 4],
minval=0.0,
maxval=1.0,
dtype=dtypes.float32),
-1.0,
1.0,
dtypes.qint8,
mode="SCALED")
self.assertTrue(
sess.run(
math_ops.reduce_all(
math_ops.greater_equal(
array_ops.bitcast(conv_input, dtypes.int8), 0))))
kernel, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform([
output_channels, input_channels // 4, filter_height, filter_width,
4
],
minval=-128.0,
maxval=127.0,
dtype=dtypes.float32),
-128.0,
127.0,
dtypes.qint8,
mode="SCALED")
output_height = _CalculateConvolvedOutputDim(input_height, filter_height,
vertical_stride,
padding_type)
output_width = _CalculateConvolvedOutputDim(input_width, filter_width,
horizontal_stride,
padding_type)
tf_logging.info("output_height=%s, output_width=%s", output_height,
output_width)
side_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform([
batch_size, output_channels // 4, output_height, output_width, 4
],
minval=0.0,
maxval=1.0,
dtype=dtypes.float32),
-1.0,
1.0,
dtypes.qint8,
mode="SCALED")
biases = random_ops.random_uniform([output_channels],
minval=-10 * bias_scale,
maxval=20 * bias_scale,
dtype=dtypes.float32)
with ops.device("/cpu:0"):
t = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_Int8Roundtrip(_NchwVectCToNhwc, conv_input),
_Int8Roundtrip(_OihwVectIToHwio, kernel),
biases,
strides=[1, vertical_stride, horizontal_stride, 1],
padding=padding_type,
conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale,
side_input=(None if side_input_scale == 0.0 else _Int8Roundtrip(
_NchwVectCToNhwc, side_input)),
activation_mode="Relu" if apply_relu else "None",
data_format="NHWC",
filter_format="HWIO")
cpu_result = _Int8Roundtrip(_NhwcToNchwVectC, t)
with ops.device("/gpu:0"):
t = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
conv_input,
kernel,
biases,
strides=[1, 1, vertical_stride, horizontal_stride],
padding=padding_type,
conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale,
side_input=(None if side_input_scale == 0.0 else side_input),
activation_mode="Relu" if apply_relu else "None",
data_format="NCHW_VECT_C",
filter_format="OIHW_VECT_I")
gpu_result = t
cpu_y, gpu_y = sess.run([cpu_result, gpu_result])
self.assertAllClose(cpu_y, gpu_y, rtol=0, atol=0)
def testFusedConvInt8(self):
if not test.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(6, 1)):
tf_logging.info("int8 test skipped because not run with --config=cuda or "
"no GPUs with compute capability >= 6.1 are available.")
return
for apply_relu in [True, False]:
for test_param in self._test_params:
self.runTest(test_param, apply_relu)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test_base.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for training linear models.
## This package provides optimizers to train linear models.
@@SdcaModel
@@SparseFeatureColumn
@@SDCAOptimizer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.contrib.linear_optimizer.python.sdca_optimizer import SDCAOptimizer
from tensorflow.python.util.all_util import remove_undocumented
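# remove_undocumented prunes from this module any public symbol that is not
# named in the @@-directives of the docstring above.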
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/linear_optimizer/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for linear_optimizer.sdca_estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.linear_optimizer.python import sdca_estimator
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import test
class SDCALogisticClassifierTest(test.TestCase):
def _single_threaded_test_session(self):
# TODO(andreasst): figure out why SDCALinearRegressor needs a single
# threaded session to pass in tsan mode but SDCALogisticClassifier does not.
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
return self.test_session(config=config)
def testRealValuedFeatures(self):
"""Tests SDCALogisticClassifier works with real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([500.0, 200.0]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
with self._single_threaded_test_session():
maintenance_cost = feature_column_lib.real_valued_column(
'maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
classifier = sdca_estimator.SDCALogisticClassifier(
example_id_column='example_id',
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights')
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testRealValuedFeatureWithHigherDimension(self):
"""Tests SDCALogisticClassifier with high-dimension real valued features."""
    # input_fn is identical to the one in testRealValuedFeatures, except that
    # the two 1-dimensional dense features are replaced by a single
    # 2-dimensional feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
with self._single_threaded_test_session():
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
classifier = sdca_estimator.SDCALogisticClassifier(
example_id_column='example_id', feature_columns=[dense_feature])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testBucketizedFeatures(self):
"""Tests SDCALogisticClassifier with bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([600.0, 1000.0, 400.0]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
with self._single_threaded_test_session():
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0])
classifier = sdca_estimator.SDCALogisticClassifier(
example_id_column='example_id',
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
l2_regularization=1.0)
classifier.fit(input_fn=input_fn, steps=50)
metrics = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(metrics['accuracy'], 0.9)
def testSparseFeatures(self):
"""Tests SDCALogisticClassifier with sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
with self._single_threaded_test_session():
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
classifier = sdca_estimator.SDCALogisticClassifier(
example_id_column='example_id',
feature_columns=[price, country],
weight_column_name='weights')
classifier.fit(input_fn=input_fn, steps=50)
metrics = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(metrics['accuracy'], 0.9)
def testWeightedSparseFeatures(self):
"""Tests SDCALogisticClassifier with weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
with self._single_threaded_test_session():
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
classifier = sdca_estimator.SDCALogisticClassifier(
example_id_column='example_id',
feature_columns=[country_weighted_by_price])
classifier.fit(input_fn=input_fn, steps=50)
metrics = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(metrics['accuracy'], 0.9)
def testSparseFeaturesWithDuplicates(self):
"""Tests SDCALogisticClassifier with duplicated sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'age':
sparse_tensor.SparseTensor(
values=['20-29'] * 5 + ['31-40'] * 5,
indices=[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [1, 0],
[1, 0], [1, 0], [1, 0], [1, 0]],
dense_shape=[2, 1]),
'gender':
sparse_tensor.SparseTensor(
values=['m'] * 5 + ['f'] * 5,
indices=[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [1, 0],
[1, 0], [1, 0], [1, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
with self._single_threaded_test_session():
age = feature_column_lib.sparse_column_with_hash_bucket(
'age', hash_bucket_size=10)
gender = feature_column_lib.sparse_column_with_hash_bucket(
'gender', hash_bucket_size=10)
classifier = sdca_estimator.SDCALogisticClassifier(
example_id_column='example_id', feature_columns=[age, gender])
classifier.fit(input_fn=input_fn, steps=50)
metrics = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertLess(metrics['loss'], 0.060)
def testCrossedFeatures(self):
"""Tests SDCALogisticClassifier with crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
with self._single_threaded_test_session():
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
classifier = sdca_estimator.SDCALogisticClassifier(
example_id_column='example_id', feature_columns=[country_language])
classifier.fit(input_fn=input_fn, steps=10)
metrics = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(metrics['accuracy'], 0.9)
def testMixedFeatures(self):
"""Tests SDCALogisticClassifier with a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([900.0, 700.0, 600.0]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
with self._single_threaded_test_session():
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
classifier = sdca_estimator.SDCALogisticClassifier(
example_id_column='example_id',
feature_columns=[
price, sq_footage_bucket, country, sq_footage_country
],
weight_column_name='weights')
classifier.fit(input_fn=input_fn, steps=50)
metrics = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(metrics['accuracy'], 0.9)
def testPartitionedMixedFeatures(self):
"""Tests SDCALogisticClassifier with a mix of features (partitioned)."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([900.0, 700.0, 600.0]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
with self._single_threaded_test_session():
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
classifier = sdca_estimator.SDCALogisticClassifier(
example_id_column='example_id',
feature_columns=[
price, sq_footage_bucket, country, sq_footage_country
],
weight_column_name='weights',
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
classifier.fit(input_fn=input_fn, steps=50)
metrics = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(metrics['accuracy'], 0.9)
class SDCALinearRegressorTest(test.TestCase):
def _single_threaded_test_session(self):
# TODO(andreasst): figure out why SDCALinearRegressor needs a single
# threaded session to pass in tsan mode but SDCALogisticClassifier does not.
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
return self.test_session(config=config)
def testRealValuedLinearFeatures(self):
"""Tests SDCALinearRegressor works with real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
with self._single_threaded_test_session():
x_column = feature_column_lib.real_valued_column('x', dimension=3)
regressor = sdca_estimator.SDCALinearRegressor(
example_id_column='example_id',
feature_columns=[x_column],
weight_column_name='weights')
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testMixedFeaturesArbitraryWeights(self):
"""Tests SDCALinearRegressor works with a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
with self._single_threaded_test_session():
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
regressor = sdca_estimator.SDCALinearRegressor(
example_id_column='example_id',
feature_columns=[
price, sq_footage_bucket, country, sq_footage_country
],
l2_regularization=1.0,
weight_column_name='weights')
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testMixedFeaturesArbitraryWeightsPartitioned(self):
"""Tests SDCALinearRegressor works with a mix of features (partitioned)."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
with self._single_threaded_test_session():
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
regressor = sdca_estimator.SDCALinearRegressor(
example_id_column='example_id',
feature_columns=[
price, sq_footage_bucket, country, sq_footage_country
],
l2_regularization=1.0,
weight_column_name='weights',
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""SDCALinearRegressor works with sparse features and L1 regularization."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.4, 0.6, 0.3]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
with self._single_threaded_test_session():
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
regressor = sdca_estimator.SDCALinearRegressor(
example_id_column='example_id',
feature_columns=[price, country],
weight_column_name='weights')
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight':
regressor.get_variable_value('linear/price/weight'),
'linear/country/weights':
regressor.get_variable_value('linear/country/weights'),
}
# Regressor with L1 regularization.
regressor = sdca_estimator.SDCALinearRegressor(
example_id_column='example_id',
feature_columns=[price, country],
l1_regularization=1.0,
weight_column_name='weights')
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight':
regressor.get_variable_value('linear/price/weight'),
'linear/country/weights':
regressor.get_variable_value('linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have
# smaller L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testBiasOnly(self):
"""Tests SDCALinearRegressor has a valid bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and a
      quarter of the labels are positive. This means that the expected weight
      for the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
# LinearClassifier requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant([[1 if i % 4 == 0 else 0]
for i in range(num_examples)])
with self._single_threaded_test_session():
place_holder = feature_column_lib.real_valued_column('place_holder')
regressor = sdca_estimator.SDCALinearRegressor(
example_id_column='example_id', feature_columns=[place_holder])
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testBiasAndOtherColumns(self):
"""SDCALinearRegressor has valid bias weight with other columns present."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
      If the bias were centered, we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
      Until b/29339026 is resolved, the bias gets regularized with the same
      global value as the other columns, and so the expected weights are
      shifted:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
with self._single_threaded_test_session():
regressor = sdca_estimator.SDCALinearRegressor(
example_id_column='example_id',
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
])
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
def testBiasAndOtherColumnsFabricatedCentered(self):
"""SDCALinearRegressor has valid bias weight when instances are centered."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
with self._single_threaded_test_session():
regressor = sdca_estimator.SDCALinearRegressor(
example_id_column='example_id',
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
])
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/linear_optimizer/python/sdca_estimator_test.py
|
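Editorial note on the L1 test above: SDCA with `l1_regularization > 0` drives small weights toward zero, which is why the test asserts a smaller L1 norm for the regularized weights. The following is a minimal NumPy sketch of the soft-thresholding (L1 proximal) step behind that intuition; it is illustrative only and not the library's actual update rule.
```python
import numpy as np


def soft_threshold(weights, l1_strength):
  """Proximal operator of the L1 norm: shrinks every weight toward zero."""
  return np.sign(weights) * np.maximum(np.abs(weights) - l1_strength, 0.0)


w = np.array([0.8, -0.05, 1.4, 0.02])
shrunk = soft_threshold(w, 0.1)                # ~[0.7, 0.0, 1.3, 0.0]
assert np.abs(shrunk).sum() < np.abs(w).sum()  # smaller L1 norm, as asserted
```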
"""Linear Estimators."""
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.linear_optimizer.python.ops import sdca_ops
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# TODO(sibyl-vie3Poto, sibyl-Aix6ihai): Add proper testing to this wrapper once the API is
# stable.
class SDCAOptimizer(object):
"""Wrapper class for SDCA optimizer.
The wrapper is currently meant for use as an optimizer within a tf.learn
Estimator.
Example usage:
```python
real_feature_column = real_valued_column(...)
sparse_feature_column = sparse_column_with_hash_bucket(...)
sdca_optimizer = linear.SDCAOptimizer(example_id_column='example_id',
num_loss_partitions=1,
num_table_shards=1,
symmetric_l2_regularization=2.0)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[real_feature_column, sparse_feature_column],
weight_column_name=...,
optimizer=sdca_optimizer)
classifier.fit(input_fn_train, steps=50)
classifier.evaluate(input_fn=input_fn_eval)
```
Here the expectation is that the `input_fn_*` functions passed to train and
evaluate return a pair (dict, label_tensor) where dict has `example_id_column`
as `key` whose value is a `Tensor` of shape [batch_size] and dtype string.
  num_loss_partitions defines the number of partitions of the global loss
  function and should be set to `(#concurrent train ops per worker)
  x (#workers)`.
  Convergence of the (global) loss is guaranteed if `num_loss_partitions` is
  larger than or equal to the above product. Larger values for
  `num_loss_partitions` lead to
slower convergence. The recommended value for `num_loss_partitions` in
`tf.learn` (where currently there is one process per worker) is the number
of workers running the train steps. It defaults to 1 (single machine).
`num_table_shards` defines the number of shards for the internal state
table, typically set to match the number of parameter servers for large
data sets. You can also specify a `partitioner` object to partition the primal
weights during training (`div` partitioning strategy will be used).
"""
def __init__(self,
example_id_column,
num_loss_partitions=1,
num_table_shards=None,
symmetric_l1_regularization=0.0,
symmetric_l2_regularization=1.0,
adaptive=True,
partitioner=None):
self._example_id_column = example_id_column
self._num_loss_partitions = num_loss_partitions
self._num_table_shards = num_table_shards
self._symmetric_l1_regularization = symmetric_l1_regularization
self._symmetric_l2_regularization = symmetric_l2_regularization
self._adaptive = adaptive
self._partitioner = partitioner
def get_name(self):
return 'SDCAOptimizer'
@property
def example_id_column(self):
return self._example_id_column
@property
def num_loss_partitions(self):
return self._num_loss_partitions
@property
def num_table_shards(self):
return self._num_table_shards
@property
def symmetric_l1_regularization(self):
return self._symmetric_l1_regularization
@property
def symmetric_l2_regularization(self):
return self._symmetric_l2_regularization
@property
def adaptive(self):
return self._adaptive
@property
def partitioner(self):
return self._partitioner
def get_train_step(self, columns_to_variables, weight_column_name, loss_type,
features, targets, global_step):
"""Returns the training operation of an SdcaModel optimizer."""
def _dense_tensor_to_sparse_feature_column(dense_tensor):
"""Returns SparseFeatureColumn for the input dense_tensor."""
ignore_value = 0.0
sparse_indices = array_ops.where(
math_ops.not_equal(dense_tensor,
math_ops.cast(ignore_value, dense_tensor.dtype)))
sparse_values = array_ops.gather_nd(dense_tensor, sparse_indices)
      # TODO(sibyl-Aix6ihai, sibyl-vie3Poto): Make this efficient; SDCA now
      # supports very sparse features with and without weights.
return SparseFeatureColumn(
array_ops.reshape(
array_ops.split(
value=sparse_indices, num_or_size_splits=2, axis=1)[0], [-1]),
array_ops.reshape(
array_ops.split(
value=sparse_indices, num_or_size_splits=2, axis=1)[1], [-1]),
array_ops.reshape(math_ops.cast(sparse_values, dtypes.float32), [-1]))
def _training_examples_and_variables():
"""Returns dictionaries for training examples and variables."""
batch_size = targets.get_shape()[0]
# Iterate over all feature columns and create appropriate lists for dense
# and sparse features as well as dense and sparse weights (variables) for
# SDCA.
# TODO(sibyl-vie3Poto): Reshape variables stored as values in column_to_variables
# dict as 1-dimensional tensors.
dense_features, sparse_features, sparse_feature_with_values = [], [], []
dense_feature_weights = []
sparse_feature_weights, sparse_feature_with_values_weights = [], []
for column in sorted(columns_to_variables.keys(), key=lambda x: x.key):
transformed_tensor = features[column]
if isinstance(column, layers.feature_column._RealValuedColumn): # pylint: disable=protected-access
# A real-valued column corresponds to a dense feature in SDCA. A
# transformed tensor corresponding to a RealValuedColumn should have
# rank at most 2. In order to be passed to SDCA, its rank needs to be
# exactly 2 (i.e., its shape should be [batch_size, column.dim]).
check_rank_op = control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(transformed_tensor), 2),
['transformed_tensor should have rank at most 2.'])
# Reshape to [batch_size, dense_column_dimension].
with ops.control_dependencies([check_rank_op]):
transformed_tensor = array_ops.reshape(transformed_tensor, [
array_ops.shape(transformed_tensor)[0], -1
])
dense_features.append(transformed_tensor)
# For real valued columns, the variables list contains exactly one
# element.
dense_feature_weights.append(columns_to_variables[column][0])
elif isinstance(column, layers.feature_column._BucketizedColumn): # pylint: disable=protected-access
# A bucketized column corresponds to a sparse feature in SDCA. The
# bucketized feature is "sparsified" for SDCA by converting it to a
# SparseFeatureColumn representing the one-hot encoding of the
# bucketized feature.
#
# TODO(sibyl-vie3Poto): Explore whether it is more efficient to translate a
# bucketized feature column to a dense feature in SDCA. This will
# likely depend on the number of buckets.
dense_bucket_tensor = column._to_dnn_input_layer(transformed_tensor) # pylint: disable=protected-access
sparse_feature_column = _dense_tensor_to_sparse_feature_column(
dense_bucket_tensor)
sparse_feature_with_values.append(sparse_feature_column)
          # If a partitioner was used during variable creation, we will have
          # a list of more than one Variable here.
vars_to_append = columns_to_variables[column][0]
if len(columns_to_variables[column]) > 1:
vars_to_append = columns_to_variables[column]
sparse_feature_with_values_weights.append(vars_to_append)
elif isinstance(
column,
(
layers.feature_column._WeightedSparseColumn, # pylint: disable=protected-access
layers.feature_column._CrossedColumn, # pylint: disable=protected-access
layers.feature_column._SparseColumn)): # pylint: disable=protected-access
if isinstance(column, layers.feature_column._WeightedSparseColumn): # pylint: disable=protected-access
id_tensor = column.id_tensor(transformed_tensor)
weight_tensor = array_ops.reshape(
column.weight_tensor(transformed_tensor).values, [-1])
else:
id_tensor = transformed_tensor
weight_tensor = array_ops.ones(
[array_ops.shape(id_tensor.indices)[0]], dtypes.float32)
example_ids = array_ops.reshape(id_tensor.indices[:, 0], [-1])
flat_ids = array_ops.reshape(id_tensor.values, [-1])
# Prune invalid IDs (< 0) from the flat_ids, example_ids, and
# weight_tensor. These can come from looking up an OOV entry in the
# vocabulary (default value being -1).
is_id_valid = math_ops.greater_equal(flat_ids, 0)
flat_ids = array_ops.boolean_mask(flat_ids, is_id_valid)
example_ids = array_ops.boolean_mask(example_ids, is_id_valid)
weight_tensor = array_ops.boolean_mask(weight_tensor, is_id_valid)
projection_length = math_ops.reduce_max(flat_ids) + 1
          # Project ids based on example ids so that we can dedup ids that
          # occur multiple times for a single example.
projected_ids = projection_length * example_ids + flat_ids
# Remove any redundant ids.
ids, idx = array_ops.unique(projected_ids)
# Keep only one example id per duplicated ids.
example_ids_filtered = math_ops.unsorted_segment_min(
example_ids, idx,
array_ops.shape(ids)[0])
          # Reproject ids back into the feature id space.
reproject_ids = (ids - projection_length * example_ids_filtered)
weights = array_ops.reshape(
math_ops.unsorted_segment_sum(weight_tensor, idx,
array_ops.shape(ids)[0]), [-1])
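          # Editorial worked example (not original code): with
          # example_ids=[0, 0, 1], flat_ids=[3, 3, 2], weight_tensor=[1, 1, 1]
          # we get projection_length=4 and projected_ids=[3, 3, 6];
          # unique() yields ids=[3, 6], idx=[0, 0, 1]; segment_min recovers
          # example_ids_filtered=[0, 1]; reprojection gives feature ids
          # [3, 2]; and segment_sum merges the duplicate weight into [2, 1].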
sparse_feature_with_values.append(
SparseFeatureColumn(example_ids_filtered, reproject_ids, weights))
          # If a partitioner was used during variable creation, we will have
          # a list of more than one Variable here.
vars_to_append = columns_to_variables[column][0]
if len(columns_to_variables[column]) > 1:
vars_to_append = columns_to_variables[column]
sparse_feature_with_values_weights.append(vars_to_append)
else:
raise ValueError('SDCAOptimizer does not support column type %s.' %
type(column).__name__)
example_weights = array_ops.reshape(
features[weight_column_name],
shape=[-1]) if weight_column_name else array_ops.ones([batch_size])
example_ids = features[self._example_id_column]
sparse_feature_with_values.extend(sparse_features)
sparse_feature_with_values_weights.extend(sparse_feature_weights)
examples = dict(
sparse_features=sparse_feature_with_values,
dense_features=dense_features,
example_labels=math_ops.cast(
array_ops.reshape(targets, shape=[-1]), dtypes.float32),
example_weights=example_weights,
example_ids=example_ids)
sdca_variables = dict(
sparse_features_weights=sparse_feature_with_values_weights,
dense_features_weights=dense_feature_weights)
return examples, sdca_variables
training_examples, training_variables = _training_examples_and_variables()
sdca_model = sdca_ops.SdcaModel(
examples=training_examples,
variables=training_variables,
options=dict(
symmetric_l1_regularization=self._symmetric_l1_regularization,
symmetric_l2_regularization=self._symmetric_l2_regularization,
adaptive=self._adaptive,
num_loss_partitions=self._num_loss_partitions,
num_table_shards=self._num_table_shards,
loss_type=loss_type))
train_op = sdca_model.minimize(global_step=global_step)
return sdca_model, train_op
|
tensorflow-master
|
tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py
|
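Editorial note: `_dense_tensor_to_sparse_feature_column` above turns a dense `[batch_size, dim]` tensor into parallel example-index / feature-index / value lists. Below is a self-contained NumPy sketch of the same transformation (assumed equivalent in spirit; the TF version uses `array_ops.where`, `gather_nd` and `split`).
```python
import numpy as np


def dense_to_sparse_triples(dense):
  """Returns (example_indices, feature_indices, values) of the non-zeros."""
  dense = np.asarray(dense, dtype=np.float32)
  example_ids, feature_ids = np.nonzero(dense)   # like array_ops.where
  values = dense[example_ids, feature_ids]       # like array_ops.gather_nd
  return example_ids, feature_ids, values


ex, feat, val = dense_to_sparse_triples([[0.0, 2.5], [1.0, 0.0]])
print(ex, feat, val)  # [0 1] [1 0] [2.5 1.]
```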
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import session_run_hook
def _head_is_valid_for_sdca(head):
  """Returns true if the provided head is supported by SDCAOptimizer."""
  # pylint: disable=protected-access
  return isinstance(head, (head_lib._BinaryLogisticHead,
                           head_lib._BinarySvmHead,
                           head_lib._RegressionHead))
  # pylint: enable=protected-access
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
columns_to_variables):
"""Adds a fake bias feature column filled with all 1s."""
# TODO(b/31008490): Move definition to a common constants place.
bias_column_name = "tf_virtual_bias_column"
  if any(col.name == bias_column_name for col in feature_columns):
raise ValueError("%s is a reserved column name." % bias_column_name)
if not feature_columns:
raise ValueError("feature_columns can't be empty.")
# Loop through input tensors until we can figure out batch_size.
batch_size = None
for column in columns_to_tensors.values():
if isinstance(column, tuple):
column = column[0]
if isinstance(column, sparse_tensor.SparseTensor):
shape = tensor_util.constant_value(column.dense_shape)
if shape is not None:
batch_size = shape[0]
break
else:
batch_size = array_ops.shape(column)[0]
break
if batch_size is None:
raise ValueError("Could not infer batch size from input features.")
bias_column = layers.real_valued_column(bias_column_name)
columns_to_tensors[bias_column] = array_ops.ones(
[batch_size, 1], dtype=dtypes.float32)
columns_to_variables[bias_column] = [bias_variable]
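# Editorial note (not original code): the all-ones column added above reduces
# the bias to an ordinary weight, e.g. features [[2.], [3.]] effectively
# become [[2., 1.], [3., 1.]] so the weighted sum computes 2*w + b and
# 3*w + b.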
def sdca_model_fn(features, labels, mode, params, config=None):
"""A model_fn for linear models that use the SDCA optimizer.
Args:
features: A dict of `Tensor` keyed by column name.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` with values in the set {0, 1}.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance. Type must be one of `_BinarySvmHead`,
`_RegressionHead` or `_BinaryLogisticHead`.
* feature_columns: An iterable containing all the feature columns used by
the model.
* l1_regularization: Global (across all examples) L1-regularization
parameter.
* l2_regularization: Global (across all examples) L2-regularization
parameter.
* num_loss_partitions: Number of partitions of the global loss function
optimized by `SDCAOptimizer`.
* weight_column_name: A string defining the weight feature column, or
None if there are no weights.
* update_weights_hook: A `SessionRunHook` object or None. Used to update
model weights.
config: `RunConfig` object to configure the runtime settings.
Returns:
A `ModelFnOps` instance.
Raises:
    ValueError: If the type of head is not one of `_BinarySvmHead`,
      `_RegressionHead` or `_BinaryLogisticHead`.
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
example_id_column = params["example_id_column"]
l1_regularization = params["l1_regularization"]
l2_regularization = params["l2_regularization"]
num_loss_partitions = params["num_loss_partitions"]
weight_column_name = params["weight_column_name"]
update_weights_hook = params.get("update_weights_hook", None)
partitioner = params["partitioner"]
loss_type = None
if isinstance(head, head_lib._BinarySvmHead): # pylint: disable=protected-access
loss_type = "hinge_loss"
elif isinstance(head, head_lib._BinaryLogisticHead): # pylint: disable=protected-access
loss_type = "logistic_loss"
elif isinstance(head, head_lib._RegressionHead): # pylint: disable=protected-access
loss_type = "squared_loss"
else:
raise ValueError("Unsupported head type: {}".format(type(head)))
assert head.logits_dimension == 1, (
"SDCA only applies to logits_dimension=1.")
# Update num_loss_partitions based on number of workers.
n_loss_partitions = num_loss_partitions or max(1, config.num_worker_replicas)
optimizer = sdca_optimizer.SDCAOptimizer(
example_id_column=example_id_column,
num_loss_partitions=n_loss_partitions,
symmetric_l1_regularization=l1_regularization,
symmetric_l2_regularization=l2_regularization,
partitioner=partitioner)
parent_scope = "linear"
with variable_scope.variable_scope(
values=features.values(), name_or_scope=parent_scope,
partitioner=partitioner) as scope:
features = features.copy()
features.update(layers.transform_features(features, feature_columns))
logits, columns_to_variables, bias = (
layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=1,
scope=scope))
_add_bias_column(feature_columns, features, bias, columns_to_variables)
def _train_op_fn(unused_loss):
global_step = training_util.get_global_step()
sdca_model, train_op = optimizer.get_train_step(
columns_to_variables, weight_column_name, loss_type, features, labels,
global_step)
if update_weights_hook is not None:
update_weights_hook.set_parameters(sdca_model, train_op)
return train_op
model_fn_ops = head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_op_fn,
logits=logits)
if update_weights_hook is not None:
return model_fn_ops._replace(training_chief_hooks=(
model_fn_ops.training_chief_hooks + [update_weights_hook]))
return model_fn_ops
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
"""SessionRunHook to update and shrink SDCA model weights."""
def __init__(self):
pass
def set_parameters(self, sdca_model, train_op):
self._sdca_model = sdca_model
self._train_op = train_op
def begin(self):
"""Construct the update_weights op.
The op is implicitly added to the default graph.
"""
self._update_op = self._sdca_model.update_weights(self._train_op)
def before_run(self, run_context):
"""Return the update_weights op so that it is executed during this run."""
return session_run_hook.SessionRunArgs(self._update_op)
class _SDCAEstimator(estimator.Estimator):
"""Base estimator class for linear models using the SDCA optimizer.
This class should not be used directly. Rather, users should call one of the
derived estimators.
"""
def __init__(self,
example_id_column,
feature_columns,
weight_column_name=None,
model_dir=None,
head=None,
l1_regularization=0.0,
l2_regularization=1.0,
num_loss_partitions=None,
config=None,
feature_engineering_fn=None,
partitioner=None):
"""Construct a `_SDCAEstimator` estimator object.
Args:
example_id_column: A string defining the feature column name representing
example ids. Used to initialize the underlying SDCA optimizer.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
weight_column_name: A string defining feature column name representing
      weights. It is used to down-weight or boost examples during training. It
will be multiplied by the loss of the example.
model_dir: Directory to save model parameters, graph etc. This can also be
used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
head: type of head. Currently, _BinaryLogisticHead and _BinarySvmHead are
supported for classification and _RegressionHead for regression. It
should be a subclass of _SingleHead.
l1_regularization: L1-regularization parameter. Refers to global L1
regularization (across all examples).
l2_regularization: L2-regularization parameter. Refers to global L2
regularization (across all examples).
num_loss_partitions: number of partitions of the (global) loss function
optimized by the underlying optimizer (SDCAOptimizer).
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
partitioner: Variable partitioner for the primal weights (`div`
partitioning strategy will be used).
Returns:
A `_SDCAEstimator` estimator.
Raises:
ValueError: if head is not supported by SDCA.
"""
self._feature_columns = tuple(feature_columns or [])
assert self._feature_columns
if not _head_is_valid_for_sdca(head):
raise ValueError(
"head type: {} is not supported. Supported head types: "
"_BinaryLogisticHead, _BinarySvmHead and _RegressionHead.".format(
type(head)))
assert head.logits_dimension == 1
params = {
"head": head,
"feature_columns": feature_columns,
"example_id_column": example_id_column,
"num_loss_partitions": num_loss_partitions,
"l1_regularization": l1_regularization,
"l2_regularization": l2_regularization,
"weight_column_name": weight_column_name,
"update_weights_hook": _SdcaUpdateWeightsHook(),
"partitioner": partitioner,
}
super(_SDCAEstimator, self).__init__(
model_fn=sdca_model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
class SDCALogisticClassifier(_SDCAEstimator):
"""Logistic regression binary classifier using the SDCA optimizer.
Example usage:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
classifier = SDCALogisticClassifier(
example_id_column='example_id',
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
weight_column_name=...,
l2_regularization=...,
num_loss_partitions=...,
)
  # Input builders
  # returns x, y (where y is the label Tensor with 0/1 values)
  def input_fn_{train, eval}:
    ...
  # returns x (features dict)
  def input_fn_test:
    ...
classifier.fit(input_fn=input_fn_train)
classifier.evaluate(input_fn=input_fn_eval)
# Returns predicted classes.
classifier.predict_classes(input_fn=input_fn_test)
# Returns predicted probabilities.
classifier.predict_proba(input_fn=input_fn_test)
```
The input_fn provided to `fit`, `evaluate` and predict_* methods should return
the following features, otherwise there will be a `KeyError`:
* A feature with `key=example_id_column` whose value is a `Tensor` of dtype
string.
* If `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* For each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name` whose
`value` is a `SparseTensor`
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`
"""
def __init__(self,
example_id_column,
feature_columns,
weight_column_name=None,
model_dir=None,
l1_regularization=0.0,
l2_regularization=1.0,
num_loss_partitions=None,
config=None,
feature_engineering_fn=None,
partitioner=None):
"""Construct a `SDCALogisticClassifier` object.
Args:
example_id_column: A string defining the feature column name representing
example ids. Used to initialize the underlying SDCA optimizer.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the iterable should derive from `FeatureColumn`.
Note that the order of the items is ignored at model construction time.
weight_column_name: A string defining feature column name representing
      weights. It is used to down-weight or boost examples during training. It
will be multiplied by the loss of the example.
model_dir: Directory to save model parameters, graph etc. This can also be
used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
l1_regularization: L1-regularization parameter. Refers to global L1
regularization (across all examples).
l2_regularization: L2-regularization parameter. Refers to global L2
regularization (across all examples).
num_loss_partitions: Number of partitions of the global loss function
optimized by the underlying optimizer (SDCAOptimizer).
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
partitioner: Variable partitioner for the primal weights (`div`
partitioning strategy will be used).
Returns:
      A `SDCALogisticClassifier` estimator.
"""
super(SDCALogisticClassifier, self).__init__(
example_id_column=example_id_column,
feature_columns=feature_columns,
weight_column_name=weight_column_name,
model_dir=model_dir,
head=head_lib.multi_class_head(
n_classes=2, weight_column_name=weight_column_name),
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
num_loss_partitions=num_loss_partitions,
config=config,
        feature_engineering_fn=feature_engineering_fn,
partitioner=partitioner)
def predict_classes(self, input_fn=None):
"""Runs inference to determine the predicted class.
Args:
input_fn: The input function providing features.
Returns:
A generator of predicted classes for the features provided by input_fn.
"""
key = prediction_key.PredictionKey.CLASSES
predictions = super(SDCALogisticClassifier, self).predict(
input_fn=input_fn, outputs=[key])
return (pred[key] for pred in predictions)
def predict_proba(self, input_fn=None):
"""Runs inference to determine the class probability predictions.
Args:
input_fn: The input function providing features.
Returns:
A generator of predicted class probabilities for the features provided by
input_fn.
"""
key = prediction_key.PredictionKey.PROBABILITIES
predictions = super(SDCALogisticClassifier, self).predict(
input_fn=input_fn, outputs=[key])
return (pred[key] for pred in predictions)
class SDCALinearRegressor(_SDCAEstimator):
"""Linear regression model using SDCA to solve the underlying optimization.
Example usage:
```python
real_column_a = real_valued_column(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
regressor = SDCALinearRegressor(
example_id_column='example_id',
      feature_columns=[real_column_a, sparse_column_b],
weight_column_name=...,
l2_regularization=...,
num_loss_partitions=...,
)
  # Input builders
  # returns x, y (where y is the label Tensor with 0/1 values)
  def input_fn_{train, eval}:
    ...
  # returns x (features dict)
  def input_fn_test:
    ...
regressor.fit(input_fn=input_fn_train)
regressor.evaluate(input_fn=input_fn_eval)
regressor.predict_scores(input_fn=input_fn_test) # returns predicted scores.
```
The input_fn provided to `fit`, `evaluate` and predict_* methods should return
the following features, otherwise there will be a `KeyError`:
* A feature with `key=example_id_column` whose value is a `Tensor` of dtype
string.
* If `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* For each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name` whose
`value` is a `SparseTensor`
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`
"""
def __init__(self,
example_id_column,
feature_columns,
weight_column_name=None,
model_dir=None,
l1_regularization=0.0,
l2_regularization=1.0,
num_loss_partitions=None,
config=None,
feature_engineering_fn=None,
partitioner=None):
"""Construct a `SDCALinearRegressor` estimator object.
Args:
example_id_column: A string defining the feature column name representing
example ids. Used to initialize the underlying SDCA optimizer.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the iterable should derive from `FeatureColumn`.
Note that the order of the items is ignored at model construction time.
weight_column_name: A string defining feature column name representing
      weights. It is used to down-weight or boost examples during training. It
will be multiplied by the loss of the example.
model_dir: Directory to save model parameters, graph etc. This can also be
used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
l1_regularization: L1-regularization parameter. Refers to global L1
regularization (across all examples).
l2_regularization: L2-regularization parameter. Refers to global L2
regularization (across all examples).
num_loss_partitions: number of partitions of the (global) loss function
optimized by the underlying optimizer (SDCAOptimizer).
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
partitioner: Variable partitioner for the primal weights (`div`
partitioning strategy will be used).
Returns:
A `SDCALinearRegressor` estimator.
"""
super(SDCALinearRegressor, self).__init__(
example_id_column=example_id_column,
feature_columns=feature_columns,
weight_column_name=weight_column_name,
model_dir=model_dir,
head=head_lib.regression_head(weight_column_name=weight_column_name),
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
num_loss_partitions=num_loss_partitions,
config=config,
        feature_engineering_fn=feature_engineering_fn,
partitioner=partitioner)
def predict_scores(self, input_fn):
"""Returns predicted scores for given features.
Args:
input_fn: The input function providing features.
Returns:
A generator of predicted scores for the features provided by input_fn.
"""
key = prediction_key.PredictionKey.SCORES
predictions = super(SDCALinearRegressor, self).predict(
input_fn=input_fn, outputs=[key])
return (pred[key] for pred in predictions)
|
tensorflow-master
|
tensorflow/contrib/linear_optimizer/python/sdca_estimator.py
|
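Editorial note: `sdca_model_fn` above maps head types to the loss_type strings 'hinge_loss', 'logistic_loss' and 'squared_loss'. The sketch below gives illustrative NumPy definitions of those losses (the standard textbook formulas; the exact scaling inside the SDCA kernels may differ) and shows why the tests that follow assert an initial loss of log(2) ~= 0.693147 when all weights start at zero.
```python
import numpy as np


def logistic_loss(label, logit):      # label in {0, 1}
  return np.log1p(np.exp(-(2.0 * label - 1.0) * logit))


def hinge_loss(label, prediction):    # label in {0, 1}
  return np.maximum(0.0, 1.0 - (2.0 * label - 1.0) * prediction)


def squared_loss(label, prediction):
  return 0.5 * (label - prediction) ** 2


print(logistic_loss(1.0, 0.0))  # 0.6931... = log(2), the tests' start loss
```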
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SdcaModel (deprecated).
This module and all its submodules are deprecated. To UPDATE or USE linear
optimizers, please check its latest version in core:
tensorflow_estimator/python/estimator/canned/linear_optimizer/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import threading
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.core.example import example_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sdca_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import googletest
_MAX_ITERATIONS = 100
_SHARD_NUMBERS = [None, 1, 3]
_NUM_LOSS_PARTITIONS = [4]
def make_example_proto(feature_dict, target, value=1.0):
e = example_pb2.Example()
features = e.features
features.feature['target'].float_list.value.append(target)
for key, values in feature_dict.items():
features.feature[key + '_indices'].int64_list.value.extend(values)
features.feature[key + '_values'].float_list.value.extend([value] *
len(values))
return e
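# Editorial note (not original code): make_example_proto({'age': [0],
# 'gender': [1]}, 1) yields an Example whose features are
#   target         -> float_list [1.0]
#   age_indices    -> int64_list [0],  age_values    -> float_list [1.0]
#   gender_indices -> int64_list [1],  gender_values -> float_list [1.0]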
def make_example_dict(example_protos, example_weights):
def parse_examples(example_protos):
features = {
'target':
parsing_ops.FixedLenFeature(
shape=[1], dtype=dtypes.float32, default_value=0),
'age_indices':
parsing_ops.VarLenFeature(dtype=dtypes.int64),
'age_values':
parsing_ops.VarLenFeature(dtype=dtypes.float32),
'gender_indices':
parsing_ops.VarLenFeature(dtype=dtypes.int64),
'gender_values':
parsing_ops.VarLenFeature(dtype=dtypes.float32)
}
return parsing_ops.parse_example(
[e.SerializeToString() for e in example_protos], features)
parsed = parse_examples(example_protos)
sparse_features = [
SparseFeatureColumn(
array_ops.reshape(
array_ops.split(
value=parsed['age_indices'].indices,
num_or_size_splits=2,
axis=1)[0], [-1]),
array_ops.reshape(parsed['age_indices'].values, [-1]),
array_ops.reshape(parsed['age_values'].values, [-1])),
SparseFeatureColumn(
array_ops.reshape(
array_ops.split(
value=parsed['gender_indices'].indices,
num_or_size_splits=2,
axis=1)[0], [-1]),
array_ops.reshape(parsed['gender_indices'].values, [-1]),
array_ops.reshape(parsed['gender_values'].values, [-1]))
]
return dict(
sparse_features=sparse_features,
dense_features=[],
example_weights=example_weights,
example_labels=array_ops.reshape(parsed['target'], [-1]),
example_ids=['%d' % i for i in range(0, len(example_protos))])
def make_random_examples_and_variables_dicts(num_examples, dim, num_non_zero):
random.seed(1)
sparse_features = [
SparseFeatureColumn(
[i for i in range(num_examples) for _ in range(num_non_zero)], [
i for _ in range(num_examples)
for i in random.sample(range(dim), num_non_zero)
],
[num_non_zero**(-0.5) for _ in range(num_examples * num_non_zero)])
]
examples_dict = dict(
sparse_features=sparse_features,
dense_features=[],
example_weights=[random.random() for _ in range(num_examples)],
example_labels=[
1. if random.random() > 0.5 else 0. for _ in range(num_examples)
],
example_ids=[str(i) for i in range(num_examples)])
weights = variables_lib.VariableV1(
array_ops.zeros([dim], dtype=dtypes.float32))
variables_dict = dict(
sparse_features_weights=[weights],
dense_features_weights=[])
return examples_dict, variables_dict
def make_variable_dict(max_age, max_gender, num_shards=None, partitioned=False):
# TODO(sibyl-toe9oF2e): Figure out how to derive max_age & max_gender from
# examples_dict.
partitioner = None
if partitioned:
partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2,
axis=0)
with variable_scope.variable_scope(
name_or_scope=('variables/shard_{}'.format(num_shards)
if num_shards else 'variables'),
partitioner=partitioner):
age_weights = variable_scope.get_variable(
name='age',
initializer=array_ops.zeros([max_age + 1], dtype=dtypes.float32))
gender_weights = variable_scope.get_variable(
name='gender',
initializer=array_ops.zeros([max_gender + 1], dtype=dtypes.float32))
return dict(
sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
labels):
"""Creates examples and variables dictionaries for dense features.
Variables shapes are inferred from the list of dense feature values passed as
argument.
Args:
dense_features_values: The values of the dense features
weights: The example weights.
labels: The example labels.
Returns:
One dictionary for the examples and one for the variables.
"""
dense_tensors = []
dense_weights = []
for dense_feature in dense_features_values:
dense_tensor = ops.convert_to_tensor(dense_feature, dtype=dtypes.float32)
check_shape_op = control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(dense_tensor), 2),
['dense_tensor shape must be [batch_size, dimension] or [batch_size]'])
# Reshape to [batch_size, dense_column_dimension].
with ops.control_dependencies([check_shape_op]):
dense_tensor = array_ops.reshape(
dense_tensor, [dense_tensor.get_shape().as_list()[0], -1])
dense_tensors.append(dense_tensor)
# Add variables of shape [feature_column_dimension].
dense_weights.append(
variables_lib.VariableV1(
array_ops.zeros(
[dense_tensor.get_shape().as_list()[1]], dtype=dtypes.float32)))
examples_dict = dict(
sparse_features=[],
dense_features=dense_tensors,
example_weights=weights,
example_labels=labels,
example_ids=['%d' % i for i in range(0, len(labels))])
variables_dict = dict(
sparse_features_weights=[], dense_features_weights=dense_weights)
return examples_dict, variables_dict
def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
return math_ops.cast(
math_ops.greater_equal(predictions,
array_ops.ones_like(predictions) * cutoff),
dtype=dtypes.int32)
def get_binary_predictions_for_hinge(predictions):
return math_ops.cast(
math_ops.greater_equal(predictions, array_ops.zeros_like(predictions)),
dtype=dtypes.int32)
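# Editorial worked example (not original code): for logistic predictions
# [0.3, 0.7] with the default 0.5 cutoff the first helper yields [0, 1];
# for hinge predictions [-0.2, 0.4] the sign test in the second yields [0, 1].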
# TODO(sibyl-Mooth6ku): Add tests that exercise L1 and Shrinking.
# TODO(sibyl-vie3Poto): Refactor tests to avoid repetition of boilerplate code.
class SdcaModelTest(TensorFlowTestCase):
"""Base SDCA optimizer test class for any loss type."""
def _single_threaded_test_session(self):
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
return self.test_session(use_gpu=False, config=config)
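  # Editorial note (an assumption, not original documentation): the tests run
  # single-threaded so that SDCA's sequential updates, and hence the loss and
  # duality-gap assertions, stay deterministic.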
class SdcaWithLogisticLossTest(SdcaModelTest):
"""SDCA optimizer test class for logistic loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testPartitionedPrimals(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards, partitioned=True)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testSomePartitionedPrimals(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [0],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
# Explicitly make age a [1]-shaped Variable (which cannot be
# partitioned), while making gender a PartitionedVariable.
age_weights = variables_lib.VariableV1(
array_ops.zeros([1], dtype=dtypes.float32))
with variable_scope.variable_scope(
name_or_scope=('variables/shard_{}'.format(num_shards)
if num_shards else 'variables'),
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0)):
gender_weights = variable_scope.get_variable(
name='gender',
initializer=array_ops.zeros([2], dtype=dtypes.float32))
variables = dict(
sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
        # 0.593014 is the optimal regularized_loss.
        # 0.512591 is the unregularized_loss at that optimum.
self.assertAllClose(0.512591, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.593014, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testSparseRandom(self):
dim = 20
num_examples = 1000
# Number of non-zero features per example.
non_zeros = 10
# Setup test data.
with self._single_threaded_test_session():
examples, variables = make_random_examples_and_variables_dicts(
num_examples, dim, non_zeros)
options = dict(
symmetric_l2_regularization=.1,
symmetric_l1_regularization=0,
num_table_shards=1,
adaptive=False,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
for _ in range(4):
train_op.run()
lr.update_weights(train_op).run()
# Duality gap is 1.4e-5.
# It would be 0.01 without shuffling and 0.02 with adaptive sampling.
self.assertNear(0.0, lr.approximate_duality_gap().eval(), err=1e-3)
def testSparseDuplicate(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0] * 5,
'gender': [0] * 5
}, 0),
make_example_proto({
'age': [1] * 5,
'gender': [1] * 5
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'Duplicate'):
train_op.run()
def testDistributedSimple(self):
    # Distributed SDCA may not converge if the workers concurrently update
    # the same example. In this test the examples are partitioned across
    # workers; the examples are the same for all workers, only the
    # example_ids differ.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
examples = make_example_dict(example_protos, example_weights)
example_ids = array_ops.placeholder(
dtypes.string, shape=(len(example_weights),))
examples['example_ids'] = example_ids
variables = make_variable_dict(1, 1)
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
for num_shards in _SHARD_NUMBERS:
for num_loss_partitions in _NUM_LOSS_PARTITIONS:
with self._single_threaded_test_session():
options = dict(
            # Keep the same solution as for testSimple: since the number of
            # examples is multiplied by num_loss_partitions, also multiply
# L2 by the same value.
symmetric_l2_regularization=num_loss_partitions,
symmetric_l1_regularization=0,
loss_type='logistic_loss',
num_table_shards=num_shards,
num_loss_partitions=num_loss_partitions)
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
def minimize(worker_id):
with self._single_threaded_test_session():
feed_dict = {example_ids: [
str(i + worker_id*len(example_weights)) for i in range(
len(example_weights))]}
for _ in range(_MAX_ITERATIONS):
train_op.run(feed_dict=feed_dict) # pylint: disable=cell-var-from-loop
threads = []
for worker_id in range(num_loss_partitions):
threads.append(threading.Thread(target=minimize, args=(worker_id,)))
threads[-1].start()
for t in threads:
t.join()
lr.update_weights(train_op).run(feed_dict={
example_ids: [str(i) for i in range(len(example_weights))]})
# Test only the unregularized loss because the optimal value of the
# regularized loss depends on num_loss_partitions.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.02)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertNear(0.0, lr.approximate_duality_gap().eval(), 0.02)
def testSimpleNoL2(self):
# Same as test above (so comments from above apply) but without an L2.
# The algorithm should behave as if we have an L2 of 1 in optimization but
# 0 in regularized_loss.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=0,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# There is neither L1 nor L2 loss, so regularized and unregularized
# losses should be exactly the same.
self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
self.assertAllClose(0.40244, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testSomeUnweightedExamples(self):
# Setup test data with 4 examples, but should produce the same
# results as testSimple.
example_protos = [
# Will be used.
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
# Will be ignored.
make_example_proto({
'age': [1],
'gender': [0]
}, 0),
# Will be used.
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
# Will be ignored.
make_example_proto({
'age': [1],
'gender': [0]
}, 1),
]
example_weights = [1.0, 0.0, 1.0, 0.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
# Only use examples 0 and 2
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testFractionalExampleLabel(self):
# Setup test data with 1 positive, and 1 mostly-negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0.1),
make_example_proto({
'age': [1],
'gender': [1]
}, 0.9),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
with self.assertRaisesOpError(
'Only labels of 0.0 or 1.0 are supported right now.'):
lr.minimize().run()
def testImbalanced(self):
# Setup test data with 1 positive, and 3 negative examples.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [2],
'gender': [0]
}, 0),
make_example_proto({
'age': [3],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(3, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(
0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
def testImbalancedWithExampleWeights(self):
# Setup test data with 1 positive, and 1 negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [3.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.408044, loss.eval(), atol=0.012)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
def testInstancesOfOneClassOnly(self):
# Setup test data with 1 positive (ignored), and 1 negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [0]
}, 1), # Shares gender with the instance above.
]
example_weights = [1.0, 0.0] # Second example "omitted" from training.
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testOutOfRangeSparseFeatures(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(0, 0)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'indices.*'):
train_op.run()
def testOutOfRangeDenseFeatures(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[1.0, 0.0])
# Replace with a variable of size 1 instead of 2.
variables['dense_features_weights'] = [
variables_lib.VariableV1(array_ops.zeros(
[1], dtype=dtypes.float32))
]
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'More dense features than we have parameters for.*'):
train_op.run()
# TODO(katsiaspis): add a test for the case when examples at the end of an
# epoch are repeated, since example id may be duplicated.
class SdcaWithLinearLossTest(SdcaModelTest):
"""SDCA optimizer test class for linear (squared) loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be 2/3 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 / 2
self.assertAllClose(
[-20.0 / 3.0, 28.0 / 3.0], predictions.eval(), rtol=0.005)
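# Illustrative gradient check of the claim above: with two unit-valued
# features sharing weight w, the per-example objective is
# (label - 2 * w)^2 / 2 + w^2 (L2 = 1), whose derivative
# -2 * (label - 2 * w) + 2 * w vanishes at w = label / 3, i.e. at
# predictions of 2 * label / 3.
for label in (-10.0, 14.0):
  w_star = label / 3.0
  assert abs(-2.0 * (label - 2.0 * w_star) + 2.0 * w_star) < 1e-9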
# Approximate gap should be very close to 0.0. (In fact, because the gap
# is only approximate, it is likely that upon convergence the duality gap
# can have a tiny negative value).
self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2)
def testL2Regularization(self):
# Setup test data
example_protos = [
# 2 identical examples
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
# 2 more identical examples
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=16,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be 1/5 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 + L2 * weight^2, with L2 = 16 here
optimal1 = -10.0 / 5.0
optimal2 = 14.0 / 5.0
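# Illustrative gradient check of the closed form above: each identical pair
# contributes (label - 2 * w)^2 to the data term and 16 * w^2 of L2, so
# d/dw = -4 * (label - 2 * w) + 32 * w, which vanishes at w = label / 10,
# i.e. at predictions of label / 5.
for label in (-10.0, 14.0):
  w_star = label / 10.0
  assert abs(-4.0 * (label - 2.0 * w_star) + 32.0 * w_star) < 1e-9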
self.assertAllClose(
[optimal1, optimal1, optimal2, optimal2],
predictions.eval(),
rtol=0.01)
def testL1Regularization(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=4.0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
prediction = lr.predictions(examples)
loss = lr.regularized_loss(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be -4.0, 20/3 due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 / 2 + L1 * 2 * |weight|
self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
# Loss should be the sum of the regularized loss value from above per
# example after plugging in the optimal weights.
self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
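# Illustrative subgradient check of the optima above: per example, the
# objective in the shared weight w is (label - 2 * w)^2 / 2 + w^2 + 8 * |w|,
# so optimality requires -2 * (label - 2 * w) + 2 * w + 8 * sign(w) == 0.
for label, w_star in ((-10.0, -2.0), (14.0, 10.0 / 3.0)):
  sign = 1.0 if w_star > 0 else -1.0
  residual = -2.0 * (label - 2.0 * w_star) + 2.0 * w_star + 8.0 * sign
  assert abs(residual) < 1e-9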
def testFeatureValues(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0, -2.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0, 2.0),
]
example_weights = [5.0, 3.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# There are 4 (sparse) variable weights to be learned: 2 for age and 2 for
# gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights, y_1,
# y_2 be the labels for examples 1 and 2 respectively and s_1, s_2 the
# corresponding *example* weights. With the given feature values, the loss
# function is given by:
# s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2
# + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2). Solving for the optimal, it
# can be verified that:
# w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and
# w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2). Equivalently, due to
# regularization and example weights, the predictions are within:
# 8 \cdot s_i /(\lambda + 8 \cdot s_i) of the labels.
self.assertAllClose(
[-10 * 40.0 / 41.0, 14.0 * 24 / 25.0], predictions.eval(), atol=0.01)
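# Illustrative numeric check of the shrinkage factors above: each
# prediction equals y_i * 8 * s_i / (\lambda + 8 * s_i).
lam = 1.0
for y, s, expected in ((-10.0, 5.0, -10.0 * 40.0 / 41.0),
                       (14.0, 3.0, 14.0 * 24.0 / 25.0)):
  assert abs(y * 8.0 * s / (lam + 8.0 * s) - expected) < 1e-9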
def testDenseFeaturesWithDefaultWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
weights=[1.0, 1.0],
labels=[10.0, -5.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The loss function for these particular features is given by:
# 1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2). So,
# differentiating wrt to w_1, w_2 yields the following optimal values:
# w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2.
# In this case the (unnormalized regularized) loss will be:
# 1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
# loss should be further normalized by the sum of example weights.
self.assertAllClose([5.0, -2.5], predictions.eval(), rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)
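# Illustrative arithmetic check of the loss above: the unnormalized
# regularized loss at the optimum is 125/4; dividing by the sum of example
# weights (2.0) gives the asserted 125/8.
unnormalized = ((10.0 - 5.0)**2 / 2.0 + (-5.0 + 2.5)**2 / 2.0 +
                0.5 * (5.0**2 + 2.5**2))
assert abs(unnormalized - 125.0 / 4.0) < 1e-9
assert abs(unnormalized / 2.0 - 125.0 / 8.0) < 1e-9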
def testDenseFeaturesWithArbitraryWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[10.0, -5.0])
options = dict(
symmetric_l2_regularization=5.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The loss function for these particular features is given by:
# 1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
# \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example weights. It
# turns out that the optimal (variable) weights are given by:
# w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and
# w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3.
# In this case the (unnormalized regularized) loss will be:
# s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9. The
# actual loss should be further normalized by the sum of example weights.
self.assertAllClose([8.0, -10.0 / 3], predictions.eval(), rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
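# Illustrative arithmetic check of the weighted loss above: the
# unnormalized regularized loss at the optimum is 2175/9; dividing by the
# sum of example weights (30.0) gives the asserted 2175/270.
unnormalized = (20.0 / 2.0 * (10.0 - 8.0)**2 +
                10.0 / 2.0 * (-5.0 + 10.0 / 3.0)**2 +
                5.0 / 2.0 * (8.0**2 + (10.0 / 3.0)**2))
assert abs(unnormalized - 2175.0 / 9.0) < 1e-6
assert abs(unnormalized / 30.0 - 2175.0 / 270.0) < 1e-6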
class SdcaWithHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model perfectly separates the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). Solving the system w1 + w3 = -1.0, w2 + w4 = 1.0 and minimizing
# w.r.t. \|\vec{w}\|_2 gives w1=w3=-1/2 and w2=w4=1/2. This gives 0.0
# unregularized loss and 0.25 L2 loss.
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllEqual([-1.0, 1.0], predictions.eval())
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.0, unregularized_loss.eval())
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)
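# Illustrative numeric check of the solution above: with w1 = w3 = -0.5 and
# w2 = w4 = 0.5, the margins are exactly 1 for both (-1/+1) labels, and the
# normalized L2 loss is 1.0 * (4 * 0.25) / 2 / 2 = 0.25.
ws = [-0.5, 0.5, -0.5, 0.5]  # w1, w2, w3, w4
margins = [-1.0 * (ws[0] + ws[2]), 1.0 * (ws[1] + ws[3])]
assert margins == [1.0, 1.0]
assert abs(sum(w * w for w in ws) / 2.0 / 2.0 - 0.25) < 1e-9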
def testDenseFeaturesPerfectlySeparable(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
# (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
# the SVM's functional margin >=1), so the unregularized loss is ~0.0.
# There is only loss due to l2-regularization. For these datapoints, it
# turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesSeparableWithinMargins(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
# (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
# are within the margins so there is unregularized loss (1/2 per example).
# For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
# gives an L2 loss of ~0.25.
self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesWeightedExamples(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
weights=[3.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
# Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
# try to increase the margin from (1.0, 0.5). Due to regularization,
# (1.0, -0.5) will be within the margin. For these points and example
# weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
# loss of 0.5 * 0.25 * 1.6 = 0.2. The binary predictions will be
# correct, but the boundary will be much closer to the 2nd point than the
# first one.
self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
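# Illustrative numeric check of the weights above: with w_1 = 0.4 and
# w_2 = 1.2 the predictions are [1.0, -0.2]; only the second (weight-1.0)
# example is inside the margin, so the weighted hinge loss is
# (1 - 0.2) / 4 = 0.2 and the normalized L2 loss is
# 1.0 * (0.4^2 + 1.2^2) / 2 / 4 = 0.2.
w_1, w_2 = 0.4, 1.2
preds = [w_1 * 1.0 + w_2 * 0.5, w_1 * 1.0 + w_2 * -0.5]
assert abs(preds[0] - 1.0) < 1e-9 and abs(preds[1] + 0.2) < 1e-9
assert abs((3.0 * 0.0 + 1.0 * 0.8) / 4.0 - 0.2) < 1e-9
assert abs((w_1**2 + w_2**2) / 2.0 / 4.0 - 0.2) < 1e-9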
class SdcaWithSmoothHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for smooth hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='smooth_hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model perfectly separates the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). The minimization leads to w1=w3=-1/3 and w2=w4=1/3. This gives
# an unregularized hinge loss of 0.33 and a 0.11 L2 loss.
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllClose([-0.67, 0.67], predictions.eval(), atol=0.05)
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.33, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.44, regularized_loss.eval(), atol=0.02)
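# Illustrative numeric check of the numbers above: with w1 = w3 = -1/3 and
# w2 = w4 = 1/3 the predictions are -2/3 and 2/3, the plain hinge loss is
# 1 - 2/3 = 1/3 ~= 0.33 per example, and the normalized L2 loss is
# 1.0 * (4 * (1/3)^2) / 2 / 2 = 1/9 ~= 0.11.
assert abs((1.0 - 2.0 / 3.0) - 0.33) < 5e-3
assert abs(4.0 * (1.0 / 3.0)**2 / 2.0 / 2.0 - 0.11) < 2e-3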
class SdcaWithPoissonLossTest(SdcaModelTest):
"""SDCA optimizer test class for poisson loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 2),
]
example_weights = [100.0, 100.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='poisson_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 1 for each example.
predictions = model.predictions(examples)
self.assertAllClose([1.0, 1.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
approximate_duality_gap = model.approximate_duality_gap()
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
# (say w3 and w4). The minimization leads to:
# w1=w3=-1.96487, argmin of 100*(exp(2*w)-2*w*0)+w**2.
# w2=w4=0.345708, argmin of 100*(exp(2*w)-2*w*2)+w**2.
# This gives an unregularized loss of .3167 and .3366 with regularization.
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
self.assertAllClose([0.0196, 1.9965], predictions.eval(), atol=1e-4)
self.assertAllClose(0.3167, unregularized_loss.eval(), atol=1e-4)
self.assertAllClose(0.3366, regularized_loss.eval(), atol=1e-4)
self.assertAllClose(0., approximate_duality_gap.eval(), atol=1e-6)
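# Illustrative derivative check of the argmins above: d/dw of
# 100 * (exp(2 * w) - 2 * w * y) + w**2 is 200 * exp(2 * w) - 200 * y +
# 2 * w, which should nearly vanish at the (rounded) optima quoted above.
import math
for y, w_star in ((0.0, -1.96487), (2.0, 0.345708)):
  assert abs(
      200.0 * math.exp(2.0 * w_star) - 200.0 * y + 2.0 * w_star) < 1e-2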
class SdcaFprintTest(SdcaModelTest):
"""Tests for the SdcaFprint op.
This is one way of enforcing the platform-agnostic nature of SdcaFprint.
Basically we check against exact values, and this test can be run across
different platforms. Note that it is fine for the expected values to change
in the future if the implementation of SdcaFprint changes (i.e. this is *not*
a frozen test).
"""
def testFprint(self):
with self._single_threaded_test_session():
in_data = constant_op.constant(['abc', 'very looooooong string', 'def'])
out_data = gen_sdca_ops.sdca_fprint(in_data)
self.assertAllEqual([[4143508125394299908, -6879828354153669051],
[5849691694103072671, -4874542629849009556],
[603227410218889250, 8762207001949257490]],
out_data.eval())
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/linear_optimizer/python/kernel_tests/sdca_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_feature_column.py (deprecated).
This module and all its submodules are deprecated. To UPDATE or USE linear
optimizers, please check its latest version in core:
tensorflow_estimator/python/estimator/canned/linear_optimizer/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
class SparseFeatureColumnTest(TensorFlowTestCase):
"""Tests for SparseFeatureColumn.
"""
def testBasic(self):
expected_example_indices = [1, 1, 1, 2]
expected_feature_indices = [0, 1, 2, 0]
sfc = SparseFeatureColumn(expected_example_indices,
expected_feature_indices, None)
self.assertTrue(isinstance(sfc.example_indices, ops.Tensor))
self.assertTrue(isinstance(sfc.feature_indices, ops.Tensor))
self.assertEqual(sfc.feature_values, None)
with self.cached_session():
self.assertAllEqual(expected_example_indices, sfc.example_indices.eval())
self.assertAllEqual(expected_feature_indices, sfc.feature_indices.eval())
expected_feature_values = [1.0, 2.0, 3.0, 4.0]
sfc = SparseFeatureColumn([1, 1, 1, 2], [0, 1, 2, 0],
expected_feature_values)
with self.cached_session():
self.assertAllEqual(expected_feature_values, sfc.feature_values.eval())
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sharded_mutable_dense_hashtable.py (deprecated).
This module and all its submodules are deprecated. To UPDATE or USE linear
optimizers, please check its latest version in core:
tensorflow_estimator/python/estimator/canned/linear_optimizer/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linear_optimizer.python.ops.sharded_mutable_dense_hashtable import ShardedMutableDenseHashTable
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
class ShardedMutableDenseHashTableTest(TensorFlowTestCase):
"""Tests for the ShardedMutableHashTable class."""
def testShardedMutableHashTable(self):
for num_shards in [1, 3, 10]:
with self.cached_session():
default_val = -1
empty_key = 0
deleted_key = -1
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = ShardedMutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_val,
empty_key,
deleted_key,
num_shards=num_shards)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([11, 12, 14], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
self.assertAllEqual([0, 1, -1], output.eval())
def testShardedMutableHashTableVectors(self):
for num_shards in [1, 3, 10]:
with self.cached_session():
default_val = [-0.1, 0.2]
empty_key = [0, 1]
deleted_key = [1, 0]
keys = constant_op.constant([[11, 12], [13, 14], [15, 16]],
dtypes.int64)
values = constant_op.constant([[0.5, 0.6], [1.5, 1.6], [2.5, 2.6]],
dtypes.float32)
table = ShardedMutableDenseHashTable(
dtypes.int64,
dtypes.float32,
default_val,
empty_key,
deleted_key,
num_shards=num_shards)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([[11, 12], [13, 14], [11, 14]],
dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3, 2], output.get_shape())
self.assertAllClose([[0.5, 0.6], [1.5, 1.6], [-0.1, 0.2]],
output.eval())
def testExportSharded(self):
with self.cached_session():
empty_key = -2
deleted_key = -3
default_val = -1
num_shards = 2
keys = constant_op.constant([10, 11, 12], dtypes.int64)
values = constant_op.constant([2, 3, 4], dtypes.int64)
table = ShardedMutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_val,
empty_key,
deleted_key,
num_shards=num_shards)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
keys_list, values_list = table.export_sharded()
self.assertAllEqual(num_shards, len(keys_list))
self.assertAllEqual(num_shards, len(values_list))
# Exported keys include empty key buckets set to the empty_key
self.assertAllEqual(set([-2, 10, 12]), set(keys_list[0].eval().flatten()))
self.assertAllEqual(set([-2, 11]), set(keys_list[1].eval().flatten()))
# Exported values include empty value buckets set to 0
self.assertAllEqual(set([0, 2, 4]), set(values_list[0].eval().flatten()))
self.assertAllEqual(set([0, 3]), set(values_list[1].eval().flatten()))
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/linear_optimizer/python/ops/sharded_mutable_dense_hashtable_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparse feature column (deprecated).
This module and all its submodules are deprecated. To UPDATE or USE linear
optimizers, please check its latest version in core:
tensorflow_estimator/python/estimator/canned/linear_optimizer/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.ops import internal_convert_to_tensor
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.util import deprecation
class SparseFeatureColumn(object):
"""Represents a sparse feature column.
Contains three tensors representing a sparse feature column: example
indices (`int64`), feature indices (`int64`), and feature values (`float`).
Feature values are optional, and are treated as `1.0f` if missing.
For example, consider a batch of 4 examples, which contains the following
features in a particular `SparseFeatureColumn`:
* Example 0: feature 5, value 1
* Example 1: feature 6, value 1 and feature 10, value 0.5
* Example 2: no features
* Example 3: two copies of feature 2, value 1
This SparseFeatureColumn will be represented as follows:
```
<0, 5, 1>
<1, 6, 1>
<1, 10, 0.5>
<3, 2, 1>
<3, 2, 1>
```
For a batch of 2 examples below:
* Example 0: feature 5
* Example 1: feature 6
is represented by `SparseFeatureColumn` as:
```
<0, 5, 1>
<1, 6, 1>
```
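For example, the first batch above can be constructed directly from those
triples (a usage sketch):
```
sfc = SparseFeatureColumn(
    example_indices=[0, 1, 1, 3, 3],
    feature_indices=[5, 6, 10, 2, 2],
    feature_values=[1.0, 1.0, 0.5, 1.0, 1.0])
```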
@@__init__
@@example_indices
@@feature_indices
@@feature_values
"""
@deprecation.deprecated(
None, 'This class is deprecated. To UPDATE or USE linear optimizers, '
'please check its latest version in core: '
'tensorflow_estimator/python/estimator/canned/linear_optimizer/.')
def __init__(self, example_indices, feature_indices, feature_values):
"""Creates a `SparseFeatureColumn` representation.
Args:
example_indices: A 1-D int64 tensor of shape `[N]`. Also, accepts
python lists, or numpy arrays.
feature_indices: A 1-D int64 tensor of shape `[N]`. Also, accepts
python lists, or numpy arrays.
feature_values: An optional 1-D float tensor of shape `[N]`. Also,
accepts python lists, or numpy arrays.
Returns:
A `SparseFeatureColumn`
"""
with name_scope(None, 'SparseFeatureColumn',
[example_indices, feature_indices]):
self._example_indices = internal_convert_to_tensor(
example_indices, name='example_indices', dtype=dtypes.int64)
self._feature_indices = internal_convert_to_tensor(
feature_indices, name='feature_indices', dtype=dtypes.int64)
self._feature_values = None
if feature_values is not None:
with name_scope(None, 'SparseFeatureColumn', [feature_values]):
self._feature_values = internal_convert_to_tensor(
feature_values, name='feature_values', dtype=dtypes.float32)
@property
def example_indices(self):
"""The example indices represented as a dense tensor.
Returns:
A 1-D Tensor of int64 with shape `[N]`.
"""
return self._example_indices
@property
def feature_indices(self):
"""The feature indices represented as a dense tensor.
Returns:
A 1-D Tensor of int64 with shape `[N]`.
"""
return self._feature_indices
@property
def feature_values(self):
"""The feature values represented as a dense tensor.
Returns:
May return None, or a 1-D Tensor of float32 with shape `[N]`.
"""
return self._feature_values
|
tensorflow-master
|
tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Proximal stochastic dual coordinate ascent optimizer for linear models (deprecated).
This module and all its submodules are deprecated. To UPDATE or USE linear
optimizers, please check its latest version in core:
tensorflow_estimator/python/estimator/canned/linear_optimizer/.
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
from tensorflow.contrib.linear_optimizer.python.ops.sharded_mutable_dense_hashtable import ShardedMutableDenseHashTable
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework.ops import internal_convert_to_tensor
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_sdca_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as var_ops
from tensorflow.python.ops.nn import log_poisson_loss
from tensorflow.python.ops.nn import sigmoid_cross_entropy_with_logits
from tensorflow.python.summary import summary
from tensorflow.python.util import deprecation
__all__ = ['SdcaModel']
# TODO(sibyl-Aix6ihai): add name_scope to appropriate methods.
class SdcaModel(object):
"""Stochastic dual coordinate ascent solver for linear models.
Loss functions supported:
* Binary logistic loss
* Squared loss
* Hinge loss
* Smooth hinge loss
* Poisson log loss
This class defines an optimizer API to train a linear model.
### Usage
```python
# Create a solver with the desired parameters.
lr = tf.contrib.linear_optimizer.SdcaModel(examples, variables, options)
min_op = lr.minimize()
opt_op = lr.update_weights(min_op)
predictions = lr.predictions(examples)
# Primal loss + L1 loss + L2 loss.
regularized_loss = lr.regularized_loss(examples)
# Primal loss only
unregularized_loss = lr.unregularized_loss(examples)
examples: {
sparse_features: list of SparseFeatureColumn.
dense_features: list of dense tensors of type float32.
example_labels: a tensor of type float32 and shape [Num examples]
example_weights: a tensor of type float32 and shape [Num examples]
example_ids: a tensor of type string and shape [Num examples]
}
variables: {
sparse_features_weights: list of tensors of shape [vocab size]
dense_features_weights: list of tensors of shape [dense_feature_dimension]
}
options: {
symmetric_l1_regularization: 0.0
symmetric_l2_regularization: 1.0
loss_type: "logistic_loss"
num_loss_partitions: 1 (Optional, with default value of 1. Number of
partitions of the global loss function, 1 means single machine solver,
and >1 when we have more than one optimizer working concurrently.)
num_table_shards: 1 (Optional, with default value of 1. Number of shards
of the internal state table, typically set to match the number of
parameter servers for large data sets.)
}
```
In the training program you will just have to run the returned Op from
minimize().
```python
# Execute opt_op and train for num_steps.
for _ in range(num_steps):
opt_op.run()
# You can also check for convergence by calling
lr.approximate_duality_gap()
```
"""
@deprecation.deprecated(
None, 'This class is deprecated. To UPDATE or USE linear optimizers, '
'please check its latest version in core: '
'tensorflow_estimator/python/estimator/canned/linear_optimizer/.')
def __init__(self, examples, variables, options):
"""Create a new sdca optimizer."""
if not examples or not variables or not options:
raise ValueError('examples, variables and options must all be specified.')
supported_losses = ('logistic_loss', 'squared_loss', 'hinge_loss',
'smooth_hinge_loss', 'poisson_loss')
if options['loss_type'] not in supported_losses:
raise ValueError('Unsupported loss_type: %s' % options['loss_type'])
self._assertSpecified([
'example_labels', 'example_weights', 'example_ids', 'sparse_features',
'dense_features'
], examples)
self._assertList(['sparse_features', 'dense_features'], examples)
self._assertSpecified(['sparse_features_weights', 'dense_features_weights'],
variables)
self._assertList(['sparse_features_weights', 'dense_features_weights'],
variables)
self._assertSpecified([
'loss_type', 'symmetric_l2_regularization',
'symmetric_l1_regularization'
], options)
for name in ['symmetric_l1_regularization', 'symmetric_l2_regularization']:
value = options[name]
if value < 0.0:
raise ValueError('%s should be non-negative. Found (%f)' %
(name, value))
self._examples = examples
self._variables = variables
self._options = options
self._create_slots()
self._hashtable = ShardedMutableDenseHashTable(
key_dtype=dtypes.int64,
value_dtype=dtypes.float32,
num_shards=self._num_table_shards(),
default_value=[0.0, 0.0, 0.0, 0.0],
# SdcaFprint never returns 0 or 1 for the low64 bits, so this a safe
# empty_key (that will never collide with actual payloads).
empty_key=[0, 0],
deleted_key=[1, 1])
summary.scalar('approximate_duality_gap', self.approximate_duality_gap())
summary.scalar('examples_seen', self._hashtable.size())
def _symmetric_l1_regularization(self):
return self._options['symmetric_l1_regularization']
def _symmetric_l2_regularization(self):
# Algorithmic requirement (for now) is to have minimal l2 of 1.0.
return max(self._options['symmetric_l2_regularization'], 1.0)
def _num_loss_partitions(self):
# Number of partitions of the global objective.
# TODO(andreasst): set num_loss_partitions automatically based on the number
# of workers
return self._options.get('num_loss_partitions', 1)
def _adaptive(self):
# Perform adaptive sampling.
return self._options.get('adaptive', True)
def _num_table_shards(self):
# Number of hash table shards.
# Return 1 if not specified or if the value is 'None'
# TODO(andreasst): set num_table_shards automatically based on the number
# of parameter servers
num_shards = self._options.get('num_table_shards')
return 1 if num_shards is None else num_shards
# TODO(sibyl-Aix6ihai): Use optimizer interface to make use of slot creation logic.
def _create_slots(self):
"""Make unshrinked internal variables (slots)."""
# Unshrinked variables have the updates before applying L1 regularization.
# Each unshrinked slot variable is either a `Variable` or list of
# `Variable`, depending on the value of its corresponding primary variable.
# We avoid using `PartitionedVariable` for the unshrinked slots since we do
# not need any of the extra information.
self._slots = collections.defaultdict(list)
for name in ['sparse_features_weights', 'dense_features_weights']:
for var in self._variables[name]:
# Our primary variable may be either a PartitionedVariable, or a list
# of Variables (each representing a partition).
if (isinstance(var, var_ops.PartitionedVariable) or
isinstance(var, list)):
var_list = []
# pylint: disable=protected-access
for v in var:
with ops.colocate_with(v):
# TODO(andreasst): remove SDCAOptimizer suffix once bug 30843109
# is fixed.
slot_var = var_ops.VariableV1(
initial_value=array_ops.zeros_like(v.initialized_value(),
dtypes.float32),
name=v.op.name + '_unshrinked/SDCAOptimizer')
var_list.append(slot_var)
self._slots['unshrinked_' + name].append(var_list)
# pylint: enable=protected-access
else:
with ops.device(var.device):
# TODO(andreasst): remove SDCAOptimizer suffix once bug 30843109 is
# fixed.
self._slots['unshrinked_' + name].append(
var_ops.VariableV1(
array_ops.zeros_like(var.initialized_value(),
dtypes.float32),
name=var.op.name + '_unshrinked/SDCAOptimizer'))
def _assertSpecified(self, items, check_in):
for x in items:
if check_in[x] is None:
raise ValueError(x + ' must be specified.')
def _assertList(self, items, check_in):
for x in items:
if not isinstance(check_in[x], list):
raise ValueError(x + ' must be a list.')
def _var_to_list(self, var):
"""Wraps var in a list if it is not a list or PartitionedVariable."""
if not (isinstance(var, list) or
isinstance(var, var_ops.PartitionedVariable)):
var = [var]
return var
def _l1_loss(self):
"""Computes the (un-normalized) l1 loss of the model."""
with name_scope('sdca/l1_loss'):
sums = []
for name in ['sparse_features_weights', 'dense_features_weights']:
for var in self._variables[name]:
for v in self._var_to_list(var):
weights = internal_convert_to_tensor(v)
with ops.device(weights.device):
sums.append(
math_ops.reduce_sum(
math_ops.abs(math_ops.cast(weights, dtypes.float64))))
# SDCA L1 regularization cost is: l1 * sum(|weights|)
return self._options['symmetric_l1_regularization'] * math_ops.add_n(sums)
def _l2_loss(self, l2):
"""Computes the (un-normalized) l2 loss of the model."""
with name_scope('sdca/l2_loss'):
sums = []
for name in ['sparse_features_weights', 'dense_features_weights']:
for var in self._variables[name]:
for v in self._var_to_list(var):
weights = internal_convert_to_tensor(v)
with ops.device(weights.device):
sums.append(math_ops.reduce_sum(math_ops.square(math_ops.cast(
weights, dtypes.float64))))
# SDCA L2 regularization cost is: l2 * sum(weights^2) / 2
return l2 * math_ops.add_n(sums) / 2.0
def _convert_n_to_tensor(self, input_list, as_ref=False):
"""Converts input list to a set of tensors."""
# input_list can be a list of Variables (that are implicitly partitioned),
# in which case the underlying logic in internal_convert_to_tensor will not
# concatenate the partitions together. This method takes care of the
# concatenating (we only allow partitioning on the first axis).
output_list = []
for x in input_list:
tensor_to_convert = x
if isinstance(x, list) or isinstance(x, var_ops.PartitionedVariable):
# We only allow for partitioning on the first axis.
tensor_to_convert = array_ops.concat(x, axis=0)
output_list.append(internal_convert_to_tensor(
tensor_to_convert, as_ref=as_ref))
return output_list
def _get_first_dimension_size_statically(self, w, num_partitions):
"""Compute the static size of the first dimension for a sharded variable."""
dim_0_size = w[0].get_shape()[0]
for p in range(1, num_partitions):
dim_0_size += w[p].get_shape()[0]
return dim_0_size
def _linear_predictions(self, examples):
"""Returns predictions of the form w*x."""
with name_scope('sdca/prediction'):
sparse_variables = self._convert_n_to_tensor(self._variables[
'sparse_features_weights'])
result_sparse = 0.0
for sfc, sv in zip(examples['sparse_features'], sparse_variables):
# TODO(sibyl-Aix6ihai): following does not take care of missing features.
result_sparse += math_ops.segment_sum(
math_ops.multiply(
array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
sfc.example_indices)
dense_features = self._convert_n_to_tensor(examples['dense_features'])
dense_variables = self._convert_n_to_tensor(self._variables[
'dense_features_weights'])
result_dense = 0.0
for i in range(len(dense_variables)):
result_dense += math_ops.matmul(dense_features[i],
array_ops.expand_dims(
dense_variables[i], -1))
# Reshaping to allow shape inference at graph construction time.
return array_ops.reshape(result_dense, [-1]) + result_sparse
def predictions(self, examples):
"""Add operations to compute predictions by the model.
If logistic_loss is being used, predicted probabilities are returned.
If poisson_loss is being used, predictions are exponentiated.
Otherwise, (raw) linear predictions (w*x) are returned.
Args:
examples: Examples to compute predictions on.
Returns:
An Operation that computes the predictions for examples.
Raises:
ValueError: if examples are not well defined.
"""
self._assertSpecified(
['example_weights', 'sparse_features', 'dense_features'], examples)
self._assertList(['sparse_features', 'dense_features'], examples)
result = self._linear_predictions(examples)
if self._options['loss_type'] == 'logistic_loss':
# Convert logits to probability for logistic loss predictions.
with name_scope('sdca/logistic_prediction'):
result = math_ops.sigmoid(result)
elif self._options['loss_type'] == 'poisson_loss':
# Exponentiate the prediction for poisson loss predictions.
with name_scope('sdca/poisson_prediction'):
result = math_ops.exp(result)
return result
def _get_partitioned_update_ops(self,
v_num,
num_partitions_by_var,
p_assignments_by_var,
gather_ids_by_var,
weights,
full_update,
p_assignments,
num_partitions):
"""Get updates for partitioned variables."""
num_partitions = num_partitions_by_var[v_num]
p_assignments = p_assignments_by_var[v_num]
gather_ids = gather_ids_by_var[v_num]
updates = data_flow_ops.dynamic_partition(
full_update, p_assignments, num_partitions)
update_ops = []
for p in range(num_partitions):
with ops.colocate_with(weights[p]):
result = state_ops.scatter_add(weights[p], gather_ids[p], updates[p])
update_ops.append(result)
return update_ops
def minimize(self, global_step=None, name=None):
"""Add operations to train a linear model by minimizing the loss function.
Args:
global_step: Optional `Variable` to increment by one after the
variables have been updated.
name: Optional name for the returned operation.
Returns:
An Operation that updates the variables passed in the constructor.
"""
# Technically, the op depends on a lot more than the variables,
# but we'll keep the list short.
with name_scope(name, 'sdca/minimize'):
sparse_example_indices = []
sparse_feature_indices = []
sparse_features_values = []
for sf in self._examples['sparse_features']:
sparse_example_indices.append(sf.example_indices)
sparse_feature_indices.append(sf.feature_indices)
# If feature values are missing, sdca assumes a value of 1.0f.
if sf.feature_values is not None:
sparse_features_values.append(sf.feature_values)
# pylint: disable=protected-access
example_ids_hashed = gen_sdca_ops.sdca_fprint(
internal_convert_to_tensor(self._examples['example_ids']))
# pylint: enable=protected-access
example_state_data = self._hashtable.lookup(example_ids_hashed)
# Solver returns example_state_update, new delta sparse_feature_weights
# and delta dense_feature_weights.
sparse_weights = []
sparse_indices = []
# If we have partitioned variables, keep a few dictionaries of Tensors
# around that we need for the assign_add after the op call to
# gen_sdca_ops.sdca_optimizer(). These are keyed because we may have a
# mix of partitioned and un-partitioned variables.
num_partitions_by_var = {}
p_assignments_by_var = {}
gather_ids_by_var = {}
for v_num, (w, i) in enumerate(
zip(self._slots['unshrinked_sparse_features_weights'],
sparse_feature_indices)):
# Append the sparse_indices (in full-variable space).
sparse_idx = math_ops.cast(
array_ops.unique(math_ops.cast(i, dtypes.int32))[0],
dtypes.int64)
sparse_indices.append(sparse_idx)
if isinstance(w, list) or isinstance(w, var_ops.PartitionedVariable):
num_partitions = len(w)
flat_ids = array_ops.reshape(sparse_idx, [-1])
# We use div partitioning, which is easiest to support downstream.
# Compute num_total_ids as the sum of dim-0 of w, then assign
# to partitions based on a constant number of ids per partition.
# Optimize if we already know the full shape statically.
dim_0_size = self._get_first_dimension_size_statically(
w, num_partitions)
if tensor_shape.dimension_value(dim_0_size):
num_total_ids = constant_op.constant(
tensor_shape.dimension_value(dim_0_size),
flat_ids.dtype)
else:
dim_0_sizes = []
for p in range(num_partitions):
if tensor_shape.dimension_value(w[p].shape[0]) is not None:
dim_0_sizes.append(tensor_shape.dimension_value(w[p].shape[0]))
else:
with ops.colocate_with(w[p]):
dim_0_sizes.append(array_ops.shape(w[p])[0])
num_total_ids = math_ops.reduce_sum(
math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
ids_per_partition = num_total_ids // num_partitions
extras = num_total_ids % num_partitions
p_assignments = math_ops.maximum(
flat_ids // (ids_per_partition + 1),
(flat_ids - extras) // ids_per_partition)
# Emulate a conditional using a boolean indicator tensor
new_ids = array_ops.where(p_assignments < extras,
flat_ids % (ids_per_partition + 1),
(flat_ids - extras) % ids_per_partition)
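# Illustrative sketch of the div partitioning above: with
# num_total_ids=10 and num_partitions=3, ids_per_partition=3 and extras=1,
# so ids 0..3 map to partition 0 (which takes the one extra id), ids 4..6
# to partition 1 and ids 7..9 to partition 2; new_ids renumbers each id
# within its partition (e.g. id 7 becomes 0 in partition 2).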
# Cast partition assignments to int32 for use in dynamic_partition.
# There really should not be more than 2^32 partitions.
p_assignments = math_ops.cast(p_assignments, dtypes.int32)
# Partition list of ids based on assignments into num_partitions
# separate lists.
gather_ids = data_flow_ops.dynamic_partition(new_ids,
p_assignments,
num_partitions)
# Add these into the dictionaries for use in the later update.
num_partitions_by_var[v_num] = num_partitions
p_assignments_by_var[v_num] = p_assignments
gather_ids_by_var[v_num] = gather_ids
# Gather the weights from each partition.
partition_gathered_weights = []
for p in range(num_partitions):
with ops.colocate_with(w[p]):
partition_gathered_weights.append(
array_ops.gather(w[p], gather_ids[p]))
# Stitch the weights back together in the same order they were before
# we dynamic_partitioned them.
condition_indices = data_flow_ops.dynamic_partition(
math_ops.range(array_ops.shape(new_ids)[0]),
p_assignments, num_partitions)
batch_gathered_weights = data_flow_ops.dynamic_stitch(
condition_indices, partition_gathered_weights)
else:
w_as_tensor = internal_convert_to_tensor(w)
with ops.device(w_as_tensor.device):
batch_gathered_weights = array_ops.gather(
w_as_tensor, sparse_idx)
sparse_weights.append(batch_gathered_weights)
# pylint: disable=protected-access
if compat.forward_compatible(year=2018, month=10, day=30):
esu, sfw, dfw = gen_sdca_ops.sdca_optimizer_v2(
sparse_example_indices,
sparse_feature_indices,
sparse_features_values,
self._convert_n_to_tensor(self._examples['dense_features']),
internal_convert_to_tensor(self._examples['example_weights']),
internal_convert_to_tensor(self._examples['example_labels']),
sparse_indices,
sparse_weights,
self._convert_n_to_tensor(self._slots[
'unshrinked_dense_features_weights']),
example_state_data,
loss_type=self._options['loss_type'],
l1=self._options['symmetric_l1_regularization'],
l2=self._symmetric_l2_regularization(),
num_loss_partitions=self._num_loss_partitions(),
num_inner_iterations=1,
adaptive=self._adaptive())
else:
esu, sfw, dfw = gen_sdca_ops.sdca_optimizer(
sparse_example_indices,
sparse_feature_indices,
sparse_features_values,
self._convert_n_to_tensor(self._examples['dense_features']),
internal_convert_to_tensor(self._examples['example_weights']),
internal_convert_to_tensor(self._examples['example_labels']),
sparse_indices,
sparse_weights,
self._convert_n_to_tensor(self._slots[
'unshrinked_dense_features_weights']),
example_state_data,
loss_type=self._options['loss_type'],
l1=self._options['symmetric_l1_regularization'],
l2=self._symmetric_l2_regularization(),
num_loss_partitions=self._num_loss_partitions(),
num_inner_iterations=1,
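# Note: the misspelled 'adaptative' below is the attr name of the
# original sdca_optimizer op; it was renamed to 'adaptive' in
# sdca_optimizer_v2 above.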
adaptative=self._adaptive())
# pylint: enable=protected-access
with ops.control_dependencies([esu]):
update_ops = [self._hashtable.insert(example_ids_hashed, esu)]
# Update the weights before the proximal step.
for v_num, (w, i, u) in enumerate(
zip(self._slots['unshrinked_sparse_features_weights'],
sparse_indices, sfw)):
if (isinstance(w, var_ops.PartitionedVariable) or
isinstance(w, list)):
update_ops += self._get_partitioned_update_ops(
v_num, num_partitions_by_var, p_assignments_by_var,
gather_ids_by_var, w, u, p_assignments, num_partitions)
else:
update_ops.append(state_ops.scatter_add(w, i, u))
for w, u in zip(self._slots['unshrinked_dense_features_weights'], dfw):
if (isinstance(w, var_ops.PartitionedVariable) or
isinstance(w, list)):
split_updates = array_ops.split(
u, num_or_size_splits=[v.shape.as_list()[0] for v in w])
for v, split_update in zip(w, split_updates):
update_ops.append(state_ops.assign_add(v, split_update))
else:
update_ops.append(state_ops.assign_add(w, u))
if not global_step:
return control_flow_ops.group(*update_ops)
with ops.control_dependencies(update_ops):
return state_ops.assign_add(global_step, 1, name=name).op
def update_weights(self, train_op):
"""Updates the model weights.
This function must be called on at least one worker after `minimize`.
In distributed training this call can be omitted on non-chief workers to
speed up training.
Args:
train_op: The operation returned by the `minimize` call.
Returns:
An Operation that updates the model weights.
"""
with ops.control_dependencies([train_op]):
update_ops = []
# Copy over unshrinked weights to user provided variables.
for name in ['sparse_features_weights', 'dense_features_weights']:
for var, slot_var in zip(self._variables[name],
self._slots['unshrinked_' + name]):
for v, sv in zip(self._var_to_list(var), self._var_to_list(slot_var)):
update_ops.append(v.assign(sv))
# Apply proximal step.
with ops.control_dependencies(update_ops):
update_ops = []
for name in ['sparse_features_weights', 'dense_features_weights']:
for var in self._variables[name]:
for v in self._var_to_list(var):
with ops.device(v.device):
# pylint: disable=protected-access
update_ops.append(
gen_sdca_ops.sdca_shrink_l1(
self._convert_n_to_tensor([v], as_ref=True),
l1=self._symmetric_l1_regularization(),
l2=self._symmetric_l2_regularization()))
return control_flow_ops.group(*update_ops)
def approximate_duality_gap(self):
"""Add operations to compute the approximate duality gap.
Returns:
An Operation that computes the approximate duality gap over all
examples.
"""
with name_scope('sdca/approximate_duality_gap'):
_, values_list = self._hashtable.export_sharded()
shard_sums = []
for values in values_list:
with ops.device(values.device):
# For large tables the cast to float64 below allocates a large temporary
# tensor that is freed once the sum operation completes. To reduce
# peak memory usage in cases where we have multiple large tables on a
# single device, we serialize these operations.
# Note that we need double precision to get accurate results.
with ops.control_dependencies(shard_sums):
shard_sums.append(
math_ops.reduce_sum(math_ops.cast(values, dtypes.float64), 0))
summed_values = math_ops.add_n(shard_sums)
primal_loss = summed_values[1]
dual_loss = summed_values[2]
example_weights = summed_values[3]
# Note: we return NaN if there are no weights or all weights are 0, e.g.
# if no examples have been processed.
return (primal_loss + dual_loss + self._l1_loss() +
(2.0 * self._l2_loss(self._symmetric_l2_regularization()))
) / example_weights
def unregularized_loss(self, examples):
"""Add operations to compute the loss (without the regularization loss).
Args:
examples: Examples to compute unregularized loss on.
Returns:
An Operation that computes mean (unregularized) loss for given set of
examples.
Raises:
ValueError: if examples are not well defined.
"""
self._assertSpecified([
'example_labels', 'example_weights', 'sparse_features', 'dense_features'
], examples)
self._assertList(['sparse_features', 'dense_features'], examples)
with name_scope('sdca/unregularized_loss'):
predictions = math_ops.cast(
self._linear_predictions(examples), dtypes.float64)
labels = math_ops.cast(
internal_convert_to_tensor(examples['example_labels']),
dtypes.float64)
weights = math_ops.cast(
internal_convert_to_tensor(examples['example_weights']),
dtypes.float64)
if self._options['loss_type'] == 'logistic_loss':
return math_ops.reduce_sum(math_ops.multiply(
sigmoid_cross_entropy_with_logits(labels=labels,
logits=predictions),
weights)) / math_ops.reduce_sum(weights)
if self._options['loss_type'] == 'poisson_loss':
return math_ops.reduce_sum(math_ops.multiply(
log_poisson_loss(targets=labels, log_input=predictions),
weights)) / math_ops.reduce_sum(weights)
if self._options['loss_type'] in ['hinge_loss', 'smooth_hinge_loss']:
# hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
# first convert 0/1 labels into -1/1 labels.
all_ones = array_ops.ones_like(predictions)
adjusted_labels = math_ops.subtract(2 * labels, all_ones)
# Tensor that contains (unweighted) error (hinge loss) per
# example.
error = nn_ops.relu(
math_ops.subtract(all_ones,
math_ops.multiply(adjusted_labels, predictions)))
weighted_error = math_ops.multiply(error, weights)
return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
weights)
# squared loss
err = math_ops.subtract(labels, predictions)
weighted_squared_err = math_ops.multiply(math_ops.square(err), weights)
# SDCA squared loss function is sum(err^2) / (2*sum(weights))
return (math_ops.reduce_sum(weighted_squared_err) /
(2.0 * math_ops.reduce_sum(weights)))
def regularized_loss(self, examples):
"""Add operations to compute the loss with regularization loss included.
Args:
examples: Examples to compute loss on.
Returns:
An Operation that computes mean (regularized) loss for given set of
examples.
Raises:
ValueError: if examples are not well defined.
"""
self._assertSpecified([
'example_labels', 'example_weights', 'sparse_features', 'dense_features'
], examples)
self._assertList(['sparse_features', 'dense_features'], examples)
with name_scope('sdca/regularized_loss'):
weights = internal_convert_to_tensor(examples['example_weights'])
return ((
self._l1_loss() +
# Note that here we are using the raw regularization
# (as specified by the user) and *not*
# self._symmetric_l2_regularization().
self._l2_loss(self._options['symmetric_l2_regularization'])) /
math_ops.reduce_sum(math_ops.cast(weights, dtypes.float64)) +
self.unregularized_loss(examples))
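# Illustrative sketch: the two formulas above, rendered in plain NumPy. This
# is a minimal standalone sketch, not the library implementation; it assumes
# `sdca_shrink_l1` applies the standard soft-thresholding proximal step, and
# all names below are hypothetical.
import numpy as np

def squared_loss(labels, predictions, weights):
  # SDCA squared loss, as in unregularized_loss: sum(w * err^2) / (2 * sum(w)).
  err = labels - predictions
  return np.sum(weights * err ** 2) / (2.0 * np.sum(weights))

def l1_shrink(w, l1):
  # Proximal L1 step (soft thresholding): sign(w) * max(|w| - l1, 0).
  return np.sign(w) * np.maximum(np.abs(w) - l1, 0.0)

print(squared_loss(np.array([1.0, 0.0, 1.0]),
                   np.array([0.8, 0.2, 0.4]),
                   np.ones(3)))                    # 0.44 / 6 ~= 0.0733
print(l1_shrink(np.array([0.5, -0.05]), l1=0.1))   # [ 0.4 -0. ]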
|
tensorflow-master
|
tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sharded mutable dense hash table (deprecated).
This module and all its submodules are deprecated. To update or use linear
optimizers, please use the latest version in core:
tensorflow_estimator/python/estimator/canned/linear_optimizer/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from tensorflow.contrib import lookup
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
# TODO(rohanj): This should subclass Trackable and implement
# _gather_saveables_for_checkpoint.
class ShardedMutableDenseHashTable(object):
"""A sharded version of MutableDenseHashTable.
It is designed to be interface compatible with LookupInterface and
MutableDenseHashTable, with the exception of the export method, which is
replaced by an export_sharded method.
  The ShardedMutableDenseHashTable keeps `num_shards` MutableDenseHashTable
  instances internally. The shard for a given key is computed via a modulo
  operation on the key.
"""
# TODO(andreasst): consider moving this to lookup module
@deprecation.deprecated(
None, 'This class is deprecated. To UPDATE or USE linear optimizers, '
'please check its latest version in core: '
'tensorflow_estimator/python/estimator/canned/linear_optimizer/.')
def __init__(self,
key_dtype,
value_dtype,
default_value,
empty_key,
deleted_key,
num_shards=1,
checkpoint=True,
name='ShardedMutableHashTable'):
self._key_dtype = key_dtype
self._value_dtype = value_dtype
with ops.name_scope(name, 'sharded_mutable_hash_table') as scope:
self._table_name = scope
table_shards = []
for i in range(num_shards):
table_shards.append(
lookup.MutableDenseHashTable(
key_dtype=key_dtype,
value_dtype=value_dtype,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
checkpoint=checkpoint,
name='%s-%d-of-%d' % (name, i + 1, num_shards)))
self._table_shards = table_shards
# TODO(andreasst): add a value_shape() method to LookupInterface
# pylint: disable=protected-access
self._value_shape = self._table_shards[0]._value_shape
# pylint: enable=protected-access
@property
def name(self):
return self._table_name
@property
def _num_shards(self):
return len(self._table_shards)
@property
def table_shards(self):
return self._table_shards
def size(self, name=None):
with ops.name_scope(name, 'sharded_mutable_hash_table_size'):
sizes = [
self._table_shards[i].size() for i in range(self._num_shards)
]
return math_ops.add_n(sizes)
def _shard_indices(self, keys):
key_shape = keys.get_shape()
if key_shape.ndims > 1:
# If keys are a matrix (i.e. a single key is a vector), we use the first
# element of each key vector to determine the shard.
keys = array_ops.slice(keys, [0, 0], [key_shape.dims[0].value, 1])
keys = array_ops.reshape(keys, [-1])
indices = math_ops.mod(math_ops.abs(keys), self._num_shards)
return math_ops.cast(indices, dtypes.int32)
def _check_keys(self, keys):
if not keys.get_shape().is_fully_defined():
raise ValueError('Key shape must be fully defined, got %s.' %
keys.get_shape())
if keys.get_shape().ndims != 1 and keys.get_shape().ndims != 2:
raise ValueError('Expected a vector or matrix for keys, got %s.' %
keys.get_shape())
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values."""
if keys.dtype.base_dtype != self._key_dtype:
raise TypeError('Signature mismatch. Keys must be dtype %s, got %s.' %
(self._key_dtype, keys.dtype))
self._check_keys(keys)
num_shards = self._num_shards
if num_shards == 1:
return self._table_shards[0].lookup(keys, name=name)
shard_indices = self._shard_indices(keys)
# TODO(andreasst): support 'keys' that are not vectors
key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
num_shards)
value_shards = [
self._table_shards[i].lookup(key_shards[i], name=name)
for i in range(num_shards)
]
num_keys = keys.get_shape().dims[0]
original_indices = math_ops.range(num_keys)
partitioned_indices = data_flow_ops.dynamic_partition(original_indices,
shard_indices,
num_shards)
result = data_flow_ops.dynamic_stitch(partitioned_indices, value_shards)
result.set_shape(
tensor_shape.TensorShape([num_keys]).concatenate(self._value_shape))
return result
def insert(self, keys, values, name=None):
"""Inserts `keys` in a table."""
self._check_keys(keys)
num_shards = self._num_shards
if num_shards == 1:
return self._table_shards[0].insert(keys, values, name=name)
shard_indices = self._shard_indices(keys)
# TODO(andreasst): support 'keys' that are not vectors
key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
num_shards)
value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
num_shards)
return_values = [
self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
for i in range(num_shards)
]
return control_flow_ops.group(*return_values)
def export_sharded(self, name=None):
"""Returns lists of the keys and values tensors in the sharded table.
Args:
name: name of the table.
Returns:
A pair of lists with the first list containing the key tensors and the
second list containing the value tensors from each shard.
"""
keys_list = []
values_list = []
for table_shard in self._table_shards:
exported_keys, exported_values = table_shard.export(name=name)
keys_list.append(exported_keys)
values_list.append(exported_values)
return keys_list, values_list
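# Illustrative sketch: the shard/partition/stitch round trip used by `lookup`
# above, modeled with plain Python dicts. This is a hypothetical stand-in for
# dynamic_partition/dynamic_stitch semantics, not the library implementation.
def sharded_lookup(keys, shard_tables, num_shards, default_value=-1):
  shard_indices = [abs(k) % num_shards for k in keys]
  # dynamic_partition: split keys (and their original positions) by shard.
  key_shards = [[] for _ in range(num_shards)]
  pos_shards = [[] for _ in range(num_shards)]
  for pos, (k, s) in enumerate(zip(keys, shard_indices)):
    key_shards[s].append(k)
    pos_shards[s].append(pos)
  # Per-shard lookup, then dynamic_stitch: scatter results back by position.
  result = [None] * len(keys)
  for s in range(num_shards):
    for pos, k in zip(pos_shards[s], key_shards[s]):
      result[pos] = shard_tables[s].get(k, default_value)
  return result

_tables = [{0: 'a', 2: 'b'}, {1: 'c', 3: 'd'}]   # shard 0: even keys, 1: odd.
print(sharded_lookup([3, 0, 1, 2], _tables, 2))  # ['d', 'a', 'c', 'b']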
|
tensorflow-master
|
tensorflow/contrib/linear_optimizer/python/ops/sharded_mutable_dense_hashtable.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning (DEPRECATED).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNEstimator
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedEstimator
@@DNNLinearCombinedClassifier
@@DynamicRnnEstimator
@@LinearClassifier
@@LinearEstimator
@@LinearRegressor
@@LogisticRegressor
@@StateSavingRnnEstimator
@@SVM
@@SKCompat
@@Head
@@multi_class_head
@@multi_label_head
@@binary_svm_head
@@regression_head
@@poisson_regression_head
@@multi_head
@@no_op_train_fn
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@read_keyed_batch_examples
@@read_keyed_batch_examples_shared_queue
@@read_keyed_batch_features
@@read_keyed_batch_features_shared_queue
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.contrib.learn.python.learn import learn_runner_lib as learn_runner
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'learn_runner', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/learn/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning with TensorFlow (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/learn/python/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ExportStrategy class represents different flavors of model export (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.deprecation import deprecated
__all__ = ['ExportStrategy']
class ExportStrategy(
collections.namedtuple('ExportStrategy',
['name', 'export_fn', 'strip_default_attrs'])):
"""A class representing a type of model export.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Typically constructed by a utility function specific to the exporter, such as
`saved_model_export_utils.make_export_strategy()`.
Attributes:
name: The directory name under the export base directory where exports of
this type will be written.
export_fn: A function that writes an export, given an estimator, a
destination path, and optionally a checkpoint path and an evaluation
result for that checkpoint. This export_fn() may be run repeatedly during
continuous training, or just once at the end of fixed-length training.
      Note that the export_fn() may choose whether or not to export based on the eval
result or based on an internal timer or any other criterion, if exports
are not desired for every checkpoint.
The signature of this function must be one of:
* `(estimator, export_path) -> export_path`
* `(estimator, export_path, checkpoint_path) -> export_path`
* `(estimator, export_path, checkpoint_path, eval_result) -> export_path`
* `(estimator, export_path, checkpoint_path, eval_result,
strip_default_attrs) -> export_path`
    strip_default_attrs: (Optional) Boolean. If set to True, default attrs in
the `GraphDef` will be stripped on write. This is recommended for better
forward compatibility of the resulting `SavedModel`.
"""
@deprecated(None, 'Please switch to tf.estimator.train_and_evaluate, and use '
'tf.estimator.Exporter.')
def __new__(cls, name, export_fn, strip_default_attrs=None):
return super(ExportStrategy, cls).__new__(
cls, name, export_fn, strip_default_attrs)
def export(self,
estimator,
export_path,
checkpoint_path=None,
eval_result=None):
"""Exports the given Estimator to a specific format.
Args:
estimator: the Estimator to export.
export_path: A string containing a directory where to write the export.
checkpoint_path: The checkpoint path to export. If None (the default),
the strategy may locate a checkpoint (e.g. the most recent) by itself.
eval_result: The output of Estimator.evaluate on this checkpoint. This
should be set only if checkpoint_path is provided (otherwise it is
unclear which checkpoint this eval refers to).
Returns:
The string path to the exported directory.
Raises:
      ValueError: if the export_fn does not have the required signature.
    """
    # Don't break existing export_fns that don't accept checkpoint_path and
    # eval_result.
export_fn_args = tf_inspect.getargspec(self.export_fn).args
kwargs = {}
if 'checkpoint_path' in export_fn_args:
kwargs['checkpoint_path'] = checkpoint_path
if 'eval_result' in export_fn_args:
if 'checkpoint_path' not in export_fn_args:
raise ValueError('An export_fn accepting eval_result must also accept '
'checkpoint_path.')
kwargs['eval_result'] = eval_result
if 'strip_default_attrs' in export_fn_args:
kwargs['strip_default_attrs'] = self.strip_default_attrs
return self.export_fn(estimator, export_path, **kwargs)
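# Illustrative sketch: the signature-introspection dispatch used by `export`
# above, in isolation. A minimal standalone version using the stdlib `inspect`
# module (Python 3); all names here are hypothetical.
import inspect

def call_with_supported_kwargs(fn, *args, **optional_kwargs):
  # Pass through only the keyword arguments the callee actually accepts.
  accepted = inspect.getfullargspec(fn).args
  kwargs = {k: v for k, v in optional_kwargs.items() if k in accepted}
  return fn(*args, **kwargs)

def _demo_export_fn(estimator, export_path, checkpoint_path=None):
  return (export_path, checkpoint_path)

print(call_with_supported_kwargs(
    _demo_export_fn, None, '/tmp/export',
    checkpoint_path='ckpt-42', eval_result={'loss': 0.1}))
# ('/tmp/export', 'ckpt-42') -- eval_result is silently dropped.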
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/export_strategy.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitors tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import shutil
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import testing
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import estimators
from tensorflow.python.client import session as session_lib
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import training_util
class _MyEveryN(learn.monitors.EveryN):
def __init__(self, every_n_steps=100, first_n_steps=1):
super(_MyEveryN, self).__init__(
every_n_steps=every_n_steps, first_n_steps=first_n_steps)
self._steps_begun = []
self._steps_ended = []
self._post_steps = []
@property
def steps_begun(self):
return self._steps_begun
@property
def steps_ended(self):
return self._steps_ended
@property
def post_steps(self):
return self._post_steps
def every_n_step_begin(self, step):
super(_MyEveryN, self).every_n_step_begin(step)
self._steps_begun.append(step)
return []
def every_n_step_end(self, step, outputs):
super(_MyEveryN, self).every_n_step_end(step, outputs)
self._steps_ended.append(step)
return False
def every_n_post_step(self, step, session):
super(_MyEveryN, self).every_n_post_step(step, session)
self._post_steps.append(step)
return False
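# Illustrative sketch: a pure-Python model of the EveryN firing rule that the
# tests below expect -- fire for the first `first_n_steps` steps, then
# whenever `every_n_steps` steps have elapsed since the last firing, and once
# more at the final step. This is hypothetical, derived from the expected step
# lists in the tests, not the monitor's actual implementation.
def _every_n_schedule(every_n_steps, first_n_steps, max_steps):
  fired, last_fired = [], 0
  for step in range(max_steps):
    if (step <= first_n_steps or
        step - last_fired >= every_n_steps or
        step == max_steps - 1):
      fired.append(step)
      last_fired = step
  return fired

print(_every_n_schedule(8, 2, 30))   # Matches test_every_8: [0, 1, 2, 10, 18, 26, 29]
print(_every_n_schedule(2, -1, 30))  # Matches test_every_2: [2, 4, ..., 28, 29]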
class MonitorsTest(test.TestCase):
"""Monitors tests."""
def setUp(self):
# Mock out logging calls so we can verify whether correct tensors are being
# monitored.
self._actual_log = logging.info
def mockLog(*args, **kwargs): # pylint: disable=invalid-name
self.logged_message = args
self._actual_log(*args, **kwargs)
logging.info = mockLog
def tearDown(self):
logging.info = self._actual_log
def _run_monitor(self,
monitor,
num_epochs=3,
num_steps_per_epoch=10,
pass_max_steps=True):
if pass_max_steps:
max_steps = num_epochs * num_steps_per_epoch - 1
else:
max_steps = None
monitor.begin(max_steps=max_steps)
for epoch in xrange(num_epochs):
monitor.epoch_begin(epoch)
should_stop = False
step = epoch * num_steps_per_epoch
next_epoch_step = step + num_steps_per_epoch
while (not should_stop) and (step < next_epoch_step):
tensors = monitor.step_begin(step)
output = ops.get_default_session().run(tensors) if tensors else {}
output = dict(
zip([t.name if isinstance(t, ops.Tensor) else t for t in tensors],
output))
should_stop = monitor.step_end(step=step, output=output)
monitor.post_step(step=step, session=None)
step += 1
monitor.epoch_end(epoch)
monitor.end()
def test_base_monitor(self):
with ops.Graph().as_default() as g, self.session(g):
self._run_monitor(learn.monitors.BaseMonitor())
def test_every_0(self):
monitor = _MyEveryN(every_n_steps=0, first_n_steps=-1)
with ops.Graph().as_default() as g, self.session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(30))
self.assertAllEqual(expected_steps, monitor.steps_begun)
self.assertAllEqual(expected_steps, monitor.steps_ended)
self.assertAllEqual(expected_steps, monitor.post_steps)
def test_every_1(self):
monitor = _MyEveryN(every_n_steps=1, first_n_steps=-1)
with ops.Graph().as_default() as g, self.session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(1, 30))
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_2(self):
monitor = _MyEveryN(every_n_steps=2, first_n_steps=-1)
with ops.Graph().as_default() as g, self.session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(2, 29, 2)) + [29]
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_8(self):
monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
with ops.Graph().as_default() as g, self.session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = [0, 1, 2, 10, 18, 26, 29]
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_8_no_max_steps(self):
monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
with ops.Graph().as_default() as g, self.session(g):
self._run_monitor(
monitor, num_epochs=3, num_steps_per_epoch=10, pass_max_steps=False)
begin_end_steps = [0, 1, 2, 10, 18, 26]
post_steps = [0, 1, 2, 10, 18, 26, 29]
self.assertEqual(begin_end_steps, monitor.steps_begun)
self.assertEqual(begin_end_steps, monitor.steps_ended)
self.assertEqual(post_steps, monitor.post_steps)
def test_every_8_recovered_after_step_begin(self):
monitor = _MyEveryN(every_n_steps=8)
with ops.Graph().as_default() as g, self.session(g):
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
      # It should call begin again since end was not called.
self.assertEqual([8, 8, 16, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_every_8_recovered_after_step_end(self):
monitor = _MyEveryN(every_n_steps=8)
with ops.Graph().as_default() as g, self.session(g):
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_every_8_call_post_step_at_the_end(self):
monitor = _MyEveryN(every_n_steps=8)
with ops.Graph().as_default() as g, self.session(g):
monitor.begin()
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(19)
monitor.step_end(19, output=None)
monitor.post_step(19, session=None)
monitor.end(session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16, 19], monitor.post_steps)
def test_every_8_call_post_step_should_not_be_called_twice(self):
monitor = _MyEveryN(every_n_steps=8)
with ops.Graph().as_default() as g, self.session(g):
monitor.begin()
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(16)
monitor.step_end(16, output=None)
monitor.post_step(16, session=None)
monitor.end(session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_print(self):
with ops.Graph().as_default() as g, self.session(g):
t = constant_op.constant(42.0, name='foo')
self._run_monitor(learn.monitors.PrintTensor(tensor_names=[t.name]))
self.assertRegexpMatches(str(self.logged_message), t.name)
def test_logging_trainable(self):
with ops.Graph().as_default() as g, self.session(g):
var = variables.VariableV1(constant_op.constant(42.0), name='foo')
var.initializer.run()
cof = constant_op.constant(1.0)
loss = math_ops.subtract(
math_ops.multiply(var, cof), constant_op.constant(1.0))
train_step = gradient_descent.GradientDescentOptimizer(0.5).minimize(loss)
ops.get_default_session().run(train_step)
self._run_monitor(learn.monitors.LoggingTrainable('foo'))
self.assertRegexpMatches(str(self.logged_message), var.name)
def test_summary_saver(self):
with ops.Graph().as_default() as g, self.session(g):
log_dir = 'log/dir'
summary_writer = testing.FakeSummaryWriter(log_dir, g)
var = variables.VariableV1(0.0)
var.initializer.run()
tensor = state_ops.assign_add(var, 1.0)
summary_op = summary.scalar('my_summary', tensor)
self._run_monitor(
learn.monitors.SummarySaver(
summary_op=summary_op,
save_steps=8,
summary_writer=summary_writer),
num_epochs=3,
num_steps_per_epoch=10)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=log_dir,
expected_graph=g,
expected_summaries={
0: {
'my_summary': 1.0
},
1: {
'my_summary': 2.0
},
9: {
'my_summary': 3.0
},
17: {
'my_summary': 4.0
},
25: {
'my_summary': 5.0
},
29: {
'my_summary': 6.0
},
})
def _assert_validation_monitor(self,
monitor,
expected_early_stopped=False,
expected_best_step=None,
expected_best_value=None,
expected_best_metrics=None):
self.assertEqual(expected_early_stopped, monitor.early_stopped)
self.assertEqual(expected_best_step, monitor.best_step)
self.assertEqual(expected_best_value, monitor.best_value)
self.assertEqual(expected_best_metrics, monitor.best_metrics)
def test_validation_monitor_no_estimator(self):
monitor = learn.monitors.ValidationMonitor(
x=constant_op.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
with ops.Graph().as_default() as g, self.session(g):
with self.assertRaisesRegexp(ValueError, 'set_estimator'):
self._run_monitor(monitor)
@test.mock.patch.object(estimators, 'Estimator', autospec=True)
@test.mock.patch.object(checkpoint_management, 'latest_checkpoint')
def test_validation_monitor_no_ckpt(self, mock_latest_checkpoint,
mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
mock_latest_checkpoint.return_value = None
# Do nothing with no checkpoint.
monitor = learn.monitors.ValidationMonitor(
x=constant_op.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.session(g):
self._run_monitor(monitor)
self._assert_validation_monitor(monitor)
mock_latest_checkpoint.assert_called_with(model_dir)
@test.mock.patch.object(estimators, 'Estimator', autospec=True)
@test.mock.patch.object(checkpoint_management, 'latest_checkpoint')
def test_validation_monitor_no_early_stopping_rounds(self,
mock_latest_checkpoint,
mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
estimator.evaluate.return_value = {}
mock_latest_checkpoint.return_value = '%s/ckpt' % model_dir
# Do nothing with early_stopping_rounds=None.
monitor = learn.monitors.ValidationMonitor(
x=constant_op.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.session(g):
self._run_monitor(monitor)
self._assert_validation_monitor(monitor)
@test.mock.patch.object(estimators, 'Estimator', autospec=True)
@test.mock.patch.object(checkpoint_management, 'latest_checkpoint')
def test_validation_monitor_invalid_metric(self, mock_latest_checkpoint,
mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
estimator.evaluate.return_value = {}
mock_latest_checkpoint.return_value = '%s/ckpt' % model_dir
# Fail for missing metric.
monitor = learn.monitors.ValidationMonitor(
x=constant_op.constant(2.0), every_n_steps=0, early_stopping_rounds=1)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.session(g):
with self.assertRaisesRegexp(ValueError, 'missing from outputs'):
self._run_monitor(monitor, num_epochs=1, num_steps_per_epoch=1)
@test.mock.patch.object(estimators, 'Estimator', autospec=True)
@test.mock.patch.object(checkpoint_management, 'latest_checkpoint')
def test_validation_monitor(self, mock_latest_checkpoint,
mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
validation_outputs = {'loss': None, 'auc': None}
estimator.evaluate.return_value = validation_outputs
monitor = learn.monitors.ValidationMonitor(
x=constant_op.constant(2.0),
every_n_steps=0,
early_stopping_rounds=2,
check_interval_secs=None)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.session(g):
monitor.begin(max_steps=100)
monitor.epoch_begin(epoch=0)
self.assertEqual(0, estimator.evaluate.call_count)
# Step 0, initial loss.
step = 0
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 42.0
validation_outputs['auc'] = 0.5
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(1, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=0, expected_best_value=42.0,
expected_best_metrics={'loss': 42.0, 'auc': 0.5})
monitor.post_step(step=step, session=None)
# Step 1, same checkpoint, no eval.
step = 1
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(1, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=0, expected_best_value=42.0,
expected_best_metrics={'loss': 42.0, 'auc': 0.5})
monitor.post_step(step=step, session=None)
# Step 2, lower loss.
step = 2
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 40.0
validation_outputs['auc'] = 0.6
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(2, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=2, expected_best_value=40.0,
expected_best_metrics={'loss': 40.0, 'auc': 0.6})
monitor.post_step(step=step, session=None)
# Step 3, higher loss.
step = 3
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 44.0
validation_outputs['auc'] = 0.7
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(3, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=2, expected_best_value=40.0,
expected_best_metrics={'loss': 40.0, 'auc': 0.6})
monitor.post_step(step=step, session=None)
# Step 4, higher loss for 2 steps, early stopping.
step = 4
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 43.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertTrue(monitor.step_end(step=step, output={}))
self.assertEqual(4, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor,
expected_early_stopped=True,
expected_best_step=2,
expected_best_value=40.0,
expected_best_metrics={'loss': 40.0, 'auc': 0.6})
monitor.post_step(step=step, session=None)
monitor.epoch_end(epoch=0)
monitor.end()
@test.mock.patch.object(checkpoint_management, 'latest_checkpoint')
def test_validation_monitor_with_core_estimator(self, mock_latest_checkpoint):
estimator = test.mock.Mock(spec=core_estimator.Estimator)
model_dir = 'model/dir'
estimator.model_dir = model_dir
validation_outputs = {'loss': None, 'auc': None}
estimator.evaluate.return_value = validation_outputs
monitor = learn.monitors.ValidationMonitor(
input_fn=lambda: constant_op.constant(2.0),
every_n_steps=0, early_stopping_rounds=2)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.session(g):
monitor.begin(max_steps=100)
monitor.epoch_begin(epoch=0)
self.assertEqual(0, estimator.evaluate.call_count)
# Step 0, initial loss.
step = 0
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 42.0
validation_outputs['auc'] = 0.5
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(1, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=0, expected_best_value=42.0,
expected_best_metrics={'loss': 42.0, 'auc': 0.5})
monitor.post_step(step=step, session=None)
@test.mock.patch.object(checkpoint_management, 'latest_checkpoint')
def test_validation_monitor_fail_with_core_estimator_and_metrics(
self, mock_latest_checkpoint):
estimator = test.mock.Mock(spec=core_estimator.Estimator)
model_dir = 'model/dir'
estimator.model_dir = model_dir
validation_outputs = {'loss': None}
estimator.evaluate.return_value = validation_outputs
monitor = learn.monitors.ValidationMonitor(
input_fn=lambda: constant_op.constant(2.0),
metrics=constant_op.constant(2.0),
every_n_steps=0, early_stopping_rounds=2)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.session(g):
monitor.begin(max_steps=100)
monitor.epoch_begin(epoch=0)
with self.assertRaisesRegexp(
ValueError,
'tf.estimator.Estimator does not support .* metrics'):
step = 0
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 42.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
def test_graph_dump(self):
monitor0 = learn.monitors.GraphDump()
monitor1 = learn.monitors.GraphDump()
with ops.Graph().as_default() as g, self.session(g):
const_var = variables.VariableV1(42.0, name='my_const')
counter_var = variables.VariableV1(0.0, name='my_counter')
assign_add = state_ops.assign_add(counter_var, 1.0, name='my_assign_add')
variables.global_variables_initializer().run()
self._run_monitor(monitor0, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
step: {
const_var.name: 42.0,
counter_var.name: step + 1.0,
assign_add.name: step + 1.0,
}
for step in xrange(30)
}, monitor0.data)
self._run_monitor(monitor1, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
step: {
const_var.name: 42.0,
counter_var.name: step + 31.0,
assign_add.name: step + 31.0,
}
for step in xrange(30)
}, monitor1.data)
for step in xrange(30):
matched, non_matched = monitor1.compare(monitor0, step=step)
self.assertEqual([const_var.name], matched)
self.assertEqual({
assign_add.name: (step + 31.0, step + 1.0),
counter_var.name: (step + 31.0, step + 1.0),
}, non_matched)
matched, non_matched = monitor0.compare(monitor1, step=step)
self.assertEqual([const_var.name], matched)
self.assertEqual({
assign_add.name: (step + 1.0, step + 31.0),
counter_var.name: (step + 1.0, step + 31.0),
}, non_matched)
def test_capture_variable(self):
monitor = learn.monitors.CaptureVariable(
var_name='my_assign_add:0', every_n=8, first_n=2)
with ops.Graph().as_default() as g, self.session(g):
var = variables.VariableV1(0.0, name='my_var')
var.initializer.run()
state_ops.assign_add(var, 1.0, name='my_assign_add')
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
0: 1.0,
1: 2.0,
2: 3.0,
10: 4.0,
18: 5.0,
26: 6.0,
29: 7.0,
}, monitor.values)
class StopAtStepTest(test.TestCase):
def test_raise_in_both_last_step_and_num_steps(self):
with self.assertRaises(ValueError):
learn.monitors.StopAtStep(num_steps=10, last_step=20)
def test_stop_based_on_last_step(self):
m = learn.monitors.StopAtStep(last_step=10)
m.step_begin(5)
self.assertFalse(m.step_end(5, None))
m.step_begin(9)
self.assertFalse(m.step_end(9, None))
m.step_begin(10)
self.assertTrue(m.step_end(10, None))
m.step_begin(11)
self.assertTrue(m.step_end(11, None))
def test_stop_based_on_num_step(self):
m = learn.monitors.StopAtStep(num_steps=10)
m.step_begin(5)
self.assertFalse(m.step_end(5, None))
m.step_begin(13)
self.assertFalse(m.step_end(13, None))
m.step_begin(14)
self.assertTrue(m.step_end(14, None))
m.step_begin(15)
self.assertTrue(m.step_end(15, None))
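# Illustrative sketch: the two StopAtStep modes exercised above, as a tiny
# hypothetical model -- last_step is an absolute stopping step, while
# num_steps anchors the stopping step to the first step observed
# (first step 5 + num_steps 10 -> stop at step 14, as in the test above).
class _StopRule(object):

  def __init__(self, num_steps=None, last_step=None):
    if (num_steps is None) == (last_step is None):
      raise ValueError('Specify exactly one of num_steps and last_step.')
    self._num_steps = num_steps
    self._last_step = last_step

  def should_stop(self, step):
    if self._last_step is None:
      # Anchor the stopping step to the first step we see.
      self._last_step = step + self._num_steps - 1
    return step >= self._last_step

_rule = _StopRule(num_steps=10)
print([s for s in (5, 13, 14, 15) if _rule.should_stop(s)])  # [14, 15]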
class CheckpointSaverTest(test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = ops.Graph()
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
self.global_step = training_util.get_or_create_global_step()
self.train_op = state_ops.assign_add(self.global_step, 1)
def tearDown(self):
shutil.rmtree(self.model_dir, ignore_errors=True)
def _run(self, monitor, step, train_op, sess):
monitor.step_begin(step)
sess.run(train_op)
monitor.post_step(step, sess)
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
learn.monitors.CheckpointSaver(
self.model_dir, save_secs=10, save_steps=20)
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
learn.monitors.CheckpointSaver(self.model_dir)
def test_save_secs_saves_in_first_step(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
# TODO(gunan): Reenable this test after b/32446874 is fixed.
def disabled_test_save_secs_saves_periodically(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
# Not saved
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
time.sleep(2.5)
self._run(monitor, 3, self.train_op, sess)
# saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
self._run(monitor, 4, self.train_op, sess)
self._run(monitor, 5, self.train_op, sess)
# Not saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
time.sleep(2.5)
self._run(monitor, 6, self.train_op, sess)
# saved
self.assertEqual(6,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_steps_saves_in_first_step(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_steps=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_steps=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
# Not saved
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
self._run(monitor, 3, self.train_op, sess)
# saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
self._run(monitor, 4, self.train_op, sess)
# Not saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
self._run(monitor, 5, self.train_op, sess)
# saved
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_saves_at_end(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
monitor.end(sess)
self.assertEqual(2,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
class FakeMonitor(learn.monitors.BaseMonitor):
def __init__(self):
learn.monitors.BaseMonitor.__init__(self)
self.should_stop = False
self.requested_tensors = []
self.call_counter = collections.Counter()
self.last_begin_step = None
self.last_end_step = None
self.last_post_step = None
def begin(self, max_steps):
self.call_counter['begin'] += 1
def end(self, session):
self.call_counter['end'] += 1
def step_begin(self, step):
self.call_counter['step_begin'] += 1
self.last_begin_step = step
return self.requested_tensors
def step_end(self, step, output):
self.call_counter['step_end'] += 1
self.last_end_step = step
self.output = output
return self.should_stop
def post_step(self, step, session):
self.call_counter['post_step'] += 1
self.last_post_step = step
self.session = session
class RunHookAdapterForMonitorsTest(test.TestCase):
def test_calls_and_steps(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
global_step_tensor = training_util.create_global_step()
inc_5 = state_ops.assign_add(global_step_tensor, 5)
mock_mon = FakeMonitor()
mock_mon2 = FakeMonitor()
hook = learn.monitors.RunHookAdapterForMonitors([mock_mon, mock_mon2])
hook.begin()
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.call_counter['begin'], 1)
sess.run(variables.global_variables_initializer())
sess.run(global_step_tensor.assign(10))
mon_sess = monitored_session._HookedSession(sess=sess, hooks=[hook])
mon_sess.run(inc_5)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.output, {})
self.assertEqual(mon.last_begin_step, 11)
self.assertEqual(mon.last_end_step, 11)
self.assertEqual(mon.last_post_step, 11)
self.assertEqual(mon.call_counter['step_end'], 1)
self.assertEqual(mon.call_counter['step_begin'], 1)
self.assertEqual(mon.call_counter['post_step'], 1)
mon_sess.run(inc_5)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.output, {})
self.assertEqual(mon.last_begin_step, 16)
self.assertEqual(mon.last_end_step, 16)
self.assertEqual(mon.last_post_step, 16)
self.assertEqual(mon.call_counter['step_end'], 2)
self.assertEqual(mon.call_counter['step_begin'], 2)
self.assertEqual(mon.call_counter['post_step'], 2)
hook.end(sess)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.call_counter['end'], 1)
def test_requests(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
training_util.create_global_step()
mock_mon = FakeMonitor()
mock_mon2 = FakeMonitor()
hook = learn.monitors.RunHookAdapterForMonitors([mock_mon, mock_mon2])
hook.begin()
mon_sess = monitored_session._HookedSession(sess=sess, hooks=[hook])
a_tensor = constant_op.constant([0], name='a_tensor')
constant_op.constant([5], name='another_tensor')
constant_op.constant([10], name='third_tensor')
mock_mon.requested_tensors = ['another_tensor']
mock_mon2.requested_tensors = ['third_tensor']
sess.run(variables.global_variables_initializer())
output = mon_sess.run(a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_mon.output['another_tensor'], [5])
self.assertEqual(mock_mon2.output['third_tensor'], [10])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/monitors_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file is deprecated. Use `tensorflow.python.training.session_run_hook`.
See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import session_run_hook
# pylint: disable=invalid-name
SessionRunHook = session_run_hook.SessionRunHook
SessionRunArgs = session_run_hook.SessionRunArgs
SessionRunContext = session_run_hook.SessionRunContext
SessionRunValues = session_run_hook.SessionRunValues
# pylint: enable=invalid-name
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/session_run_hook.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ExportStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.python.platform import test
class ExportStrategyTest(test.TestCase):
def test_no_optional_args_export(self):
model_path = '/path/to/model'
def _export_fn(estimator, export_path):
self.assertTupleEqual((estimator, export_path), (None, None))
return model_path
strategy = export_strategy.ExportStrategy('foo', _export_fn)
self.assertTupleEqual(strategy, ('foo', _export_fn, None))
self.assertIs(strategy.export(None, None), model_path)
def test_checkpoint_export(self):
ckpt_model_path = '/path/to/checkpoint_model'
def _ckpt_export_fn(estimator, export_path, checkpoint_path):
self.assertTupleEqual((estimator, export_path), (None, None))
self.assertEqual(checkpoint_path, 'checkpoint')
return ckpt_model_path
strategy = export_strategy.ExportStrategy('foo', _ckpt_export_fn)
self.assertTupleEqual(strategy, ('foo', _ckpt_export_fn, None))
self.assertIs(strategy.export(None, None, 'checkpoint'), ckpt_model_path)
def test_checkpoint_eval_export(self):
ckpt_eval_model_path = '/path/to/checkpoint_eval_model'
def _ckpt_eval_export_fn(estimator, export_path, checkpoint_path,
eval_result):
self.assertTupleEqual((estimator, export_path), (None, None))
self.assertEqual(checkpoint_path, 'checkpoint')
self.assertEqual(eval_result, 'eval')
return ckpt_eval_model_path
strategy = export_strategy.ExportStrategy('foo', _ckpt_eval_export_fn)
self.assertTupleEqual(strategy, ('foo', _ckpt_eval_export_fn, None))
self.assertIs(strategy.export(None, None, 'checkpoint', 'eval'),
ckpt_eval_model_path)
def test_eval_only_export(self):
def _eval_export_fn(estimator, export_path, eval_result):
del estimator, export_path, eval_result
strategy = export_strategy.ExportStrategy('foo', _eval_export_fn)
self.assertTupleEqual(strategy, ('foo', _eval_export_fn, None))
with self.assertRaisesRegexp(ValueError, 'An export_fn accepting '
'eval_result must also accept '
'checkpoint_path'):
strategy.export(None, None, eval_result='eval')
def test_strip_default_attr_export(self):
strip_default_attrs_model_path = '/path/to/strip_default_attrs_model'
def _strip_default_attrs_export_fn(estimator, export_path,
strip_default_attrs):
self.assertTupleEqual((estimator, export_path), (None, None))
self.assertTrue(strip_default_attrs)
return strip_default_attrs_model_path
strategy = export_strategy.ExportStrategy('foo',
_strip_default_attrs_export_fn,
True)
self.assertTupleEqual(strategy,
('foo', _strip_default_attrs_export_fn, True))
self.assertIs(strategy.export(None, None), strip_default_attrs_model_path)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/export_strategy_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level operations on graphs (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import threading
import time
import numpy as np
from six import reraise
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import ops as contrib_ops
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import summary_io
from tensorflow.python.training import supervisor as tf_supervisor
from tensorflow.python.util.deprecation import deprecated
# Singleton for SummaryWriter per logdir folder.
_SUMMARY_WRITERS = {}
# Lock protecting _SUMMARY_WRITERS
_summary_writer_lock = threading.Lock()
_graph_action_deprecation = deprecated(
'2017-02-15',
'graph_actions.py will be deleted. Use tf.train.* utilities instead. '
'You can use learn/estimators/estimator.py as an example.')
@_graph_action_deprecation
def clear_summary_writers():
"""Clear cached summary writers. Currently only used for unit tests."""
return summary_io.SummaryWriterCache.clear()
@deprecated(None, 'Use `SummaryWriterCache.get` directly.')
def get_summary_writer(logdir):
"""Returns single SummaryWriter per logdir in current run.
Args:
logdir: str, folder to write summaries.
Returns:
Existing `SummaryWriter` object or new one if never wrote to given
directory.
"""
return summary_io.SummaryWriterCache.get(logdir)
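# Illustrative sketch: the lock-protected one-instance-per-key pattern behind
# get_summary_writer above, in miniature. The factory argument is a
# hypothetical stand-in for SummaryWriter construction; `threading` is
# already imported at the top of this module.
_INSTANCE_CACHE = {}
_INSTANCE_CACHE_LOCK = threading.Lock()

def _get_singleton(key, factory):
  # Create on first use under the lock so concurrent callers share a single
  # instance per key.
  with _INSTANCE_CACHE_LOCK:
    if key not in _INSTANCE_CACHE:
      _INSTANCE_CACHE[key] = factory(key)
    return _INSTANCE_CACHE[key]

_w1 = _get_singleton('/tmp/logs', lambda k: object())
_w2 = _get_singleton('/tmp/logs', lambda k: object())
assert _w1 is _w2  # Same instance for the same logdir.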
def _make_saver(graph, keep_checkpoint_max=5):
vars_to_save = (graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) +
graph.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS))
if vars_to_save:
return tf_saver.Saver(vars_to_save,
sharded=True,
max_to_keep=keep_checkpoint_max)
else:
return None
def _restore_from_checkpoint(session, graph, checkpoint_path, saver=None):
logging.info('Loading model from checkpoint: %s.', checkpoint_path)
saver = saver or _make_saver(graph)
if saver:
saver.restore(session, checkpoint_path)
else:
logging.info('No variables found in graph, not creating Saver() object.')
def _run_with_monitors(session, step, tensors, feed_dict, monitors):
"""Runs session for given tensors with monitor callbacks."""
for monitor in monitors:
tensors += monitor.step_begin(step)
tensors = list(set(tensors))
outputs = session.run(tensors, feed_dict=feed_dict)
outputs = dict(zip(
[t.name if isinstance(t, ops.Tensor) else t for t in tensors],
outputs))
should_stop = False
for monitor in monitors:
induce_stop = monitor.step_end(step, outputs)
should_stop = should_stop or induce_stop
return outputs, should_stop
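# Illustrative sketch: how _run_with_monitors above composes per-step tensor
# requests -- each monitor may ask for extra tensors in step_begin, the
# deduplicated union is evaluated once, and every monitor sees the outputs
# keyed by name. A hypothetical, session-free analogue:
class _DivergenceMonitor(object):

  def step_begin(self, step):
    return ['loss:0']  # Request one extra tensor by name.

  def step_end(self, step, outputs):
    return outputs['loss:0'] > 100.0  # Induce a stop on divergence.

def _run_step(step, available_outputs, monitors):
  requested = []
  for m in monitors:
    requested += m.step_begin(step)
  outputs = {name: available_outputs[name] for name in set(requested)}
  should_stop = False
  for m in monitors:
    should_stop = should_stop or m.step_end(step, outputs)
  return outputs, should_stop

print(_run_step(1, {'loss:0': 3.5}, [_DivergenceMonitor()]))
# ({'loss:0': 3.5}, False)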
@_graph_action_deprecation
def train(graph,
output_dir,
train_op,
loss_op,
global_step_tensor=None,
init_op=None,
init_feed_dict=None,
init_fn=None,
log_every_steps=10,
supervisor_is_chief=True,
supervisor_master='',
supervisor_save_model_secs=600,
keep_checkpoint_max=5,
supervisor_save_summaries_steps=100,
feed_fn=None,
steps=None,
fail_on_nan_loss=True,
monitors=None,
max_steps=None):
"""Train a model.
Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on the
  model and is expected to increment the `global_step_tensor`, a scalar integer
  tensor counting training steps. The `loss_op` represents the objective
  function of the training. This function uses `Supervisor` to initialize the
graph (from a checkpoint if one is available in `output_dir`), write summaries
defined in the graph, and write regular checkpoints as defined by
`supervisor_save_model_secs`.
  Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
  `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`, in which case a
  `NanLossDuringTrainingError` is raised.
Args:
graph: A graph to train. It is expected that this graph is not in use
elsewhere.
output_dir: A directory to write outputs to.
train_op: An op that performs one training step when run.
loss_op: A scalar loss tensor.
global_step_tensor: A tensor representing the global step. If none is given,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
current loss.
supervisor_is_chief: Whether the current process is the chief supervisor in
charge of restoring the model and running standard services.
supervisor_master: The master string to use when preparing the session.
supervisor_save_model_secs: Save a checkpoint every
`supervisor_save_model_secs` seconds when training.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. This is simply passed as the max_to_keep
arg to tf.compat.v1.train.Saver constructor.
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
    steps: Trains for this many steps (i.e. until the global step reaches the
      current global step + `steps`).
fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
evaluates to `NaN`. If false, continue training as if nothing happened.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
    max_steps: Total number of steps for which to train the model. If `None`,
      train forever. Two calls of fit(steps=100) mean 200 training iterations,
      whereas two calls of fit(max_steps=100) mean the second call performs no
      iterations, since the first call already did all 100 steps.
Returns:
The final loss value.
Raises:
ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
is not provided. See `tf.contrib.framework.get_global_step` for how we
look up the latter if not provided explicitly.
NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
evaluates to `NaN`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
while True:
try:
return _train_internal(graph,
output_dir,
train_op,
loss_op,
global_step_tensor,
init_op,
init_feed_dict,
init_fn,
log_every_steps,
supervisor_is_chief,
supervisor_master,
supervisor_save_model_secs,
keep_checkpoint_max,
supervisor_save_summaries_steps,
feed_fn,
steps,
fail_on_nan_loss,
monitors,
max_steps)
except errors.AbortedError:
# Happens when PS restarts, keep training.
logging.warning('Training got Aborted error. Keep training.')
def _train_internal(graph,
output_dir,
train_op,
loss_op,
global_step_tensor,
init_op,
init_feed_dict,
init_fn,
log_every_steps,
supervisor_is_chief,
supervisor_master,
supervisor_save_model_secs,
keep_checkpoint_max,
supervisor_save_summaries_steps,
feed_fn,
steps,
fail_on_nan_loss,
monitors,
max_steps):
"""See train."""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if not output_dir:
raise ValueError('Output directory should be non-empty %s.' % output_dir)
if train_op is None:
raise ValueError('Missing train_op.')
if loss_op is None:
raise ValueError('Missing loss_op.')
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
if global_step_tensor is None:
raise ValueError('No "global_step" was provided or found in the graph.')
# Get current step.
try:
start_step = load_variable(output_dir, global_step_tensor.name)
except (errors.NotFoundError, ValueError):
start_step = 0
summary_writer = (get_summary_writer(output_dir)
if supervisor_is_chief else None)
# Add default chief monitors if none were provided.
if not monitors:
monitors = monitors_lib.get_default_monitors(
loss_op=loss_op,
summary_op=logging_ops.get_summary_op(),
save_summary_steps=supervisor_save_summaries_steps,
summary_writer=summary_writer) if supervisor_is_chief else []
# TODO(ipolosukhin): Replace all functionality of Supervisor
# with Chief-Exclusive Monitors.
if not supervisor_is_chief:
      # Prune the list of monitors to the ones runnable on all workers.
monitors = [monitor for monitor in monitors if monitor.run_on_all_workers]
if max_steps is None:
max_steps = (start_step + steps) if steps else None
# Start monitors, can create graph parts.
for monitor in monitors:
monitor.begin(max_steps=max_steps)
supervisor = tf_supervisor.Supervisor(
graph,
init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
init_feed_dict=init_feed_dict,
is_chief=supervisor_is_chief,
logdir=output_dir,
saver=_make_saver(graph, keep_checkpoint_max),
global_step=global_step_tensor,
summary_op=None,
summary_writer=summary_writer,
save_model_secs=supervisor_save_model_secs,
init_fn=init_fn)
session = supervisor.PrepareSession(master=supervisor_master,
start_standard_services=True)
supervisor.StartQueueRunners(session)
with session:
get_current_step = lambda: session.run(global_step_tensor)
start_step = get_current_step()
last_step = start_step
last_log_step = start_step
loss_value = None
logging.info('Training steps [%d,%s)', last_step, 'inf'
if max_steps is None else str(max_steps))
excinfo = None
try:
while not supervisor.ShouldStop() and (
(max_steps is None) or (last_step < max_steps)):
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
outputs, should_stop = _run_with_monitors(
session, last_step + 1, [train_op, loss_op], feed_dict, monitors)
loss_value = outputs[loss_op.name]
if np.isnan(loss_value):
failure_message = 'Model diverged with loss = NaN.'
if fail_on_nan_loss:
logging.error(failure_message)
raise monitors_lib.NanLossDuringTrainingError()
else:
logging.warning(failure_message)
if should_stop:
break
this_step = get_current_step()
if this_step <= last_step:
logging.error(
'Global step was not incremented by train op at step %s'
': new step %d', last_step, this_step)
last_step = this_step
is_last_step = (max_steps is not None) and (last_step >= max_steps)
if is_last_step or (last_step - last_log_step >= log_every_steps):
logging.info(
'training step %d, loss = %.5f (%.3f sec/batch).',
last_step, loss_value, float(time.time() - start_time))
last_log_step = last_step
except errors.OutOfRangeError as e:
logging.warn('Got exception during tf.learn training loop possibly '
'due to exhausted input queue %s.', e)
except StopIteration:
        logging.info('Exhausted input iterator.')
except BaseException as e: # pylint: disable=broad-except
# Hold on to any other exceptions while we try recording a final
# checkpoint and summary.
excinfo = sys.exc_info()
finally:
try:
# Call supervisor.Stop() from within a try block because it re-raises
# exceptions thrown by the supervised threads.
supervisor.Stop(close_summary_writer=False)
# Save one last checkpoint and summaries
# TODO(wicke): This should be handled by Supervisor
# In case we encountered an exception in the try block before we updated
# last_step, update it here (again).
last_step = get_current_step()
if supervisor_is_chief:
ckpt_path = supervisor.save_path
logging.info('Saving checkpoint for step %d to checkpoint: %s.',
last_step, ckpt_path)
supervisor.saver.save(session, ckpt_path, global_step=last_step)
# Finish monitors.
for monitor in monitors:
monitor.end()
# catch OutOfRangeError which is thrown when queue is out of data (and for
# other reasons as well).
except errors.OutOfRangeError as e:
logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
'due to exhausted input queue. Note: summary_op is not '
'expected to trigger dequeues. %s.', e)
except BaseException as e: # pylint: disable=broad-except
# If we don't already have an exception to re-raise, raise this one.
if not excinfo:
raise
# Otherwise, log this one and raise the other in the finally block.
logging.error('Got exception during tf.learn final checkpoint %s.', e)
finally:
if excinfo:
reraise(*excinfo)
return loss_value
def _get_first_op_from_collection(collection_name):
elements = ops.get_collection(collection_name)
if elements:
return elements[0]
return None
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
def _get_ready_op():
ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
return ready_op
def _get_local_init_op():
"""Returns the local init ops to initialize tables and local variables."""
local_init_op = _get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [
variables.local_variables_initializer(),
lookup_ops.tables_initializer()
]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
return local_init_op
def _eval_results_to_str(eval_results):
return ', '.join('%s = %s' % (k, v) for k, v in sorted(eval_results.items()))
def _write_summary_results(output_dir, eval_results, current_global_step):
"""Writes eval results into summary file in given dir."""
logging.info('Saving evaluation summary for step %d: %s', current_global_step,
_eval_results_to_str(eval_results))
summary_writer = get_summary_writer(output_dir)
summary = summary_pb2.Summary()
for key in eval_results:
if eval_results[key] is None:
continue
value = summary.value.add()
value.tag = key
if (isinstance(eval_results[key], np.float32) or
isinstance(eval_results[key], float)):
value.simple_value = float(eval_results[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary, current_global_step)
summary_writer.flush()
@_graph_action_deprecation
def evaluate(graph,
output_dir,
checkpoint_path,
eval_dict,
update_op=None,
global_step_tensor=None,
supervisor_master='',
log_every_steps=10,
feed_fn=None,
max_steps=None):
"""Evaluate a model loaded from a checkpoint.
Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
loop for `max_steps` steps, or until an exception (generally, an
end-of-input signal from a reader operation) is raised from running
`eval_dict`.
In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
every `log_every_steps` steps, they are logged. At the very end of evaluation,
a summary is evaluated (finding the summary ops using `Supervisor`'s logic)
and written to `output_dir`.
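  Example (a sketch; `build_eval_graph` is a hypothetical helper that returns
  an `update_op` and a dict of metric value tensors):

  ```
  graph = ops.Graph()
  with graph.as_default():
    update_op, eval_dict = build_eval_graph()
  results, global_step = evaluate(graph, output_dir='/tmp/my_model_eval',
                                  checkpoint_path='/tmp/my_model',
                                  eval_dict=eval_dict, update_op=update_op,
                                  max_steps=100)
  ```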
Args:
    graph: A `Graph` to evaluate. It is expected that this graph is not in use
      elsewhere.
output_dir: A string containing the directory to write a summary to.
checkpoint_path: A string containing the path to a checkpoint to restore.
Can be `None` if the graph doesn't require loading any variables.
eval_dict: A `dict` mapping string names to tensors to evaluate. It is
evaluated in every logging step. The result of the final evaluation is
returned. If `update_op` is None, then it's evaluated in every step. If
`max_steps` is `None`, this should depend on a reader that will raise an
end-of-input exception when the inputs are exhausted.
update_op: A `Tensor` which is run in every step.
global_step_tensor: A `Variable` containing the global step. If `None`,
one is extracted from the graph using the same logic as in `Supervisor`.
Used to place eval summaries on training curves.
supervisor_master: The master string to use when preparing the session.
log_every_steps: Integer. Output logs every `log_every_steps` evaluation
steps. The logs contain the `eval_dict` and timing information.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
max_steps: Integer. Evaluate `eval_dict` this many times.
Returns:
A tuple `(eval_results, global_step)`:
eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
that are the result of running eval_dict in the last step. `None` if no
eval steps were run.
global_step: The global step this evaluation corresponds to.
Raises:
ValueError: if `output_dir` is empty.
"""
if not output_dir:
raise ValueError('Output directory should be non-empty %s.' % output_dir)
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
# Create or get summary op, global_step and saver.
saver = _get_saver()
local_init_op = _get_local_init_op()
ready_for_local_init_op = _get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
ready_op = _get_ready_op()
session_manager = session_manager_lib.SessionManager(
local_init_op=local_init_op,
ready_op=ready_op,
ready_for_local_init_op=ready_for_local_init_op)
session, initialized = session_manager.recover_session(
master=supervisor_master,
saver=saver,
checkpoint_dir=checkpoint_path)
# Start queue runners.
coord = coordinator.Coordinator()
threads = queue_runner.start_queue_runners(session, coord)
with session:
if not initialized:
logging.warning('Failed to initialize from %s.', checkpoint_path)
# TODO(ipolosukhin): This should be failing, but old code relies on that.
session.run(variables.global_variables_initializer())
if checkpoint_path:
_restore_from_checkpoint(session, graph, checkpoint_path, saver)
current_global_step = session.run(global_step_tensor)
eval_results = None
# TODO(amodei): Fix this to run through the eval set exactly once.
step = 0
eval_step = None
feed_dict = None
logging.info('Eval steps [%d,%s) for training step %d.', step,
'inf' if max_steps is None
else str(max_steps), current_global_step)
try:
try:
while (max_steps is None) or (step < max_steps):
step += 1
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
if update_op is not None:
session.run(update_op, feed_dict=feed_dict)
else:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# TODO(wicke): We should assert that the global step hasn't changed.
if step % log_every_steps == 0:
if eval_step is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
duration = time.time() - start_time
logging.info('Results after %d steps (%.3f sec/batch): %s.',
step, float(duration),
_eval_results_to_str(eval_results))
finally:
if eval_results is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# Stop session first, before queue runners.
session.close()
# Stop queue runners.
try:
coord.request_stop()
coord.join(threads, stop_grace_period_secs=120)
except (RuntimeError, errors.CancelledError) as e:
logging.warning('Coordinator didn\'t stop cleanly: %s', e)
# catch OutOfRangeError which is thrown when queue is out of data (and for
# other reasons as well).
except errors.OutOfRangeError as e:
if max_steps is None:
logging.info('Input queue is exhausted.')
else:
logging.warn('Input queue is exhausted: %s.', e)
    # catch StopIteration which is thrown when the DataReader is out of data.
except StopIteration as e:
if max_steps is None:
logging.info('Input iterator is exhausted.')
else:
logging.warn('Input iterator is exhausted: %s.', e)
# Save summaries for this evaluation.
_write_summary_results(output_dir, eval_results, current_global_step)
return eval_results, current_global_step
@_graph_action_deprecation
def run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):
"""Run `output_dict` tensors `n` times, with the same `feed_dict` each run.
Args:
output_dict: A `dict` mapping string names to tensors to run. Must all be
from the same graph.
feed_dict: `dict` of input values to feed each run.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
n: Number of times to repeat.
Returns:
A list of `n` `dict` objects, each containing values read from `output_dict`
tensors.
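  Example (a sketch; `logits` and `x` are assumed to be tensors from the same
  graph, e.g. a model output and its input placeholder):

  ```
  results = run_n({'logits': logits}, feed_dict={x: batch}, n=3)
  print(results[0]['logits'])
  ```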
"""
return run_feeds(
output_dict=output_dict,
feed_dicts=itertools.repeat(feed_dict, n),
restore_checkpoint_path=restore_checkpoint_path)
@_graph_action_deprecation
def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
"""Run `output_dict` tensors with each input in `feed_dicts`.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
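  Example (a sketch; `out_tensor` and placeholder `x` are assumed to come
  from the same graph):

  ```
  for result in run_feeds_iter({'out': out_tensor},
                               feed_dicts=[{x: batch1}, {x: batch2}]):
    print(result['out'])
  ```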
Args:
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dicts: Iterable of `dict` objects of input values to feed.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
Yields:
A sequence of dicts of values read from `output_dict` tensors, one item
yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
values are the results read from the corresponding `Tensor` in
`output_dict`.
Raises:
ValueError: if `output_dict` or `feed_dicts` is None or empty.
"""
if not output_dict:
raise ValueError('output_dict is invalid: %s.' % output_dict)
if not feed_dicts:
raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)
graph = contrib_ops.get_graph_from_inputs(output_dict.values())
with graph.as_default() as g:
with tf_session.Session('') as session:
session.run(
resources.initialize_resources(resources.shared_resources() +
resources.local_resources()))
if restore_checkpoint_path:
_restore_from_checkpoint(session, g, restore_checkpoint_path)
else:
session.run(variables.global_variables_initializer())
session.run(variables.local_variables_initializer())
session.run(lookup_ops.tables_initializer())
coord = coordinator.Coordinator()
threads = None
try:
threads = queue_runner.start_queue_runners(session, coord=coord)
for f in feed_dicts:
yield session.run(output_dict, f)
finally:
coord.request_stop()
if threads:
coord.join(threads, stop_grace_period_secs=120)
@_graph_action_deprecation
def run_feeds(*args, **kwargs):
"""See run_feeds_iter(). Returns a `list` instead of an iterator."""
return list(run_feeds_iter(*args, **kwargs))
@_graph_action_deprecation
def infer(restore_checkpoint_path, output_dict, feed_dict=None):
"""Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dict: `dict` object mapping `Tensor` objects to input values to feed.
Returns:
Dict of values read from `output_dict` tensors. Keys are the same as
`output_dict`, values are the results read from the corresponding `Tensor`
in `output_dict`.
Raises:
    ValueError: if `output_dict` is None or empty.
"""
return run_feeds(output_dict=output_dict,
feed_dicts=[feed_dict] if feed_dict is not None else [None],
restore_checkpoint_path=restore_checkpoint_path)[0]
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/graph_actions.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various high level TF models (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.learn.python.learn.ops import losses_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.summary import summary
from tensorflow.python.util.deprecation import deprecated
@deprecated(None, 'Consider using a tf.estimator.LinearRegressor')
def linear_regression_zero_init(x, y):
"""Linear regression subgraph with zero-value initial weights and bias.
Args:
x: tensor or placeholder for input features.
y: tensor or placeholder for labels.
Returns:
Predictions and loss tensors.
"""
return linear_regression(x, y, init_mean=0.0, init_stddev=0.0)
@deprecated(None, 'Consider using tf.estimator.LinearClassifier')
def logistic_regression_zero_init(x, y):
"""Logistic regression subgraph with zero-value initial weights and bias.
Args:
x: tensor or placeholder for input features.
y: tensor or placeholder for labels.
Returns:
Predictions and loss tensors.
"""
return logistic_regression(x, y, init_mean=0.0, init_stddev=0.0)
@deprecated(None, 'Consider using a class from tf.estimator.')
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
"""Creates linear regression TensorFlow subgraph.
Args:
x: tensor or placeholder for input features.
y: tensor or placeholder for labels.
init_mean: the mean value to use for initialization.
init_stddev: the standard deviation to use for initialization.
Returns:
Predictions and loss tensors.
Side effects:
The variables linear_regression.weights and linear_regression.bias are
initialized as follows. If init_mean is not None, then initialization
will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
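  Example (a sketch using TF1-style placeholders; shapes are illustrative):

  ```
  x = array_ops_.placeholder(dtypes.float32, [None, 3])
  y = array_ops_.placeholder(dtypes.float32, [None, 1])
  predictions, loss = linear_regression(x, y, init_mean=0.0, init_stddev=1.0)
  ```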
"""
with vs.variable_scope('linear_regression'):
scope_name = vs.get_variable_scope().name
summary.histogram('%s.x' % scope_name, x)
summary.histogram('%s.y' % scope_name, y)
dtype = x.dtype.base_dtype
y_shape = y.get_shape()
if len(y_shape) == 1:
output_shape = 1
else:
output_shape = y_shape[1]
# Set up the requested initialization.
if init_mean is None:
weights = vs.get_variable(
'weights', [x.get_shape()[1], output_shape], dtype=dtype)
bias = vs.get_variable('bias', [output_shape], dtype=dtype)
else:
weights = vs.get_variable(
'weights', [x.get_shape()[1], output_shape],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
bias = vs.get_variable(
'bias', [output_shape],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
summary.histogram('%s.weights' % scope_name, weights)
summary.histogram('%s.bias' % scope_name, bias)
return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
@deprecated(None, 'Consider using a class from tf.estimator.')
def logistic_regression(x,
y,
class_weight=None,
init_mean=None,
init_stddev=1.0):
"""Creates logistic regression TensorFlow subgraph.
Args:
x: tensor or placeholder for input features,
shape should be [batch_size, n_features].
y: tensor or placeholder for labels (one-hot),
shape should be [batch_size, n_classes].
    class_weight: tensor, [n_classes], holding the weight of each class. If
      not provided, the graph is checked for a tensor named `class_weight:0`;
      if that is not present either, all ones are used.
init_mean: the mean value to use for initialization.
init_stddev: the standard deviation to use for initialization.
Returns:
Predictions and loss tensors.
Side effects:
    The variables logistic_regression.weights and logistic_regression.bias are
    initialized as follows. If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
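  Example (a sketch using TF1-style placeholders; `y` holds one-hot labels):

  ```
  x = array_ops_.placeholder(dtypes.float32, [None, 3])
  y = array_ops_.placeholder(dtypes.float32, [None, 2])
  predictions, loss = logistic_regression(x, y)
  ```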
"""
with vs.variable_scope('logistic_regression'):
scope_name = vs.get_variable_scope().name
summary.histogram('%s.x' % scope_name, x)
summary.histogram('%s.y' % scope_name, y)
dtype = x.dtype.base_dtype
# Set up the requested initialization.
if init_mean is None:
weights = vs.get_variable(
'weights', [x.get_shape()[1], y.get_shape()[-1]], dtype=dtype)
bias = vs.get_variable('bias', [y.get_shape()[-1]], dtype=dtype)
else:
weights = vs.get_variable(
'weights', [x.get_shape()[1], y.get_shape()[-1]],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
bias = vs.get_variable(
'bias', [y.get_shape()[-1]],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
summary.histogram('%s.weights' % scope_name, weights)
summary.histogram('%s.bias' % scope_name, bias)
# If no class weight provided, try to retrieve one from pre-defined
# tensor name in the graph.
if not class_weight:
try:
class_weight = ops.get_default_graph().get_tensor_by_name(
'class_weight:0')
except KeyError:
pass
return losses_ops.softmax_classifier(
x, y, weights, bias, class_weight=class_weight)
## This will be in TensorFlow 0.7.
## TODO(ilblackdragon): Clean this up when it's released
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply
reverses the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
  s_joined = array_ops_.stack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
  result = array_ops_.unstack(s_reversed)
return result
@deprecated(None, 'Please consider `tf.nn.bidirectional_dynamic_rnn`.')
def bidirectional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states
are ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
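  Example (a sketch; a length-5 sequence of rank-2 inputs with the batch
  dimension left unspecified):

  ```
  cell_fw = contrib_rnn.GRUCell(8)
  cell_bw = contrib_rnn.GRUCell(8)
  inputs = [array_ops_.placeholder(dtypes.float32, [None, 4])
            for _ in range(5)]
  # Each output has shape [batch_size, 16]: 8 forward + 8 backward units.
  outputs, state = bidirectional_rnn(cell_fw, cell_bw, inputs,
                                     dtype=dtypes.float32)
  ```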
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
[batch_size x cell.state_size].
initial_state_bw: (optional) Same as for initial_state_fw.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int64 vector (tensor) of size
[batch_size],
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to "BiRNN"
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input), which
are depth-concatenated forward and backward outputs
state is the concatenated final state of the forward and backward RNN
Raises:
TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell_fw, contrib_rnn.RNNCell):
raise TypeError('cell_fw must be an instance of RNNCell')
if not isinstance(cell_bw, contrib_rnn.RNNCell):
raise TypeError('cell_bw must be an instance of RNNCell')
if not isinstance(inputs, list):
raise TypeError('inputs must be a list')
if not inputs:
raise ValueError('inputs must not be empty')
name = scope or 'BiRNN'
# Forward direction
with vs.variable_scope(name + '_FW'):
output_fw, state_fw = contrib_rnn.static_rnn(cell_fw, inputs,
initial_state_fw, dtype,
sequence_length)
# Backward direction
with vs.variable_scope(name + '_BW'):
tmp, state_bw = contrib_rnn.static_rnn(
cell_bw,
_reverse_seq(inputs, sequence_length), initial_state_bw, dtype,
sequence_length)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
outputs = [
array_ops_.concat([fw, bw], 1) for fw, bw in zip(output_fw, output_bw)
]
return outputs, array_ops_.concat([state_fw, state_bw], 1)
# End of TensorFlow 0.7
@deprecated(None, 'Please consider tensorflow/tensor2tensor.')
def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional,
target_predictor_fn, sequence_length, initial_state,
attn_length, attn_size, attn_vec_size):
"""Returns a function that creates a RNN TensorFlow subgraph.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument `x` for input and returns transformed `x`.
bidirectional: boolean, Whether this is a bidirectional rnn.
target_predictor_fn: Function that will predict target from input
features. This can be logistic regression,
linear regression or any other model,
that takes `x`, `y` and returns predictions and loss
tensors.
sequence_length: If sequence_length is provided, dynamic calculation is
performed. This saves computational time when unrolling past max sequence
length. Required for bidirectional RNNs.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell
state.
Returns:
A function that creates the subgraph.
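  Example (a sketch; `my_embedding_fn` is a hypothetical input transform, and
  `logistic_regression` from this module serves as the predictor):

  ```
  model_fn = get_rnn_model(rnn_size=32, cell_type='gru', num_layers=2,
                           input_op_fn=my_embedding_fn, bidirectional=False,
                           target_predictor_fn=logistic_regression,
                           sequence_length=None, initial_state=None,
                           attn_length=None, attn_size=None,
                           attn_vec_size=None)
  predictions, loss = model_fn(x, y)
  ```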
"""
def rnn_estimator(x, y):
"""RNN estimator with target predictor function on top."""
x = input_op_fn(x)
if cell_type == 'rnn':
cell_fn = contrib_rnn.BasicRNNCell
elif cell_type == 'gru':
cell_fn = contrib_rnn.GRUCell
elif cell_type == 'lstm':
cell_fn = functools.partial(
contrib_rnn.BasicLSTMCell, state_is_tuple=False)
else:
raise ValueError('cell_type {} is not supported. '.format(cell_type))
# TODO(ipolosukhin): state_is_tuple=False is deprecated
if bidirectional:
# forward direction cell
fw_cell = lambda: cell_fn(rnn_size)
bw_cell = lambda: cell_fn(rnn_size)
# attach attention cells if specified
if attn_length is not None:
def attn_fw_cell():
return contrib_rnn.AttentionCellWrapper(
fw_cell(),
attn_length=attn_length,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
state_is_tuple=False)
def attn_bw_cell():
return contrib_rnn.AttentionCellWrapper(
bw_cell(),
attn_length=attn_length,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
state_is_tuple=False)
else:
attn_fw_cell = fw_cell
attn_bw_cell = bw_cell
rnn_fw_cell = contrib_rnn.MultiRNNCell(
[attn_fw_cell() for _ in range(num_layers)], state_is_tuple=False)
# backward direction cell
rnn_bw_cell = contrib_rnn.MultiRNNCell(
[attn_bw_cell() for _ in range(num_layers)], state_is_tuple=False)
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
_, encoding = bidirectional_rnn(
rnn_fw_cell,
rnn_bw_cell,
x,
dtype=dtypes.float32,
sequence_length=sequence_length,
initial_state_fw=initial_state,
initial_state_bw=initial_state)
else:
rnn_cell = lambda: cell_fn(rnn_size)
if attn_length is not None:
def attn_rnn_cell():
return contrib_rnn.AttentionCellWrapper(
rnn_cell(),
attn_length=attn_length,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
state_is_tuple=False)
else:
attn_rnn_cell = rnn_cell
cell = contrib_rnn.MultiRNNCell(
[attn_rnn_cell() for _ in range(num_layers)], state_is_tuple=False)
_, encoding = contrib_rnn.static_rnn(
cell,
x,
dtype=dtypes.float32,
sequence_length=sequence_length,
initial_state=initial_state)
return target_predictor_fn(encoding, y)
return rnn_estimator
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/models.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The metric spec class to flexibly connect models and metrics (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.deprecation import deprecated
def _assert_named_args(sentinel):
if sentinel is not None:
raise ValueError(
'`metric_fn` requires named args: '
'`labels`, `predictions`, and optionally `weights`.')
def _args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
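  Example (illustrative; assumes `functools` is imported):

  ```
  def metric(labels, predictions, weights=None):
    pass
  _args(metric)  # ('labels', 'predictions', 'weights')
  _args(functools.partial(metric, weights=1.0))  # ('labels', 'predictions')
  ```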
"""
if hasattr(fn, 'func') and hasattr(fn, 'keywords'):
# Handle functools.partial and similar objects.
return tuple(
[arg for arg in _args(fn.func) if arg not in set(fn.keywords.keys())])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
_CANONICAL_LABELS_ARG = 'labels'
_LABELS_ARGS = set((_CANONICAL_LABELS_ARG, 'label', 'targets', 'target'))
_CANONICAL_PREDICTIONS_ARG = 'predictions'
_PREDICTIONS_ARGS = set((_CANONICAL_PREDICTIONS_ARG, 'prediction',
'logits', 'logit'))
_CANONICAL_WEIGHTS_ARG = 'weights'
_WEIGHTS_ARGS = set((_CANONICAL_WEIGHTS_ARG, 'weight'))
def _matching_arg(
fn_name, fn_args, candidate_args, canonical_arg, is_required=False):
"""Find single argument in `args` from `candidate_args`.
Args:
fn_name: Function name, only used for error string.
fn_args: String argument names to `fn_name` function.
candidate_args: Candidate argument names to find in `args`.
canonical_arg: Canonical argument name in `candidate_args`. This is only
used to log a warning if a non-canonical match is found.
is_required: Whether function is required to have an arg in
`candidate_args`.
Returns:
String argument name if found, or `None` if not found.
Raises:
ValueError: if 2 candidates are found, or 0 are found and `is_required` is
set.
"""
assert canonical_arg in candidate_args # Sanity check.
matching_args = candidate_args.intersection(fn_args)
if len(matching_args) > 1:
raise ValueError(
'Ambiguous arguments %s, must provide only one of %s.' % (
matching_args, candidate_args))
matching_arg = matching_args.pop() if matching_args else None
if matching_arg:
if matching_arg != canonical_arg:
logging.warning(
'Canonical arg %s missing from %s(%s), using %s.',
canonical_arg, fn_name, fn_args, matching_arg)
elif is_required:
raise ValueError(
'%s missing from %s(%s).' % (candidate_args, fn_name, fn_args))
return matching_arg
def _fn_name(fn):
if hasattr(fn, '__name__'):
return fn.__name__
if hasattr(fn, 'func') and hasattr(fn.func, '__name__'):
return fn.func.__name__ # If it's a functools.partial.
return str(fn)
def _adapt_metric_fn(
metric_fn, metric_fn_name, is_labels_required, is_weights_required):
"""Adapt `metric_fn` to take only named args.
This returns a function that takes only named args `labels`, `predictions`,
and `weights`, and invokes `metric_fn` according to the following rules:
- If `metric_fn` args include exactly one of `_LABELS_ARGS`, that arg is
passed (usually by name, but positionally if both it and `predictions` need
to be passed positionally). Otherwise, `labels` are omitted.
- If `metric_fn` args include exactly one of `_PREDICTIONS_ARGS`, that arg is
passed by name. Otherwise, `predictions` are passed positionally as the
first non-label argument.
- If exactly one of `_WEIGHTS_ARGS` is provided, that arg is passed by
name.
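  Example (illustrative; `my_metric` names its labels arg `targets`, so the
  adapter maps the canonical `labels` onto it):

  ```
  def my_metric(predictions, targets):
    pass
  adapted = _adapt_metric_fn(my_metric, 'my_metric', False, False)
  # Invokes my_metric(predictions=predictions, targets=labels).
  adapted(labels=labels, predictions=predictions)
  ```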
Args:
metric_fn: Metric function to be wrapped.
metric_fn_name: `metric_fn` name, only used for logging.
is_labels_required: Whether `labels` is a required arg.
is_weights_required: Whether `weights` is a required arg.
Returns:
Function accepting only named args `labels, `predictions`, and `weights`,
and passing those to `metric_fn`.
Raises:
ValueError: if one of the following is true:
- `metric_fn` has more than one arg of `_LABELS_ARGS`, `_PREDICTIONS_ARGS`,
or `_WEIGHTS_ARGS`
- `is_labels_required` is true, and `metric_fn` has no arg from
`_LABELS_ARGS`.
- `is_weights_required` is true, and `metric_fn` has no arg from
`_WEIGHTS_ARGS`.
"""
args = _args(metric_fn)
labels_arg = _matching_arg(
metric_fn_name, args, _LABELS_ARGS, _CANONICAL_LABELS_ARG,
is_labels_required)
predictions_arg = _matching_arg(
metric_fn_name, args, _PREDICTIONS_ARGS, _CANONICAL_PREDICTIONS_ARG)
weights_arg = _matching_arg(
metric_fn_name, args, _WEIGHTS_ARGS, _CANONICAL_WEIGHTS_ARG,
is_weights_required)
# pylint: disable=invalid-name
if labels_arg:
if predictions_arg:
# Both labels and predictions are named args.
def _named_metric_fn(
_sentinel=None, labels=None, predictions=None, weights=None):
_assert_named_args(_sentinel)
kwargs = {
labels_arg: labels,
predictions_arg: predictions,
}
if weights is not None:
kwargs[weights_arg] = weights
return metric_fn(**kwargs)
return _named_metric_fn
if labels_arg == args[0]:
# labels is a named arg, and first. predictions is not a named arg, so we
# want to pass it as the 2nd positional arg (i.e., the first non-labels
# position), which means passing both positionally.
def _positional_metric_fn(
_sentinel=None, labels=None, predictions=None, weights=None):
_assert_named_args(_sentinel)
# TODO(ptucker): Should we support metrics that take only labels?
        # Currently, if you want a streaming mean of a label, you have to wrap
        # it in a fn that takes and discards predictions.
if weights is None:
return metric_fn(labels, predictions)
return metric_fn(labels, predictions, **{weights_arg: weights})
return _positional_metric_fn
# labels is a named arg, and not first, so we pass predictions positionally
# and labels by name.
def _positional_predictions_metric_fn(
_sentinel=None, labels=None, predictions=None, weights=None):
_assert_named_args(_sentinel)
kwargs = {
labels_arg: labels,
}
if weights is not None:
kwargs[weights_arg] = weights
return metric_fn(predictions, **kwargs)
return _positional_predictions_metric_fn
if predictions_arg:
# No labels, and predictions is named, so we pass the latter as a named arg.
def _named_no_labels_metric_fn(
_sentinel=None, labels=None, predictions=None, weights=None):
del labels
_assert_named_args(_sentinel)
kwargs = {
predictions_arg: predictions,
}
# TODO(ptucker): Should we allow weights with no labels?
if weights is not None:
kwargs[weights_arg] = weights
return metric_fn(**kwargs)
return _named_no_labels_metric_fn
# Neither labels nor predictions are named, so we just pass predictions as the
# first arg.
def _positional_no_labels_metric_fn(
_sentinel=None, labels=None, predictions=None, weights=None):
del labels
_assert_named_args(_sentinel)
if weights is None:
return metric_fn(predictions)
# TODO(ptucker): Should we allow weights with no labels?
return metric_fn(predictions, **{weights_arg: weights})
return _positional_no_labels_metric_fn
class MetricSpec(object):
"""MetricSpec connects a model to metric functions.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
The MetricSpec class contains all information necessary to connect the
output of a `model_fn` to the metrics (usually, streaming metrics) that are
used in evaluation.
It is passed in the `metrics` argument of `Estimator.evaluate`. The
`Estimator` then knows which predictions, labels, and weight to use to call a
given metric function.
When building the ops to run in evaluation, an `Estimator` will call
`create_metric_ops`, which will connect the given `metric_fn` to the model
as detailed in the docstring for `create_metric_ops`, and return the metric.
Example:
  Assume a model has an input function which returns inputs containing (among
  other things) a tensor with key "input_key", and a labels dictionary
  containing "label_key". Let's assume that the `model_fn` for this model
  returns a prediction with key "prediction_key".
In order to compute the accuracy of the "prediction_key" prediction, we
would add
```
"prediction accuracy": MetricSpec(metric_fn=prediction_accuracy_fn,
prediction_key="prediction_key",
label_key="label_key")
```
to the metrics argument to `evaluate`. `prediction_accuracy_fn` can be either
a predefined function in metric_ops (e.g., `streaming_accuracy`) or a custom
function you define.
If we would like the accuracy to be weighted by "input_key", we can add that
as the `weight_key` argument.
```
"prediction accuracy": MetricSpec(metric_fn=prediction_accuracy_fn,
prediction_key="prediction_key",
label_key="label_key",
weight_key="input_key")
```
An end-to-end example is as follows:
```
estimator = tf.contrib.learn.Estimator(...)
estimator.fit(...)
_ = estimator.evaluate(
input_fn=input_fn,
steps=1,
metrics={
'prediction accuracy':
metric_spec.MetricSpec(
metric_fn=prediction_accuracy_fn,
prediction_key="prediction_key",
label_key="label_key")
})
```
"""
@deprecated(None, 'Use tf.estimator.EstimatorSpec.eval_metric_ops.')
def __init__(self,
metric_fn,
prediction_key=None,
label_key=None,
weight_key=None):
"""Constructor.
Creates a MetricSpec.
Args:
metric_fn: A function to use as a metric. See `_adapt_metric_fn` for
rules on how `predictions`, `labels`, and `weights` are passed to this
function. This must return either a single `Tensor`, which is
interpreted as a value of this metric, or a pair
`(value_op, update_op)`, where `value_op` is the op to call to
obtain the value of the metric, and `update_op` should be run for
each batch to update internal state.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`metric_fn`. Optional. If `None`, the `model_fn` must return a single
tensor or a dict with only a single entry as `predictions`.
label_key: The key for a tensor in the `labels` dict (output from the
`input_fn`) to use as the `labels` input to the `metric_fn`.
Optional. If `None`, the `input_fn` must return a single tensor or a
dict with only a single entry as `labels`.
weight_key: The key for a tensor in the `inputs` dict (output from the
`input_fn`) to use as the `weights` input to the `metric_fn`.
Optional. If `None`, no weights will be passed to the `metric_fn`.
"""
self._metric_fn_name = _fn_name(metric_fn)
self._metric_fn = _adapt_metric_fn(
metric_fn=metric_fn,
metric_fn_name=self._metric_fn_name,
is_labels_required=label_key is not None,
is_weights_required=weight_key is not None)
self._prediction_key = prediction_key
self._label_key = label_key
self._weight_key = weight_key
@property
def prediction_key(self):
return self._prediction_key
@property
def label_key(self):
return self._label_key
@property
def weight_key(self):
return self._weight_key
@property
def metric_fn(self):
"""Metric function.
This function accepts named args: `predictions`, `labels`, `weights`. It
returns a single `Tensor` or `(value_op, update_op)` pair. See `metric_fn`
constructor argument for more details.
Returns:
Function, see `metric_fn` constructor argument for more details.
"""
return self._metric_fn
def __str__(self):
return ('MetricSpec(metric_fn=%s, ' % self._metric_fn_name +
'prediction_key=%s, ' % self.prediction_key +
'label_key=%s, ' % self.label_key +
'weight_key=%s)' % self.weight_key
)
def create_metric_ops(self, inputs, labels, predictions):
"""Connect our `metric_fn` to the specified members of the given dicts.
This function will call the `metric_fn` given in our constructor as follows:
```
metric_fn(predictions[self.prediction_key],
labels[self.label_key],
weights=weights[self.weight_key])
```
And returns the result. The `weights` argument is only passed if
`self.weight_key` is not `None`.
`predictions` and `labels` may be single tensors as well as dicts. If
`predictions` is a single tensor, `self.prediction_key` must be `None`. If
`predictions` is a single element dict, `self.prediction_key` is allowed to
be `None`. Conversely, if `labels` is a single tensor, `self.label_key` must
be `None`. If `labels` is a single element dict, `self.label_key` is allowed
to be `None`.
Args:
inputs: A dict of inputs produced by the `input_fn`
labels: A dict of labels or a single label tensor produced by the
`input_fn`.
predictions: A dict of predictions or a single tensor produced by the
`model_fn`.
Returns:
The result of calling `metric_fn`.
Raises:
ValueError: If `predictions` or `labels` is a single `Tensor` and
`self.prediction_key` or `self.label_key` is not `None`; or if
`self.label_key` is `None` but `labels` is a dict with more than one
element, or if `self.prediction_key` is `None` but `predictions` is a
dict with more than one element.
"""
def _get_dict(name, dict_or_tensor, key):
"""Get a single tensor or an element of a dict or raise ValueError."""
if key:
if not isinstance(dict_or_tensor, dict):
raise ValueError('MetricSpec with ' + name + '_key specified'
' requires ' +
name + 's dict, got %s.\n' % dict_or_tensor +
'You must not provide a %s_key if you ' % name +
'only have a single Tensor as %ss.' % name)
if key not in dict_or_tensor:
raise KeyError(
'Key \'%s\' missing from %s.' % (key, dict_or_tensor.keys()))
return dict_or_tensor[key]
else:
if isinstance(dict_or_tensor, dict):
if len(dict_or_tensor) != 1:
raise ValueError('MetricSpec without specified ' + name + '_key'
' requires ' + name + 's tensor or single element'
' dict, got %s' % dict_or_tensor)
return six.next(six.itervalues(dict_or_tensor))
return dict_or_tensor
# Get the predictions.
prediction = _get_dict('prediction', predictions, self.prediction_key)
# Get the labels.
label = _get_dict('label', labels, self.label_key)
try:
return self.metric_fn(
labels=label,
predictions=prediction,
weights=inputs[self.weight_key] if self.weight_key else None)
except Exception as ex:
logging.error('Could not create metric ops for %s, %s.' % (self, ex))
raise
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/metric_spec.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to run and tune an Experiment (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
@@run
@@tune
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_runner import * # pylint: disable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/learn_runner_lib.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3)
grid_search = GridSearchCV(
classifier, {'hidden_units': [[5, 5], [10, 10]]},
scoring='accuracy',
fit_params={'steps': [50]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/grid_search_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TaskRunner and Experiment class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tempfile
import time
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
from tensorflow.python.util import tf_inspect
class SheepCounter(object):
"""To be patched in for the time module, replacing sleep() and time()."""
def __init__(self):
self._total_time = 0
self._sleeptimes = []
self._time_calls = 0
def sleep(self, t):
self._total_time += t
self._sleeptimes += [t]
def time(self):
self._time_calls += 1
return self._total_time
@property
def sleep_times(self):
return self._sleeptimes
@property
def time_calls(self):
return self._time_calls
class TestBaseEstimator(object):
def __init__(self, config, max_evals, eval_dict):
self.eval_count = 0
self.fit_count = 0
self._max_evals = max_evals
self.export_count = 0
self.monitors = []
self.eval_hooks = []
self._config = config or run_config.RunConfig()
self._model_dir = tempfile.mkdtemp()
self._eval_dict = eval_dict
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return self._config
def evaluate(self, **kwargs):
tf_logging.info('evaluate called with args: %s' % kwargs)
if 'hooks' in kwargs:
self.eval_hooks = kwargs['hooks']
self.eval_count += 1
if self.eval_count > self._max_evals:
tf_logging.info('Ran %d evals. Done.' % self.eval_count)
raise StopIteration()
return self._eval_dict
def fake_checkpoint(self):
save_path = os.path.join(self.model_dir, 'model.ckpt')
with session.Session() as sess:
var = variables.Variable(1.0, name='var0')
save = saver.Saver({var.op.name: var})
var.initializer.run()
save.save(sess, save_path, global_step=0)
def train(self, **kwargs):
self.fake_checkpoint()
tf_logging.info('fit called with args: %s' % kwargs)
self.fit_count += 1
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def export_savedmodel(self, export_dir_base, serving_input_fn, **kwargs):
tf_logging.info('export_savedmodel called with args: %s, %s, %s' %
(export_dir_base, serving_input_fn, kwargs))
self.export_count += 1
return os.path.join(
compat.as_bytes(export_dir_base), compat.as_bytes('bogus_timestamp'))
def _check_method_supports_args(method, kwargs):
"""Checks that the given method supports the given args."""
supported_args = tuple(tf_inspect.getargspec(method).args)
for kwarg in kwargs:
if kwarg not in supported_args:
raise ValueError(
'Argument `{}` is not supported in method {}.'.format(kwarg, method))
class TestEstimator(
TestBaseEstimator, evaluable.Evaluable, trainable.Trainable):
def __init__(self, config=None, max_evals=5, eval_dict=None):
super(TestEstimator, self).__init__(config, max_evals, eval_dict)
tf_logging.info('Create Estimator')
def evaluate(self, **kwargs):
_check_method_supports_args(evaluable.Evaluable.evaluate, kwargs)
return super(TestEstimator, self).evaluate(**kwargs)
def fit(self, **kwargs):
_check_method_supports_args(trainable.Trainable.fit, kwargs)
if 'monitors' in kwargs:
self.monitors = kwargs['monitors']
return super(TestEstimator, self).train(**kwargs)
def train(self, **kwargs):
raise ValueError('`train` is not defined in Estimator.')
def export_savedmodel(
self, export_dir_base, serving_input_fn, **kwargs):
_check_method_supports_args(
estimator_lib.Estimator.export_savedmodel, kwargs)
return super(TestEstimator, self).export_savedmodel(
export_dir_base, serving_input_fn, **kwargs)
class TestCoreEstimator(TestBaseEstimator, core_estimator.Estimator):
def __init__(self, config=None, max_evals=5, eval_dict=None):
super(TestCoreEstimator, self).__init__(config, max_evals, eval_dict)
tf_logging.info('Create Core Estimator')
def evaluate(self, **kwargs):
_check_method_supports_args(core_estimator.Estimator.evaluate, kwargs)
return super(TestCoreEstimator, self).evaluate(**kwargs)
def train(self, **kwargs):
_check_method_supports_args(core_estimator.Estimator.train, kwargs)
if 'hooks' in kwargs:
self.monitors = kwargs['hooks']
return super(TestCoreEstimator, self).train(**kwargs)
def export_savedmodel(
self, export_dir_base, serving_input_receiver_fn, **kwargs):
_check_method_supports_args(
core_estimator.Estimator.export_savedmodel, kwargs)
return super(TestCoreEstimator, self).export_savedmodel(
export_dir_base, serving_input_receiver_fn, **kwargs)
class _NoopHook(session_run_hook.SessionRunHook):
pass
class ExperimentTest(test.TestCase):
def _cluster_spec(self):
return {
run_config_lib.TaskType.PS: ['host1:2222', 'host2:2222'],
run_config_lib.TaskType.WORKER:
['host3:2222', 'host4:2222', 'host5:2222']
}
def _estimators_for_tests(self, config=None, eval_dict=None):
return [TestEstimator(config=config, eval_dict=eval_dict),
TestCoreEstimator(config=config, eval_dict=eval_dict)]
  def test_eval_metrics_for_core_estimator(self):
est = TestCoreEstimator()
with self.assertRaisesRegexp(
ValueError, '`eval_metrics` must be `None`'):
experiment.Experiment(
est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics='eval_metrics')
def test_default_output_alternative_key_core_estimator(self):
est = TestCoreEstimator()
export_strategy = saved_model_export_utils.make_export_strategy(
est,
default_output_alternative_key='export_key',
exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps=100,
eval_steps=100,
export_strategies=export_strategy)
with self.assertRaisesRegexp(
ValueError, 'default_output_alternative_key is not supported'):
ex.train_and_evaluate()
def test_train(self):
for est in self._estimators_for_tests():
if isinstance(est, core_estimator.Estimator):
eval_metrics = None
saving_listeners = 'saving_listeners'
else:
eval_metrics = 'eval_metrics'
saving_listeners = None
ex = experiment.Experiment(
est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
saving_listeners=saving_listeners)
fit_args = ex.train(delay_secs=0)
self.assertEqual(1, est.fit_count)
self.assertIn(('max_steps', 'train_steps'), fit_args)
self.assertEqual(0, est.eval_count)
def test_train_delay(self):
for est in self._estimators_for_tests():
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
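# SheepCounter (defined earlier in this file) fakes time.time/time.sleep so
# the test can measure the requested delay without actually sleeping.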
sheep = SheepCounter()
with test.mock.patch.object(time, 'time', sheep.time):
with test.mock.patch.object(time, 'sleep', sheep.sleep):
ex.train(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.time(), delta=1e-4)
def test_train_default_delay(self):
for task_id in [0, 1, 3]:
tf_config = {'task': {'index': task_id}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
sheep = SheepCounter()
with test.mock.patch.object(time, 'time', sheep.time):
with test.mock.patch.object(time, 'sleep', sheep.sleep):
ex.train()
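# The default start delay scales linearly with the task index: this
# worker is expected to wait task_id * 5 seconds before training.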
self.assertAlmostEqual(task_id * 5, sheep.time(), delta=1e-4)
@test.mock.patch.object(server_lib, 'Server')
def test_train_starts_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host4:2222', num_cores=15, gpu_memory_fraction=0.314)
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
# We want to make sure we discount the time it takes to start the server
# in our accounting of the delay, so we set a small delay here.
sheep = SheepCounter()
with test.mock.patch.object(time, 'time', sheep.time):
with test.mock.patch.object(time, 'sleep', sheep.sleep):
ex.train(delay_secs=1)
# Ensure that the delay takes into account the time to start server.
self.assertAlmostEqual(1, sheep.time(), delta=1e-4)
# Assert.
expected_config_proto = config_pb2.ConfigProto()
expected_config_proto.inter_op_parallelism_threads = 15
expected_config_proto.intra_op_parallelism_threads = 15
expected_config_proto.gpu_options.per_process_gpu_memory_fraction = 0.314
mock_server.assert_called_with(
config.cluster_spec,
job_name=run_config_lib.TaskType.WORKER,
task_index=1,
config=expected_config_proto,
start=False)
mock_server.assert_has_calls([test.mock.call().start()])
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because there was no ClusterSpec.
self.assertFalse(mock_server.called)
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_with_empty_master(self, mock_server):
tf_config = {'cluster': self._cluster_spec()}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(master='')
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because master was the empty string.
self.assertFalse(mock_server.called)
def test_train_raises_if_job_name_is_missing(self):
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'index': 1
}
}
with test.mock.patch.dict(
'os.environ',
{'TF_CONFIG': json.dumps(tf_config)}), self.assertRaises(ValueError):
config = run_config_lib.RunConfig(
master='host3:2222' # Normally selected by task type.
)
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
def test_evaluate(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
eval_steps='steps',
eval_delay_secs=0)
ex.evaluate()
self.assertEqual(0, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_evaluate_delay(self):
for est in self._estimators_for_tests():
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input',
eval_hooks=[noop_hook])
for delay in [0, 1, 3]:
sheep = SheepCounter()
with test.mock.patch.object(time, 'time', sheep.time):
with test.mock.patch.object(time, 'sleep', sheep.sleep):
ex.evaluate(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.time(), delta=1e-4)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_eval(self):
for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
self.assertRaises(StopIteration, ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertEqual(0, est.fit_count)
self.assertEqual(6, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_eval_ends_after_train_step(self):
for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0,
train_steps=100)
ex.continuous_eval()
self.assertEqual(0, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_eval_throttle_delay(self):
for delay in [0, 1, 2]:
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
continuous_eval_throttle_secs=delay,
eval_delay_secs=0)
sheep = SheepCounter()
with test.mock.patch.object(time, 'time', sheep.time):
with test.mock.patch.object(time, 'sleep', sheep.sleep):
self.assertRaises(
StopIteration,
ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertAlmostEqual(5 * delay, sheep.time(), delta=1e-4)
def test_continuous_eval_predicate_fn(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
def _predicate_fn(unused_eval_result):
return est.eval_count < 3 # pylint: disable=cell-var-from-loop
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
ex.continuous_eval(evaluate_checkpoint_only_once=False,
continuous_eval_predicate_fn=_predicate_fn)
self.assertEqual(0, est.fit_count)
self.assertEqual(3, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_eval_predicate_fn_with_checkpoint(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
def _predicate_fn(eval_result, checkpoint_path):
self.assertEqual(eval_result is None,
checkpoint_path is None)
return est.eval_count < 3 # pylint: disable=cell-var-from-loop
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
ex.continuous_eval(
evaluate_checkpoint_only_once=False,
continuous_eval_predicate_fn=_predicate_fn)
self.assertEqual(0, est.fit_count)
self.assertEqual(3, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_run_local(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
ex.local_run()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(1, len(est.monitors))
self.assertEqual([noop_hook], est.eval_hooks)
self.assertTrue(isinstance(est.monitors[0],
session_run_hook.SessionRunHook))
def test_train_hooks_extend_does_not_mutate_input_hooks(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
noop_hook = _NoopHook()
input_hooks = [noop_hook]
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
train_monitors=input_hooks)
self.assertAllEqual([noop_hook], ex._train_monitors)
another_noop_hook = _NoopHook()
# Assert that the extend API mutates the experiment's hooks, but not the input hooks.
ex.extend_train_hooks([another_noop_hook])
self.assertAllEqual([noop_hook, another_noop_hook], ex._train_monitors)
self.assertAllEqual([noop_hook], input_hooks)
def test_invalid_export_strategies(self):
for est in self._estimators_for_tests():
with self.assertRaisesRegexp(ValueError, 'ExportStrategy'):
experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps=100,
eval_steps=100,
export_strategies='not_an_export_strategy')
with self.assertRaisesRegexp(ValueError, 'ExportStrategy'):
experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps=100,
eval_steps=100,
export_strategies=['not_an_export_strategy'])
def test_export_strategies_reset(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
export_strategy_1 = saved_model_export_utils.make_export_strategy(
est,
None if isinstance(est, core_estimator.Estimator) else 'export_1',
exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
train_steps=100,
eval_steps=100,
export_strategies=(export_strategy_1,))
ex.train_and_evaluate()
self.assertEqual(1, est.export_count)
# After a reset with no new strategies (None), the export count does not
# change, and the previously provided export strategies are returned intact.
old_es = ex.reset_export_strategies()
ex.train_and_evaluate()
self.assertAllEqual([export_strategy_1], old_es)
self.assertEqual(1, est.export_count)
# After reset with list, the count should increase with the number of
# items.
export_strategy_2 = saved_model_export_utils.make_export_strategy(
est,
None if isinstance(est, core_estimator.Estimator) else 'export_2',
exports_to_keep=None)
export_strategy_3 = saved_model_export_utils.make_export_strategy(
est,
None if isinstance(est, core_estimator.Estimator) else 'export_3',
exports_to_keep=None)
old_es = ex.reset_export_strategies(
[export_strategy_2, export_strategy_3])
ex.train_and_evaluate()
self.assertAllEqual([], old_es)
self.assertEqual(3, est.export_count)
def test_train_and_evaluate(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
noop_hook = _NoopHook()
export_strategy = saved_model_export_utils.make_export_strategy(
est,
None if isinstance(est, core_estimator.Estimator) else 'export_input',
exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
export_strategies=export_strategy)
ex.train_and_evaluate()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(1, est.export_count)
self.assertEqual(1, len(est.monitors))
self.assertEqual([noop_hook], est.eval_hooks)
self.assertTrue(isinstance(est.monitors[0],
session_run_hook.SessionRunHook))
def test_train_and_evaluate_with_no_eval_during_training(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
min_eval_frequency=0)
ex.train_and_evaluate()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(0, len(est.monitors))
def test_min_eval_frequency_defaults(self):
def dummy_model_fn(features, labels): # pylint: disable=unused-argument
pass
estimator = core_estimator.Estimator(dummy_model_fn, '/tmp/dummy')
ex = experiment.Experiment(
estimator, train_input_fn=None, eval_input_fn=None)
self.assertEqual(ex._min_eval_frequency, 1)
def test_continuous_train_and_eval(self):
for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
if isinstance(est, core_estimator.Estimator):
eval_metrics = None
saving_listeners = 'saving_listeners'
else:
eval_metrics = 'eval_metrics'
saving_listeners = None
noop_hook = _NoopHook()
export_strategy = saved_model_export_utils.make_export_strategy(
est,
None if isinstance(est, core_estimator.Estimator) else 'export_input',
exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
export_strategies=export_strategy,
saving_listeners=saving_listeners)
ex.continuous_train_and_eval()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(1, est.export_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_train_and_eval_with_predicate_fn(self):
for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
export_strategy = saved_model_export_utils.make_export_strategy(
est,
None if isinstance(est, core_estimator.Estimator) else 'export_input',
exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
train_steps=100000000000,  # a value that makes `ex` never stop on its own.
eval_steps=100,
export_strategies=export_strategy)
def predicate_fn(eval_result):
del eval_result # unused. for fn signature.
return False
ex.continuous_train_and_eval(continuous_eval_predicate_fn=predicate_fn)
self.assertEqual(0, est.fit_count)
self.assertEqual(0, est.eval_count)
self.assertEqual(0, est.export_count)
def test_continuous_train_and_eval_with_adapted_steps_per_iteration(self):
mock_estimator = test.mock.Mock(core_estimator.Estimator)
type(mock_estimator).model_dir = test.mock.PropertyMock(
return_value='test_dir')
total_steps = 100000000000000
ex = experiment.Experiment(
mock_estimator,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps=total_steps)
def predicate_fn(eval_result):
# Allow only the first invocation.
return eval_result is None
ex.continuous_train_and_eval(continuous_eval_predicate_fn=predicate_fn)
mock_estimator.train.assert_called_once_with(
input_fn='train_input',
steps=int(total_steps / 10),
max_steps=test.mock.ANY,
hooks=test.mock.ANY,
saving_listeners=test.mock.ANY)
def test_continuous_train_and_eval_with_steps_per_iteration_from_user(self):
mock_estimator = test.mock.Mock(core_estimator.Estimator)
type(mock_estimator).model_dir = test.mock.PropertyMock(
return_value='test_dir')
total_steps = 100000000000000
ex = experiment.Experiment(
mock_estimator,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps_per_iteration=1234,
train_steps=total_steps)
def predicate_fn(eval_result):
# Allow only the first invocation.
return eval_result is None
ex.continuous_train_and_eval(continuous_eval_predicate_fn=predicate_fn)
mock_estimator.train.assert_called_once_with(
input_fn='train_input',
steps=1234,
max_steps=test.mock.ANY,
hooks=test.mock.ANY,
saving_listeners=test.mock.ANY)
def test_continuous_train_and_eval_with_default_steps_per_iteration(self):
mock_estimator = test.mock.Mock(core_estimator.Estimator)
type(mock_estimator).model_dir = test.mock.PropertyMock(
return_value='test_dir')
ex = experiment.Experiment(
mock_estimator,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps_per_iteration=None,
train_steps=None)
def predicate_fn(eval_result):
# Allow only the first invocation.
return eval_result is None
ex.continuous_train_and_eval(continuous_eval_predicate_fn=predicate_fn)
mock_estimator.train.assert_called_once_with(
input_fn='train_input',
steps=1000,
max_steps=test.mock.ANY,
hooks=test.mock.ANY,
saving_listeners=test.mock.ANY)
def test_continuous_train_and_eval_with_invalid_predicate_fn(self):
for est in self._estimators_for_tests():
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input')
with self.assertRaisesRegexp(
ValueError, '`continuous_eval_predicate_fn` must be a callable'):
ex.continuous_train_and_eval(continuous_eval_predicate_fn='fn')
def test_continuous_train_and_eval_with_invalid_train_steps_iterations(self):
for est in self._estimators_for_tests():
with self.assertRaisesRegexp(
ValueError, '`train_steps_per_iteration` must be an integer.'):
experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps_per_iteration='123')
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'task': {
'type': run_config_lib.TaskType.PS,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host2:2222',
num_cores=15,
gpu_memory_fraction=0.314)
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
ex.run_std_server()
# Assert.
mock_server.assert_has_calls(
[test.mock.call().start(), test.mock.call().join()])
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server_raises_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
for est in self._estimators_for_tests(config):
with self.assertRaises(ValueError):
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.run_std_server()
def test_test(self):
for est in self._estimators_for_tests():
exp_strategy = saved_model_export_utils.make_export_strategy(
est,
None if isinstance(est, core_estimator.Estimator) else 'export_input',
exports_to_keep=None)
if isinstance(est, core_estimator.Estimator):
eval_metrics = None
saving_listeners = 'saving_listeners'
else:
eval_metrics = 'eval_metrics'
saving_listeners = None
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
export_strategies=(exp_strategy,),
eval_metrics=eval_metrics,
saving_listeners=saving_listeners)
ex.test()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(1, est.export_count)
def test_continuous_eval_evaluates_checkpoint_once(self):
for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
result = {
'called': 0,
'called_with_eval_result': 0,
}
# pylint: disable=cell-var-from-loop
def _predicate_fn(eval_result):
result['called'] += 1
if eval_result:
# If eval_result is neither None nor empty, the checkpoint has been
# evaluated.
result['called_with_eval_result'] += 1
# Running the predicate 300 times should be enough to show that the
# checkpoint is evaluated only once.
return result['called'] < 300
# pylint: enable=cell-var-from-loop
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
ex.continuous_eval(evaluate_checkpoint_only_once=True,
continuous_eval_predicate_fn=_predicate_fn)
self.assertEqual(0, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(300, result['called'])
self.assertEqual(1, result['called_with_eval_result'])
def test_checkpoint_and_export(self):
model_dir = tempfile.mkdtemp()
config = run_config_lib.RunConfig(save_checkpoints_steps=3)
est = dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column('feature', dimension=4)
],
hidden_units=[3, 3],
model_dir=model_dir,
config=config)
exp_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input', exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn,
export_strategies=(exp_strategy,),
train_steps=8,
checkpoint_and_export=True,
eval_delay_secs=0)
with test.mock.patch.object(ex, '_maybe_export'):
with test.mock.patch.object(ex, '_call_evaluate'):
ex.train_and_evaluate()
# Eval and export are called after steps 1, 4, 7, and 8 (after training
# is completed).
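# (With save_checkpoints_steps=3 and train_steps=8, checkpoints land on
# global steps 1, 4, 7, and 8, giving four eval/export calls.)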
self.assertEqual(ex._maybe_export.call_count, 4)
self.assertEqual(ex._call_evaluate.call_count, 4)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/experiment_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph actions tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from tensorflow.contrib import testing
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.monitors import BaseMonitor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
class _Feeder(object):
"""Simple generator for `feed_fn`, returning 10 * step."""
def __init__(self, tensor, max_step):
self._step = 0
self._tensor = tensor
self._max_step = max_step
@property
def step(self):
return self._step
def feed_fn(self):
if self._step >= self._max_step:
raise StopIteration
value = self._step * 10.0
self._step += 1
return {self._tensor: value}
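# A minimal sketch (hypothetical, not part of the original file) of how a
# `_Feeder` drives a feed loop: successive `feed_fn()` calls yield
# {tensor: 0.0}, {tensor: 10.0}, ..., and StopIteration ends the loop after
# `max_step` feeds.
def _example_feeder_usage(tensor):
  feeder = _Feeder(tensor, max_step=3)
  feeds = []
  try:
    while True:
      feeds.append(feeder.feed_fn())
  except StopIteration:
    pass
  return feeds  # Three feed dicts; feeder.step is now 3.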
class _BaseMonitorWrapper(BaseMonitor):
"""Base monitor wrapper to facilitate testing.
This monitor can act as either chief-exclusive or non-exclusive.
"""
def __init__(self, run_on_all_workers):
super(_BaseMonitorWrapper, self).__init__()
self._run_on_all_workers = run_on_all_workers
self._is_active = False
self._has_step = False
@property
def run_on_all_workers(self):
return self._run_on_all_workers
@property
def is_active(self):
return self._is_active
@property
def has_step(self):
return self._has_step
def begin(self, max_steps=None):
self._is_active = True
return super(_BaseMonitorWrapper, self).begin(max_steps)
def step_begin(self, step):
self._has_step = True
return super(_BaseMonitorWrapper, self).step_begin(step)
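# In the tests below, a wrapper built with run_on_all_workers=False is
# chief-exclusive: it should stay inactive when supervisor_is_chief=False,
# while a run_on_all_workers=True monitor runs on every worker.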
class GraphActionsTest(test.TestCase):
"""Graph actions tests."""
def setUp(self):
learn.graph_actions.clear_summary_writers()
self._output_dir = tempfile.mkdtemp()
testing.FakeSummaryWriter.install()
def tearDown(self):
testing.FakeSummaryWriter.uninstall()
if self._output_dir:
shutil.rmtree(self._output_dir)
learn.graph_actions.clear_summary_writers()
def _assert_summaries(self,
output_dir,
writer,
expected_summaries=None,
expected_graphs=None,
expected_meta_graphs=None,
expected_session_logs=None):
self.assertTrue(isinstance(writer, testing.FakeSummaryWriter))
writer.assert_summaries(
self,
expected_logdir=output_dir,
expected_graph=ops.get_default_graph(),
expected_summaries=expected_summaries,
expected_added_graphs=expected_graphs,
expected_added_meta_graphs=expected_meta_graphs,
expected_session_logs=expected_session_logs)
# TODO(ptucker): Test number and contents of checkpoint files.
def _assert_ckpt(self, output_dir, expected=True):
ckpt_state = checkpoint_management.get_checkpoint_state(output_dir)
if expected:
pattern = '%s/model.ckpt-.*' % output_dir
primary_ckpt_path = ckpt_state.model_checkpoint_path
self.assertRegexpMatches(primary_ckpt_path, pattern)
all_ckpt_paths = ckpt_state.all_model_checkpoint_paths
self.assertTrue(primary_ckpt_path in all_ckpt_paths)
for ckpt_path in all_ckpt_paths:
self.assertRegexpMatches(ckpt_path, pattern)
else:
self.assertTrue(ckpt_state is None)
# TODO(ptucker): Test lock, multi-threaded access?
def test_summary_writer(self):
writer = learn.graph_actions.get_summary_writer('log/dir/0')
self._assert_summaries('log/dir/0', writer)
self.assertTrue(
learn.graph_actions.get_summary_writer('log/dir/0') is
learn.graph_actions.get_summary_writer('log/dir/0'))
self.assertTrue(
learn.graph_actions.get_summary_writer('log/dir/0') is
not learn.graph_actions.get_summary_writer('log/dir/1'))
# TODO(ptucker): Test restore_checkpoint_path for eval; this should obsolete
# test_evaluate_with_saver().
# TODO(ptucker): Test start_queue_runners for both eval & train.
# TODO(ptucker): Test coord.request_stop & coord.join for eval.
def _build_inference_graph(self):
"""Build simple inference graph.
This includes a regular variable, local variable, and fake table.
Returns:
Tuple of 3 `Tensor` objects, 2 input and 1 output.
"""
variables_lib.create_global_step()
in0 = variables.VariableV1(1.0)
in1 = variables_lib.local_variable(2.0)
fake_table = variables.VariableV1(
3.0,
trainable=False,
collections=['fake_tables'],
name='fake_table_var')
in0.graph.add_to_collections([ops.GraphKeys.TABLE_INITIALIZERS],
fake_table.initializer)
out = in0 + in1 + fake_table
return in0, in1, out
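# Since in0 = 1.0, in1 = 2.0, and fake_table = 3.0, the output evaluates to
# 1.0 + 2.0 + 3.0 = 6.0, which the tests below expect.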
def test_infer(self):
with ops.Graph().as_default() as g, self.session(g):
self._assert_ckpt(self._output_dir, False)
in0, in1, out = self._build_inference_graph()
self.assertEqual({
'a': 1.0,
'b': 2.0,
'c': 6.0
}, learn.graph_actions.infer(None, {'a': in0,
'b': in1,
'c': out}))
self._assert_ckpt(self._output_dir, False)
@test.mock.patch.object(
learn.graph_actions.coordinator.Coordinator,
'request_stop',
side_effect=learn.graph_actions.coordinator.Coordinator.request_stop,
autospec=True)
def test_coordinator_request_stop_called(self, request_stop):
with ops.Graph().as_default() as g, self.session(g):
in0, in1, out = self._build_inference_graph()
learn.graph_actions.infer(None, {'a': in0, 'b': in1, 'c': out})
self.assertTrue(request_stop.called)
@test.mock.patch.object(
learn.graph_actions.coordinator.Coordinator,
'request_stop',
side_effect=learn.graph_actions.coordinator.Coordinator.request_stop,
autospec=True)
def test_run_feeds_iter_cleanup_with_exceptions(self, request_stop):
with ops.Graph().as_default() as g, self.session(g):
in0, in1, out = self._build_inference_graph()
try:
for _ in learn.graph_actions.run_feeds_iter({
'a': in0,
'b': in1,
'c': out
}, [None] * 3):
self.assertFalse(request_stop.called)
raise ValueError('Fake exception')
except ValueError:
pass
self.assertTrue(request_stop.called)
def test_run_feeds_iter_calls_resources_init(self):
with ops.Graph().as_default():
in0, _, _ = self._build_inference_graph()
handle = test_ops.stub_resource_handle_op(container='a', shared_name='b')
resources.register_resource(
handle=handle,
create_op=test_ops.resource_create_op(handle),
is_initialized_op=test_ops.resource_initialized_op(handle))
for _ in learn.graph_actions.run_feeds_iter(
{
'in0': in0
}, feed_dicts=[{}]):
self.assertTrue(test_ops.resource_initialized_op(handle).eval())
def test_infer_different_default_graph(self):
with self.cached_session():
self._assert_ckpt(self._output_dir, False)
with ops.Graph().as_default():
in0, in1, out = self._build_inference_graph()
with ops.Graph().as_default():
self.assertEqual({
'a': 1.0,
'b': 2.0,
'c': 6.0
}, learn.graph_actions.infer(None, {'a': in0,
'b': in1,
'c': out}))
self._assert_ckpt(self._output_dir, False)
def test_infer_invalid_feed(self):
with ops.Graph().as_default() as g, self.session(g):
self._assert_ckpt(self._output_dir, False)
in0, _, _ = self._build_inference_graph()
with self.assertRaisesRegexp(TypeError, 'Can not convert a NoneType'):
learn.graph_actions.infer(None, {'a': in0}, feed_dict={None: 4.0})
self._assert_ckpt(self._output_dir, False)
def test_infer_feed(self):
with ops.Graph().as_default() as g, self.session(g):
self._assert_ckpt(self._output_dir, False)
in0, _, out = self._build_inference_graph()
self.assertEqual(
{
'c': 9.0
},
learn.graph_actions.infer(
None, {'c': out}, feed_dict={in0: 4.0}))
self._assert_ckpt(self._output_dir, False)
# TODO(ptucker): Test eval for 1 epoch.
def test_evaluate_invalid_args(self):
with ops.Graph().as_default() as g, self.session(g):
self._assert_ckpt(self._output_dir, False)
with self.assertRaisesRegexp(ValueError, 'utput directory'):
learn.graph_actions.evaluate(
g,
output_dir=None,
checkpoint_path=None,
eval_dict={'a': constant_op.constant(1.0)})
with self.assertRaisesRegexp(ValueError, 'utput directory'):
learn.graph_actions.evaluate(
g,
output_dir='',
checkpoint_path=None,
eval_dict={'a': constant_op.constant(1.0)})
self._assert_ckpt(self._output_dir, False)
def test_evaluate(self):
with ops.Graph().as_default() as g, self.session(g):
_, _, out = self._build_inference_graph()
writer = learn.graph_actions.get_summary_writer(self._output_dir)
self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
self._assert_ckpt(self._output_dir, False)
results = learn.graph_actions.evaluate(
g,
output_dir=self._output_dir,
checkpoint_path=None,
eval_dict={'a': out},
max_steps=1)
self.assertEqual(({'a': 6.0}, 0), results)
self._assert_summaries(
self._output_dir,
writer,
expected_summaries={0: {
'a': 6.0
}},
expected_session_logs=[])
self._assert_ckpt(self._output_dir, False)
def test_evaluate_ready_for_local_init(self):
with ops.Graph().as_default() as g, self.session(g):
variables_lib.create_global_step()
v = variables.VariableV1(1.0)
variables.VariableV1(
v + 1, collections=[ops.GraphKeys.LOCAL_VARIABLES], trainable=False)
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
ops.add_to_collection(ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
ready_for_local_init_op)
_ = learn.graph_actions.evaluate(
g,
output_dir=self._output_dir,
checkpoint_path=None,
eval_dict={'a': v},
max_steps=1)
def test_evaluate_feed_fn(self):
with ops.Graph().as_default() as g, self.session(g):
in0, _, out = self._build_inference_graph()
writer = learn.graph_actions.get_summary_writer(self._output_dir)
self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
self._assert_ckpt(self._output_dir, False)
feeder = _Feeder(in0, 3)
results = learn.graph_actions.evaluate(
g,
output_dir=self._output_dir,
checkpoint_path=None,
eval_dict={'a': out},
feed_fn=feeder.feed_fn,
max_steps=3)
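# The feeder supplies in0 = 0.0, 10.0, 20.0, so `out` evaluates to 5.0,
# 15.0, 25.0; the reported result is the final step's value.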
self.assertEqual(3, feeder.step)
self.assertEqual(({'a': 25.0}, 0), results)
self._assert_summaries(
self._output_dir,
writer,
expected_summaries={0: {
'a': 25.0
}},
expected_session_logs=[])
self._assert_ckpt(self._output_dir, False)
def test_evaluate_feed_fn_with_exhaustion(self):
with ops.Graph().as_default() as g, self.session(g):
in0, _, out = self._build_inference_graph()
writer = learn.graph_actions.get_summary_writer(self._output_dir)
self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
feeder = _Feeder(in0, 2)
results = learn.graph_actions.evaluate(
g,
output_dir=self._output_dir,
checkpoint_path=None,
eval_dict={'a': out},
feed_fn=feeder.feed_fn,
max_steps=3)
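# The feeder exhausts after two feeds (in0 = 0.0 and 10.0), so evaluation
# stops early with a final value of 10.0 + 5.0 = 15.0.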
self.assertEqual(2, feeder.step)
self.assertEqual(({'a': 15.0}, 0), results)
self._assert_summaries(
self._output_dir,
writer,
expected_summaries={0: {
'a': 15.0
}},
expected_session_logs=[])
def test_evaluate_with_saver(self):
with ops.Graph().as_default() as g, self.session(g):
_, _, out = self._build_inference_graph()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
writer = learn.graph_actions.get_summary_writer(self._output_dir)
self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
results = learn.graph_actions.evaluate(
g,
output_dir=self._output_dir,
checkpoint_path=None,
eval_dict={'a': out},
max_steps=1)
self.assertEqual(({'a': 6.0}, 0), results)
self._assert_summaries(
self._output_dir,
writer,
expected_summaries={0: {
'a': 6.0
}},
expected_session_logs=[])
# TODO(ptucker): Resume training from previous ckpt.
# TODO(ptucker): !supervisor_is_chief
# TODO(ptucker): Custom init op for training.
# TODO(ptucker): Mock supervisor, and assert all interactions.
# TODO(ispir): Remove the following tests once the deprecated train is removed.
class GraphActionsTrainTest(test.TestCase):
"""Tests for train."""
def setUp(self):
learn.graph_actions.clear_summary_writers()
self._output_dir = tempfile.mkdtemp()
testing.FakeSummaryWriter.install()
def tearDown(self):
testing.FakeSummaryWriter.uninstall()
if self._output_dir:
shutil.rmtree(self._output_dir)
learn.graph_actions.clear_summary_writers()
def _assert_summaries(self,
output_dir,
expected_summaries=None,
expected_graphs=None,
expected_meta_graphs=None,
expected_session_logs=None):
writer = learn.graph_actions.get_summary_writer(output_dir)
self.assertTrue(isinstance(writer, testing.FakeSummaryWriter))
writer.assert_summaries(
self,
expected_logdir=output_dir,
expected_graph=ops.get_default_graph(),
expected_summaries=expected_summaries,
expected_added_graphs=expected_graphs,
expected_added_meta_graphs=expected_meta_graphs,
expected_session_logs=expected_session_logs)
# TODO(ptucker): Test number and contents of checkpoint files.
def _assert_ckpt(self, output_dir, expected=True):
ckpt_state = checkpoint_management.get_checkpoint_state(output_dir)
if expected:
pattern = '%s/model.ckpt-.*' % output_dir
primary_ckpt_path = ckpt_state.model_checkpoint_path
self.assertRegexpMatches(primary_ckpt_path, pattern)
all_ckpt_paths = ckpt_state.all_model_checkpoint_paths
self.assertTrue(primary_ckpt_path in all_ckpt_paths)
for ckpt_path in all_ckpt_paths:
self.assertRegexpMatches(ckpt_path, pattern)
else:
self.assertTrue(ckpt_state is None)
def _build_inference_graph(self):
"""Build simple inference graph.
This includes a regular variable, local variable, and fake table.
Returns:
Tuple of 3 `Tensor` objects, 2 input and 1 output.
"""
variables_lib.create_global_step()
in0 = variables.VariableV1(1.0)
in1 = variables_lib.local_variable(2.0)
fake_table = variables.VariableV1(
3.0,
trainable=False,
collections=['fake_tables'],
name='fake_table_var')
in0.graph.add_to_collections([ops.GraphKeys.TABLE_INITIALIZERS],
fake_table.initializer)
out = in0 + in1 + fake_table
return in0, in1, out
def test_train_invalid_args(self):
with ops.Graph().as_default() as g, self.session(g):
train_op = constant_op.constant(1.0)
loss_op = constant_op.constant(2.0)
with self.assertRaisesRegexp(ValueError, 'utput directory'):
learn.graph_actions.train(
g, output_dir=None, train_op=train_op, loss_op=loss_op)
with self.assertRaisesRegexp(ValueError, 'utput directory'):
learn.graph_actions.train(
g,
output_dir='',
train_op=constant_op.constant(1.0),
loss_op=constant_op.constant(2.0))
with self.assertRaisesRegexp(ValueError, 'train_op'):
learn.graph_actions.train(
g, output_dir=self._output_dir, train_op=None, loss_op=loss_op)
with self.assertRaisesRegexp(ValueError, 'loss_op'):
learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=constant_op.constant(1.0),
loss_op=None)
with self.assertRaisesRegexp(ValueError, 'global_step'):
learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=constant_op.constant(1.0),
loss_op=loss_op)
# TODO(ptucker): Resume training from previous ckpt.
# TODO(ptucker): !supervisor_is_chief
# TODO(ptucker): Custom init op for training.
# TODO(ptucker): Mock supervisor, and assert all interactions.
def test_train(self):
with ops.Graph().as_default() as g, self.session(g):
with ops.control_dependencies(self._build_inference_graph()):
train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
self._assert_summaries(self._output_dir)
self._assert_ckpt(self._output_dir, False)
loss = learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=constant_op.constant(2.0),
steps=1)
# TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
# SaverDef, so we can't add it to the summary assertion test below.
# meta_graph_def = meta_graph.create_meta_graph_def()
self.assertEqual(2.0, loss)
self._assert_summaries(self._output_dir, expected_graphs=[g])
self._assert_ckpt(self._output_dir, True)
def test_train_steps_is_incremental(self):
with ops.Graph().as_default() as g, self.session(g):
with ops.control_dependencies(self._build_inference_graph()):
train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=constant_op.constant(2.0),
steps=10)
step = checkpoint_utils.load_variable(
self._output_dir, variables_lib.get_global_step().name)
self.assertEqual(10, step)
with ops.Graph().as_default() as g, self.session(g):
with ops.control_dependencies(self._build_inference_graph()):
train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=constant_op.constant(2.0),
steps=15)
step = checkpoint_utils.load_variable(
self._output_dir, variables_lib.get_global_step().name)
self.assertEqual(25, step)
def test_train_max_steps_is_not_incremental(self):
with ops.Graph().as_default() as g, self.session(g):
with ops.control_dependencies(self._build_inference_graph()):
train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=constant_op.constant(2.0),
max_steps=10)
step = checkpoint_utils.load_variable(
self._output_dir, variables_lib.get_global_step().name)
self.assertEqual(10, step)
with ops.Graph().as_default() as g, self.session(g):
with ops.control_dependencies(self._build_inference_graph()):
train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=constant_op.constant(2.0),
max_steps=15)
step = checkpoint_utils.load_variable(
self._output_dir, variables_lib.get_global_step().name)
self.assertEqual(15, step)
def test_train_loss(self):
with ops.Graph().as_default() as g, self.session(g):
variables_lib.create_global_step()
loss_var = variables_lib.local_variable(10.0)
train_op = control_flow_ops.group(
state_ops.assign_add(variables_lib.get_global_step(), 1),
state_ops.assign_add(loss_var, -1.0))
self._assert_summaries(self._output_dir)
self._assert_ckpt(self._output_dir, False)
loss = learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=loss_var.value(),
steps=6)
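# The loss starts at 10.0 and each train step subtracts 1.0, so after 6
# steps the reported loss is 4.0.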
# TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
# SaverDef, so we can't add it to the summary assertion test below.
# meta_graph_def = meta_graph.create_meta_graph_def()
self.assertEqual(4.0, loss)
self._assert_summaries(self._output_dir, expected_graphs=[g])
self._assert_ckpt(self._output_dir, True)
def test_train_summaries(self):
with ops.Graph().as_default() as g, self.session(g):
with ops.control_dependencies(self._build_inference_graph()):
train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
loss_op = constant_op.constant(2.0)
summary.scalar('loss', loss_op)
self._assert_summaries(self._output_dir)
self._assert_ckpt(self._output_dir, False)
loss = learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=loss_op,
steps=1)
# TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
# SaverDef, so we can't add it to the summary assertion test below.
# meta_graph_def = meta_graph.create_meta_graph_def()
self.assertEqual(2.0, loss)
self._assert_summaries(
self._output_dir,
expected_graphs=[g],
expected_summaries={1: {
'loss': 2.0
}})
self._assert_ckpt(self._output_dir, True)
def test_train_chief_monitor(self):
with ops.Graph().as_default() as g, self.session(g):
with ops.control_dependencies(self._build_inference_graph()):
train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
loss_op = constant_op.constant(2.0)
summary.scalar('loss', loss_op)
chief_exclusive_monitor = _BaseMonitorWrapper(False)
all_workers_monitor = _BaseMonitorWrapper(True)
loss = learn.graph_actions.train(
g,
output_dir=self._output_dir,
train_op=train_op,
loss_op=loss_op,
supervisor_is_chief=True,
steps=1,
monitors=[chief_exclusive_monitor, all_workers_monitor])
self.assertEqual(2.0, loss)
self.assertTrue(chief_exclusive_monitor.is_active and
all_workers_monitor.is_active,
'All monitors must have been active.')
self.assertTrue(chief_exclusive_monitor.has_step and
all_workers_monitor.has_step,
'All monitors must have a step.')
def test_train_worker_monitor(self):
# We need to set the device explicitly because non-chief workers are
# checked to ensure all variables have a device assigned.
with ops.Graph().as_default() as g, g.device('/cpu:0'):
global_step = variables_lib.create_global_step(g)
train_op = state_ops.assign_add(global_step, 1)
loss_op = constant_op.constant(2.0)
summary.scalar('loss', loss_op)
# Add explicit "local" init op to initialize all variables
# as there's no chief to init here.
init_op = variables.global_variables_initializer()
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, init_op)
# Create two monitors: one that runs on all workers and one that is
# chief-exclusive.
chief_exclusive_monitor = _BaseMonitorWrapper(False)
all_workers_monitor = _BaseMonitorWrapper(True)
with self.session(g):
loss = learn.graph_actions.train(
g,
output_dir=self._output_dir,
global_step_tensor=global_step,
train_op=train_op,
loss_op=loss_op,
supervisor_is_chief=False,
steps=1,
monitors=[chief_exclusive_monitor, all_workers_monitor])
self.assertEqual(2.0, loss)
self.assertTrue(not chief_exclusive_monitor.is_active and
all_workers_monitor.is_active,
'Only non-chief runnable monitor must have been active.')
self.assertTrue(not chief_exclusive_monitor.has_step and
all_workers_monitor.has_step,
'Only non-chief runnable monitor must have a step.')
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/graph_actions_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MetricSpec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# pylint: disable=g-bad-todo,g-import-not-at-top
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.python.platform import test
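# MetricSpec infers how to call `metric_fn` from its argument names:
# predictions may be named `predictions` or `prediction`; labels may be
# `labels`, `label`, `targets`, or `target`; weights may be `weights` or
# `weight`. The prediction_key/label_key/weight_key arguments select entries
# from dict-valued inputs, as the tests below exercise.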
class MetricSpecTest(test.TestCase):
def test_named_args_with_weights(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = {"l1": "l1_value", "l2": "l2_value"}
predictions_ = {"p1": "p1_value", "p2": "p2_value"}
def _fn0(predictions, labels, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", labels)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
def _fn1(predictions, targets, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", targets)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
def _fn2(prediction, label, weight=None):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", label)
self.assertEqual("f2_value", weight)
return "metric_fn_result"
def _fn3(prediction, target, weight=None):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", target)
self.assertEqual("f2_value", weight)
return "metric_fn_result"
for fn in (_fn0, _fn1, _fn2, _fn3):
spec = MetricSpec(
metric_fn=fn, prediction_key="p1", label_key="l1", weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_args(self):
def _fn():
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(
{"f1": "f1_value"}, "labels_value", "predictions_value")
def test_kwargs(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(**kwargs):
self.assertEqual({}, kwargs)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_named_labels_no_predictions(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(labels):
self.assertEqual(labels_, labels)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_named_labels_no_predictions_with_kwargs(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(labels, **kwargs):
self.assertEqual(labels_, labels)
self.assertEqual({}, kwargs)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_no_named_predictions_named_labels_first_arg(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(labels, predictions_by_another_name):
self.assertEqual(predictions_, predictions_by_another_name)
self.assertEqual(labels_, labels)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_predictions_named_labels_second_arg(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(predictions_by_another_name, labels):
self.assertEqual(predictions_, predictions_by_another_name)
self.assertEqual(labels_, labels)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_labels(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(predictions):
self.assertEqual(predictions_, predictions)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_labels_or_predictions_1arg(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(a):
self.assertEqual(predictions_, a)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_labels_or_predictions_2args(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(a, b):
del a, b
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_named_args_no_weights(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = {"l1": "l1_value", "l2": "l2_value"}
predictions_ = {"p1": "p1_value", "p2": "p2_value"}
def _fn0(predictions, labels):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", labels)
return "metric_fn_result"
def _fn1(predictions, targets):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", targets)
return "metric_fn_result"
def _fn2(prediction, label):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", label)
return "metric_fn_result"
def _fn3(prediction, target):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", target)
return "metric_fn_result"
for fn in (_fn0, _fn1, _fn2, _fn3):
spec = MetricSpec(metric_fn=fn, prediction_key="p1", label_key="l1")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_predictions_dict_no_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value", "l2": "l2_value"}
predictions = {"p1": "p1_value", "p2": "p2_value"}
def _fn(predictions, labels, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn, label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError,
"MetricSpec without specified prediction_key requires predictions"
" tensor or single element dict"):
spec.create_metric_ops(features, labels, predictions)
def test_labels_dict_no_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value", "l2": "l2_value"}
predictions = {"p1": "p1_value", "p2": "p2_value"}
def _fn(labels, predictions, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn, prediction_key="p1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError,
"MetricSpec without specified label_key requires labels tensor or"
" single element dict"):
spec.create_metric_ops(features, labels, predictions)
def test_single_prediction(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = {"l1": "l1_value", "l2": "l2_value"}
predictions_ = "p1_value"
def _fn(predictions, labels, weights=None):
self.assertEqual(predictions_, predictions)
self.assertEqual("l1_value", labels)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn, label_key="l1", weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_single_label(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = "l1_value"
predictions_ = {"p1": "p1_value", "p2": "p2_value"}
def _fn(predictions, labels, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual(labels_, labels)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn, prediction_key="p1", weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_single_predictions_with_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value", "l2": "l2_value"}
predictions = "p1_value"
def _fn(predictions, labels, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(
metric_fn=_fn, prediction_key="p1", label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError,
"MetricSpec with prediction_key specified requires predictions dict"):
spec.create_metric_ops(features, labels, predictions)
def test_single_labels_with_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = "l1_value"
predictions = {"p1": "p1_value", "p2": "p2_value"}
def _fn(predictions, labels, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(
metric_fn=_fn, prediction_key="p1", label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError, "MetricSpec with label_key specified requires labels dict"):
spec.create_metric_ops(features, labels, predictions)
def test_str(self):
def _metric_fn(labels, predictions, weights=None):
return predictions, labels, weights
string = str(MetricSpec(
metric_fn=_metric_fn,
label_key="my_label",
prediction_key="my_prediction",
weight_key="my_weight"))
self.assertIn("_metric_fn", string)
self.assertIn("my_label", string)
self.assertIn("my_prediction", string)
self.assertIn("my_weight", string)
def test_partial_str(self):
def custom_metric(predictions, labels, stuff, weights=None):
return predictions, labels, weights, stuff
string = str(MetricSpec(
metric_fn=functools.partial(custom_metric, stuff=5),
label_key="my_label",
prediction_key="my_prediction",
weight_key="my_weight"))
self.assertIn("custom_metric", string)
self.assertIn("my_label", string)
self.assertIn("my_prediction", string)
self.assertIn("my_weight", string)
def test_partial(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value"}
predictions = {"p1": "p1_value", "p2": "p2_value"}
def custom_metric(predictions, labels, stuff, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", labels)
self.assertEqual("f2_value", weights)
if stuff:
return "metric_fn_result"
raise ValueError("No stuff.")
spec = MetricSpec(
metric_fn=functools.partial(custom_metric, stuff=5),
label_key="l1",
prediction_key="p1",
weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels, predictions))
spec = MetricSpec(
metric_fn=functools.partial(custom_metric, stuff=None),
prediction_key="p1", label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(ValueError, "No stuff."):
spec.create_metric_ops(features, labels, predictions)
def test_label_key_without_label_arg(self):
def _fn0(predictions, weights=None):
del predictions, weights
self.fail("Expected failure before metric_fn.")
def _fn1(prediction, weight=None):
del prediction, weight
self.fail("Expected failure before metric_fn.")
for fn in (_fn0, _fn1):
with self.assertRaisesRegexp(ValueError, "label.*missing"):
MetricSpec(metric_fn=fn, label_key="l1")
def test_weight_key_without_weight_arg(self):
def _fn0(predictions, labels):
del predictions, labels
self.fail("Expected failure before metric_fn.")
def _fn1(prediction, label):
del prediction, label
self.fail("Expected failure before metric_fn.")
def _fn2(predictions, targets):
del predictions, targets
self.fail("Expected failure before metric_fn.")
def _fn3(prediction, target):
del prediction, target
self.fail("Expected failure before metric_fn.")
for fn in (_fn0, _fn1, _fn2, _fn3):
with self.assertRaisesRegexp(ValueError, "weight.*missing"):
MetricSpec(metric_fn=fn, weight_key="f2")
def test_multiple_label_args(self):
def _fn0(predictions, labels, targets):
del predictions, labels, targets
self.fail("Expected failure before metric_fn.")
def _fn1(prediction, label, target):
del prediction, label, target
self.fail("Expected failure before metric_fn.")
for fn in (_fn0, _fn1):
with self.assertRaisesRegexp(ValueError, "provide only one of.*label"):
MetricSpec(metric_fn=fn)
def test_multiple_prediction_args(self):
def _fn(predictions, prediction, labels):
del predictions, prediction, labels
self.fail("Expected failure before metric_fn.")
with self.assertRaisesRegexp(ValueError, "provide only one of.*prediction"):
MetricSpec(metric_fn=_fn)
def test_multiple_weight_args(self):
def _fn(predictions, labels, weights=None, weight=None):
del predictions, labels, weights, weight
self.fail("Expected failure before metric_fn.")
with self.assertRaisesRegexp(ValueError, "provide only one of.*weight"):
MetricSpec(metric_fn=_fn)
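# Illustrative sketch, not a test: minimal end-to-end MetricSpec wiring. All
# keys and placeholder values below are hypothetical, mirroring the string
# stand-ins used by the tests above.
def _example_metric_spec_usage():
  """Builds a MetricSpec and runs it against placeholder dicts."""
  def _equal(predictions, labels, weights=None):
    del weights  # Unused in this sketch.
    return predictions == labels
  spec = MetricSpec(
      metric_fn=_equal, prediction_key="p1", label_key="l1", weight_key="f2")
  features = {"f1": "f1_value", "f2": "f2_value"}
  labels = {"l1": "l1_value"}
  predictions = {"p1": "p1_value", "p2": "p2_value"}
  return spec.create_metric_ops(features, labels, predictions)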
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/metric_spec_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning with TensorFlow (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import basic_session_run_hooks
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import estimators
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import learn_io as io
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import ops
from tensorflow.contrib.learn.python.learn import preprocessing
from tensorflow.contrib.learn.python.learn import utils
from tensorflow.contrib.learn.python.learn.estimators import *
from tensorflow.contrib.learn.python.learn.evaluable import Evaluable
from tensorflow.contrib.learn.python.learn.experiment import Experiment
from tensorflow.contrib.learn.python.learn.export_strategy import ExportStrategy
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
from tensorflow.contrib.learn.python.learn.graph_actions import infer
from tensorflow.contrib.learn.python.learn.graph_actions import run_feeds
from tensorflow.contrib.learn.python.learn.graph_actions import run_n
from tensorflow.contrib.learn.python.learn.graph_actions import train
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.learn.python.learn.monitors import NanLossDuringTrainingError
from tensorflow.contrib.learn.python.learn.trainable import Trainable
from tensorflow.contrib.learn.python.learn.utils import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes (deprected).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.util.deprecation import deprecated_alias
# pylint: disable=invalid-name
LoggingTensorHook = deprecated_alias(
'tf.contrib.learn.basic_session_run_hooks.LoggingTensorHook',
'tf.train.LoggingTensorHook',
basic_session_run_hooks.LoggingTensorHook)
StopAtStepHook = deprecated_alias(
'tf.contrib.learn.basic_session_run_hooks.StopAtStepHook',
'tf.train.StopAtStepHook',
basic_session_run_hooks.StopAtStepHook)
CheckpointSaverHook = deprecated_alias(
'tf.contrib.learn.basic_session_run_hooks.CheckpointSaverHook',
'tf.train.CheckpointSaverHook',
basic_session_run_hooks.CheckpointSaverHook)
StepCounterHook = deprecated_alias(
'tf.contrib.learn.basic_session_run_hooks.StepCounterHook',
'tf.train.StepCounterHook',
basic_session_run_hooks.StepCounterHook)
NanLossDuringTrainingError = deprecated_alias(
'tf.contrib.learn.basic_session_run_hooks.NanLossDuringTrainingError',
'tf.train.NanLossDuringTrainingError',
basic_session_run_hooks.NanLossDuringTrainingError)
NanTensorHook = deprecated_alias(
'tf.contrib.learn.basic_session_run_hooks.NanTensorHook',
'tf.train.NanTensorHook',
basic_session_run_hooks.NanTensorHook)
SummarySaverHook = deprecated_alias(
'tf.contrib.learn.basic_session_run_hooks.SummarySaverHook',
'tf.train.SummarySaverHook',
basic_session_run_hooks.SummarySaverHook)
# pylint: enable=invalid-name
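# Illustrative sketch, not part of the original module: the aliases above are
# drop-in replacements for their tf.train counterparts (instantiating one only
# adds a deprecation warning), so a hook list built here can be passed to an
# Estimator or a monitored session unchanged. The step counts are arbitrary
# example values.
def _example_hooks():
  """Returns a sample hook list built from the deprecated aliases."""
  return [
      StopAtStepHook(last_step=10000),  # Stop training at global step 10000.
      StepCounterHook(every_n_steps=100),  # Report steps/sec every 100 steps.
  ]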
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`Evaluable` interface (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Evaluable(object):
"""Interface for objects that are evaluatable by, e.g., `Experiment`.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@abc.abstractproperty
def model_dir(self):
"""Returns a path in which the eval process will look for checkpoints."""
raise NotImplementedError
@abc.abstractmethod
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None):
"""Evaluates given model with provided evaluation data.
Stop conditions - we evaluate on the given input data until one of the
following:
- If `steps` is provided, and `steps` batches of size `batch_size` are
processed.
- If `input_fn` is provided, and it raises an end-of-input
exception (`OutOfRangeError` or `StopIteration`).
- If `x` is provided, and all items in `x` have been processed.
The return value is a dict containing the metrics specified in `metrics`, as
well as an entry `global_step` which contains the value of the global step
for which this evaluation was performed.
Args:
      x: Matrix of shape [n_samples, n_features...] or dictionary of many
        matrices containing the input samples for fitting the model. Can be
        an iterator that returns arrays of features or a dictionary of arrays
        of features. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs] containing the
label values (class labels in classification, real numbers in
regression) or dictionary of multiple vectors/matrices. Can be iterator
that returns array of targets or dictionary of array of targets. If set,
`input_fn` must be `None`. Note: For classification, label values must
be integers representing the class index (i.e. values from 0 to
n_classes-1).
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
If input_fn is set, `x`, `y`, and `batch_size` must be `None`. If
`steps` is not provided, this should raise `OutOfRangeError` or
`StopIteration` after the desired amount of data (e.g., one epoch) has
been provided. See "Stop conditions" above for specifics.
feed_fn: Function creating a feed dict every time it is called. Called
once per iteration. Must be `None` if `input_fn` is provided.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`, if specified. Must be `None` if `input_fn` is
provided.
steps: Number of steps for which to evaluate model. If `None`, evaluate
until `x` is consumed or `input_fn` raises an end-of-input exception.
See "Stop conditions" above for specifics.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function.
Metric ops should support streaming, e.g., returning `update_op` and
`value` tensors. For example, see the options defined in
`../../../metrics/python/ops/metrics_ops.py`.
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data.
checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
latest checkpoint in `model_dir` is used.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the evaluation call.
Returns:
Returns `dict` with evaluation results.
"""
raise NotImplementedError
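# Illustrative sketch, not part of the original module: a minimal concrete
# Evaluable. A real implementation would run an eval loop over checkpoints;
# this stub only satisfies the interface. The directory and metric values are
# hypothetical.
class _ConstantEvaluable(Evaluable):
  """Evaluable that reports a fixed metric dict (illustration only)."""
  @property
  def model_dir(self):
    return "/tmp/constant_evaluable"
  def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
               batch_size=None, steps=None, metrics=None, name=None,
               checkpoint_path=None, hooks=None):
    # Return a dict of the documented shape, including `global_step`.
    return {"accuracy": 1.0, "global_step": 0}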
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/evaluable.py
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper of Session API which runs hooks (deprecated).
These are deprecated aliases for classes and functions in `tf.train`. Please use
those directly.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import monitored_session
# pylint: disable=invalid-name
Scaffold = monitored_session.Scaffold
SessionCreator = monitored_session.SessionCreator
ChiefSessionCreator = monitored_session.ChiefSessionCreator
WorkerSessionCreator = monitored_session.WorkerSessionCreator
MonitoredSession = monitored_session.MonitoredSession
# pylint: enable=invalid-name
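# Illustrative sketch, not part of the original module: the aliases above are
# drop-in for tf.train, so a monitored session can be assembled exactly as
# with the core API. Calling this would start a local in-process session.
def _example_monitored_session():
  """Builds a MonitoredSession from the aliased classes (illustration only)."""
  creator = ChiefSessionCreator(scaffold=Scaffold())
  return MonitoredSession(session_creator=creator)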
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/monitored_session.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for a Session-like object that handles threads and recovery (deprecated).
These are deprecated aliases for classes and functions in `tf.train`. Please use
those directly.
Based on an original design of Illia Polosukhin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import summary_io
SummaryWriterCache = summary_io.SummaryWriterCache # pylint: disable=invalid-name
# Backward compatible interface. Remove?
clear_summary_writers = SummaryWriterCache.clear
get_summary_writer = SummaryWriterCache.get
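# Illustrative sketch, not part of the original module: the cache hands out
# one writer per directory, so repeated lookups of the same path return the
# same object. The log directory below is a hypothetical example value.
def _example_cached_writer():
  """Fetches a summary writer twice and checks the cache returns one object."""
  writer = get_summary_writer("/tmp/example_logdir")
  assert writer is SummaryWriterCache.get("/tmp/example_logdir")
  return writer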
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/summary_writer_cache.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiment class collecting information for a single training run (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import math
import os
import time
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
__all__ = ["Experiment"]
def _get_standardized_predicate_fn(predicate_fn):
pred_fn_args = function_utils.fn_args(predicate_fn)
if "checkpoint_path" not in pred_fn_args:
# pylint: disable=unused-argument
def _pred_fn_wrapper(eval_results, checkpoint_path):
return predicate_fn(eval_results)
return _pred_fn_wrapper
else:
return predicate_fn
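# Illustrative sketch, not part of the original module: both predicate
# signatures accepted by continuous eval, normalized by the helper above so
# callers can always pass `checkpoint_path`.
def _example_standardized_predicates():
  """Wraps one- and two-argument predicates into a uniform signature."""
  def _stop_when_first_result(eval_results):  # One-argument form.
    return eval_results is None
  def _stop_without_checkpoint(eval_results, checkpoint_path):  # Two-argument form.
    del eval_results  # Unused in this sketch.
    return checkpoint_path is None
  return (_get_standardized_predicate_fn(_stop_when_first_result),
          _get_standardized_predicate_fn(_stop_without_checkpoint))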
class _EvalAndExportListener(basic_session_run_hooks.CheckpointSaverListener):
"""Listener that evaluates and exports a model after creating a checkpoint.
The `EvalAndExportListener` waits for the associated `CheckpointSaverHook`
to save a checkpoint. It then uses the provided `eval_fn` and `export_fn` to
first evaluate the model using the newly-created checkpoint, and then export
the model according to the `export_strategies` provided in the `Experiment`.
This listener is experimental and may be changed or removed in the future.
"""
def __init__(self, eval_fn, export_fn, model_dir):
"""Initializes an `EvalAndExportListener`.
Args:
eval_fn: function which evaluates the model with the following signature:
`(name, checkpoint_path) -> eval_result`
export_fn: function which exports the model according to a set of export
strategies. Has the following signature:
`(eval_result, checkpoint_path) -> export_results`
model_dir: directory which contains estimator parameters and checkpoints.
"""
self._eval_fn = eval_fn
self._export_fn = export_fn
self._model_dir = model_dir
self._latest_path = None
self._eval_result = None
self._export_results = None
def after_save(self, session, global_step_value):
"""Evaluates and exports the model after a checkpoint is created."""
# Load and cache the path of the most recent checkpoint to avoid duplicate
# searches on GCS.
logging.info("Checking for checkpoint in %s", self._model_dir)
latest_path = checkpoint_management.latest_checkpoint(self._model_dir)
if not latest_path:
logging.warning("Skipping evaluation and export since model has not been "
"saved yet.")
elif latest_path == self._latest_path:
logging.warning("Skipping evaluation due to same latest checkpoint %s.",
latest_path)
else:
self._latest_path = latest_path
self._eval_result = self._eval_fn(
name="intermediate_export", checkpoint_path=latest_path)
self._export_results = self._export_fn(
self._eval_result, checkpoint_path=latest_path)
@property
def eval_result(self):
return self._eval_result
@property
def export_results(self):
return self._export_results
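# Illustrative sketch, not part of the original module: how the listener above
# is attached to a CheckpointSaverHook, mirroring what train_and_evaluate does
# when `checkpoint_and_export` is set. `eval_fn` and `export_fn` are
# hypothetical callables with the documented signatures; 600 is an arbitrary
# save interval in seconds.
def _example_eval_and_export_hook(eval_fn, export_fn, model_dir):
  """Wraps an _EvalAndExportListener in a CheckpointSaverHook."""
  listener = _EvalAndExportListener(
      eval_fn=eval_fn, export_fn=export_fn, model_dir=model_dir)
  return basic_session_run_hooks.CheckpointSaverHook(
      checkpoint_dir=model_dir, save_secs=600, listeners=[listener])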
class Experiment(object):
"""Experiment is a class containing all information needed to train a model.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
After an experiment is created (by passing an Estimator and inputs for
training and evaluation), an Experiment instance knows how to invoke training
and eval loops in a sensible fashion for distributed training.
"""
# TODO(ispir): remove delay_workers_by_global_step and make global step based
# waiting as only behavior.
@deprecated(None, "Please switch to tf.estimator.train_and_evaluate. You will"
" also have to convert to a tf.estimator.Estimator.")
def __init__(self,
estimator,
train_input_fn,
eval_input_fn,
eval_metrics=None,
train_steps=None,
eval_steps=100,
train_monitors=None,
eval_hooks=None,
local_eval_frequency=None,
eval_delay_secs=120,
continuous_eval_throttle_secs=60,
min_eval_frequency=None,
delay_workers_by_global_step=False,
export_strategies=None,
train_steps_per_iteration=None,
checkpoint_and_export=False,
saving_listeners=None,
check_interval_secs=5):
"""Constructor for `Experiment`.
Creates an Experiment instance. None of the functions passed to this
constructor are executed at construction time. They are stored and used
when a method is executed which requires it.
Args:
estimator: Object implementing Estimator interface, which could be a
combination of `tf.contrib.learn.Trainable` and
`tf.contrib.learn.Evaluable` (deprecated), or
`tf.estimator.Estimator`.
train_input_fn: function, returns features and labels for training.
      eval_input_fn: function, returns features and labels for evaluation. If
        `eval_steps` is `None`, this should be configured to produce only a
        finite number of batches (generally, 1 epoch over the evaluation data).
eval_metrics: `dict` of string, metric function. If `None`, default set
is used. This should be `None` if the `estimator` is
`tf.estimator.Estimator`. If metrics are provided they will be
*appended* to the default set.
train_steps: Perform this many steps of training. `None`, the default,
means train forever.
eval_steps: `evaluate` runs until input is exhausted (or another exception
is raised), or for `eval_steps` steps, if specified.
train_monitors: A list of monitors to pass to the `Estimator`'s `fit`
function.
eval_hooks: A list of `SessionRunHook` hooks to pass to the
`Estimator`'s `evaluate` function.
local_eval_frequency: (applies only to local_run) Frequency of running
eval in steps. If `None`, runs evaluation only at the end of training.
eval_delay_secs: Start evaluating after waiting for this many seconds.
continuous_eval_throttle_secs: Do not re-evaluate unless the last
evaluation was started at least this many seconds ago for
continuous_eval().
      min_eval_frequency: (applies only to train_and_evaluate). The minimum
        number of steps between evaluations. Evaluation does not occur if no
        new checkpoint is available, hence this is a minimum, not an exact
        interval. If 0, evaluation happens only after training completes. If
        None, defaults to 1. To avoid checking for new checkpoints too
        frequently, checks are further limited to be at least
        check_interval_secs apart.
delay_workers_by_global_step: if `True` delays training workers
based on global step instead of time.
export_strategies: Iterable of `ExportStrategy`s, or a single one, or
`None`.
train_steps_per_iteration: (applies only to continuous_train_and_eval).
Perform this many (integer) number of train steps for each
training-evaluation iteration. With a small value, the model will be
evaluated more frequently with more checkpoints saved. If `None`, will
use a default value (which is smaller than `train_steps` if provided).
checkpoint_and_export: (applies only to train_and_evaluate). If `True`,
performs intermediate model checkpoints and exports during the training
process, rather than only once model training is complete. This
parameter is experimental and may be changed or removed in the future.
Setting this parameter leads to the following: the value of
`min_eval_frequency` will be ignored, and the number of steps between
evaluations and exports will instead be determined by the Estimator
configuration parameters `save_checkpoints_secs` and
`save_checkpoints_steps`. Also, this parameter leads to the creation of
a default `CheckpointSaverHook` instead of a `ValidationMonitor`, so the
provided `train_monitors` will need to be adjusted accordingly.
saving_listeners: list of `CheckpointSaverListener` objects. Used by
tf.estimator.Estimator for callbacks that run immediately before or
after checkpoint savings.
      check_interval_secs: Minimum time between subsequent checks for a new
        checkpoint. This mostly matters when both min_eval_frequency and the
        time spent per training step are low.
Raises:
ValueError: if `estimator` does not implement Estimator interface,
or if export_strategies has the wrong type.
"""
if isinstance(estimator, core_estimator.Estimator):
self._core_estimator_used = True
if eval_metrics is not None:
raise ValueError(
"`eval_metrics` must be `None` with `tf.estimator.Estimator`. "
"Use `eval_metric_ops` in `tf.estimator.EstimatorSpec` instead.")
else:
self._core_estimator_used = False
if not isinstance(estimator, evaluable.Evaluable):
raise ValueError(
"`estimator` must implement `tf.contrib.learn.Evaluable` "
"or `tf.estimator.Estimator`.")
if not isinstance(estimator, trainable.Trainable):
raise ValueError(
"`estimator` must implement `tf.contrib.learn.Trainable`"
"or `tf.estimator.`Estimator`.")
if saving_listeners is not None:
raise ValueError("`saving_listeners` must be `None` with "
"`tf.contrib.learn.Estimator`.")
if isinstance(estimator, tpu_estimator.TPUEstimator):
logging.warn(
"`Experiment` class cannot work with `tf.contrib.tpu.TPUEstimator`. "
"Please call `TPUEstimator` train/evaluate directly. \n"
"Details: `Experiment` class is designed for between-graph "
"distributed training, while `TPUEstimator` is working in in-graph "
"distributed mode. Use with care.")
super(Experiment, self).__init__()
# Immutable fields.
self._estimator = estimator
self._train_input_fn = train_input_fn
self._eval_input_fn = eval_input_fn
self._eval_metrics = eval_metrics
self._train_steps = train_steps
self._eval_steps = eval_steps
self._local_eval_frequency = local_eval_frequency
self._eval_delay_secs = eval_delay_secs
self._continuous_eval_throttle_secs = continuous_eval_throttle_secs
self._checkpoint_and_export = checkpoint_and_export
self._saving_listeners = saving_listeners
self._min_eval_frequency = min_eval_frequency if (
min_eval_frequency is not None) else 1
self._check_interval_secs = check_interval_secs
self._delay_workers_by_global_step = delay_workers_by_global_step
self._train_monitors = train_monitors[:] if train_monitors else []
self._eval_hooks = eval_hooks[:] if eval_hooks else []
self._set_export_strategies(export_strategies)
self._train_steps_per_iteration = train_steps_per_iteration
if (self._train_steps_per_iteration is not None and
not isinstance(self._train_steps_per_iteration, int)):
raise ValueError("`train_steps_per_iteration` must be an integer.")
@property
def estimator(self):
return self._estimator
@property
def eval_metrics(self):
return self._eval_metrics
@property
def train_steps(self):
return self._train_steps
@property
def eval_steps(self):
return self._eval_steps
def _set_export_strategies(self, values): # pylint: disable=missing-docstring
export_strategies = []
if values:
if isinstance(values, export_strategy.ExportStrategy):
export_strategies.append(values)
else:
for value in values:
if not isinstance(value, export_strategy.ExportStrategy):
raise ValueError("`export_strategies` must be an ExportStrategy,"
" an iterable of ExportStrategy, or `None`,"
" found %s." % value)
export_strategies.append(value)
self._export_strategies = tuple(export_strategies)
def extend_train_hooks(self, additional_hooks):
"""Extends the hooks for training."""
self._train_monitors.extend(additional_hooks)
def reset_export_strategies(self, new_export_strategies=None):
"""Resets the export strategies with the `new_export_strategies`.
Args:
new_export_strategies: A new list of `ExportStrategy`s, or a single one,
or None.
Returns:
The old export strategies.
"""
old_export_strategies = self._export_strategies
self._set_export_strategies(new_export_strategies)
return old_export_strategies
def train(self, delay_secs=None):
"""Fit the estimator using the training data.
Train the estimator for `self._train_steps` steps, after waiting for
`delay_secs` seconds. If `self._train_steps` is `None`, train forever.
Args:
delay_secs: Start training after this many seconds.
Returns:
The trained estimator.
"""
start = time.time()
# Start the server, if needed. It's important to start the server before
# we (optionally) sleep for the case where no device_filters are set.
# Otherwise, the servers will wait to connect to each other before starting
# to train. We might as well start as soon as we can.
config = self._estimator.config
if isinstance(config, run_config.RunConfig):
if (config.cluster_spec and config.master and
config.environment == run_config.Environment.LOCAL):
logging.warn("ClusterSpec and master are provided, but environment is "
"set to 'local'. Set environment to 'cloud' if you intend "
"to use the distributed runtime.")
if (config.environment != run_config.Environment.LOCAL and
config.environment != run_config.Environment.GOOGLE and
config.cluster_spec and config.master):
self._start_server()
elif config.cluster_spec and config.master:
raise ValueError(
"For distributed runtime, Experiment class only works with "
"tf.contrib.learn.RunConfig for now, but provided {}".format(
type(config)))
extra_hooks = []
if delay_secs is None:
task_id = self._estimator.config.task_id or 0
if self._delay_workers_by_global_step:
        # Wait 5500 global steps for the second worker. Each worker waits more
        # than the previous one, but with a diminishing number of extra steps.
extra_hooks.append(
basic_session_run_hooks.GlobalStepWaiterHook(
int(8000.0 * math.log(task_id + 1))))
delay_secs = 0
else:
# Wait 5 secs more for each new worker up to 60 secs.
delay_secs = min(60, task_id * 5)
if delay_secs > 0:
elapsed_secs = time.time() - start
remaining = delay_secs - elapsed_secs
logging.info("Waiting %d secs before starting training.", remaining)
      time.sleep(max(0, remaining))  # Sleep only the remaining delay.
return self._call_train(
input_fn=self._train_input_fn,
max_steps=self._train_steps,
hooks=self._train_monitors + extra_hooks,
saving_listeners=self._saving_listeners)
def evaluate(self, delay_secs=None, name=None):
"""Evaluate on the evaluation data.
Runs evaluation on the evaluation data and returns the result. Runs for
`self._eval_steps` steps, or if it's `None`, then run until input is
exhausted or another exception is raised. Start the evaluation after
`delay_secs` seconds, or if it's `None`, defaults to using
`self._eval_delay_secs` seconds.
Args:
      delay_secs: Start evaluating after this many seconds. If `None`,
        defaults to `self._eval_delay_secs`.
      name: Name of the evaluation, used when multiple evaluations are run
        for the same experiment.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
if delay_secs is None:
delay_secs = self._eval_delay_secs
if delay_secs:
logging.info("Waiting %d secs before starting eval.", delay_secs)
time.sleep(delay_secs)
return self._call_evaluate(
input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name=(name or "one_pass"),
hooks=self._eval_hooks)
@deprecated(
"2016-10-23",
"local_run will be renamed to train_and_evaluate and the new default "
"behavior will be to run evaluation every time there is a new "
"checkpoint.")
def local_run(self):
with _new_attr_context(self, "_min_eval_frequency"):
self._min_eval_frequency = self._local_eval_frequency
return self.train_and_evaluate()
# TODO(xiejw): Allow continuous_eval_predicate_fn to be passed via constructor
# once stopping all jobs is implemented.
def _continuous_eval(self,
input_fn,
name,
delay_secs,
throttle_delay_secs,
evaluate_checkpoint_only_once=True,
continuous_eval_predicate_fn=None,
export=True):
"""Run continuous eval.
Runs infinite eval on the evaluation data set. This function starts
evaluating after `delay_secs` seconds and then runs no more than one
evaluation (with `self._eval_steps` steps each time) per
`throttle_delay_secs`. If `train_steps` is not None, will return after
global_step reaches `train_steps`.
Args:
input_fn: The input to use for this eval.
name: A string appended to the folder name of evaluation results.
delay_secs: Start evaluating after this many seconds. If None, defaults to
self._eval_delay_secs.
throttle_delay_secs: Do not re-evaluate unless the last evaluation was
started at least this many seconds ago. If None, defaults to
self._continuous_eval_throttle_secs.
evaluate_checkpoint_only_once: Whether to skip evaluation of checkpoints
that have already been evaluated. Default is `True`.
continuous_eval_predicate_fn: A predicate function determining whether to
continue eval after each iteration. A `predicate_fn` has one of the
following signatures:
* (eval_results) -> boolean
* (eval_results, checkpoint_path) -> boolean
Where `eval_results` is the dictionary of metric evaluations and
checkpoint_path is the path to the checkpoint containing the parameters
on which that evaluation was based.
At the beginning of evaluation, the passed `eval_results` will be None
so it's expected that the predicate function handles that gracefully.
Continuous eval behavior under different conditions:
* When `predicate_fn` is specified:
+ if `train_steps` is None, run until `predicate_fn` returns False.
+ if `train_steps` is specified, run until either global step
reaches `train_steps` or `predicate_fn` returns False.
* When `predicate_fn` is not specified:
+ if `train_steps` is None, run in an infinite loop.
+ if `train_steps` is specified, run until global step reaches
`train_steps`.
      export: Whether to export from this step. Default is `True`.
Raises:
ValueError: if `continuous_eval_predicate_fn` is neither None nor
callable.
"""
if continuous_eval_predicate_fn is not None:
if not callable(continuous_eval_predicate_fn):
raise ValueError(
"`continuous_eval_predicate_fn` must be a callable, or None.")
predicate_fn = _get_standardized_predicate_fn(
continuous_eval_predicate_fn)
else:
predicate_fn = None
if delay_secs is None:
delay_secs = self._eval_delay_secs
if throttle_delay_secs is None:
throttle_delay_secs = self._continuous_eval_throttle_secs
if delay_secs:
logging.info("Waiting %f secs before starting eval.", delay_secs)
time.sleep(delay_secs)
previous_path = None
eval_result = None
last_warning_time = 0
while (not predicate_fn or predicate_fn(
eval_result, checkpoint_path=previous_path)):
# Exit if we have already reached number of steps to train.
if self._has_training_stopped(eval_result):
logging.info("Exiting continuous eval, global_step=%s >= "
"train_step=%s", eval_result[ops.GraphKeys.GLOBAL_STEP],
self._train_steps)
return
start = time.time()
error_msg = None
latest_path = checkpoint_management.latest_checkpoint(
self._estimator.model_dir)
if not latest_path:
error_msg = ("Estimator is not fitted yet. "
"Will start an evaluation when a checkpoint is ready.")
elif evaluate_checkpoint_only_once and latest_path == previous_path:
error_msg = "No new checkpoint ready for evaluation."
if error_msg:
# Print warning message every 10 mins.
eval_result = {}
if time.time() - last_warning_time > 600:
logging.warning(error_msg)
last_warning_time = time.time()
else:
eval_result = self._call_evaluate(
input_fn=input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name=name,
checkpoint_path=latest_path,
hooks=self._eval_hooks)
# Ensure eval result is not None for next round of evaluation.
if not eval_result:
eval_result = {}
if export:
self._maybe_export(eval_result, checkpoint_path=latest_path)
# Clear warning timer and update last evaluated checkpoint
last_warning_time = 0
previous_path = latest_path
duration = time.time() - start
if duration < throttle_delay_secs:
difference = throttle_delay_secs - duration
logging.info("Waiting %f secs before starting next eval run.",
difference)
time.sleep(difference)
def _has_training_stopped(self, eval_result):
"""Determines whether the training has stopped."""
if not eval_result:
return False
global_step = eval_result.get(ops.GraphKeys.GLOBAL_STEP)
return global_step and self._train_steps and (global_step >=
self._train_steps)
def continuous_eval(self,
delay_secs=None,
throttle_delay_secs=None,
evaluate_checkpoint_only_once=True,
continuous_eval_predicate_fn=None,
name="continuous"):
self._continuous_eval(
self._eval_input_fn,
name=name,
delay_secs=delay_secs,
throttle_delay_secs=throttle_delay_secs,
evaluate_checkpoint_only_once=evaluate_checkpoint_only_once,
continuous_eval_predicate_fn=continuous_eval_predicate_fn)
def continuous_eval_on_train_data(self,
delay_secs=None,
throttle_delay_secs=None,
continuous_eval_predicate_fn=None,
name="continuous_on_train_data"):
self._continuous_eval(
self._train_input_fn,
name=name,
delay_secs=delay_secs,
throttle_delay_secs=throttle_delay_secs,
continuous_eval_predicate_fn=continuous_eval_predicate_fn,
export=False)
def train_and_evaluate(self):
"""Interleaves training and evaluation.
The frequency of evaluation is controlled by the constructor arg
`min_eval_frequency`. When this parameter is 0, evaluation happens
only after training has completed. Note that evaluation cannot happen
more frequently than checkpoints are taken. If no new snapshots are
available when evaluation is supposed to occur, then evaluation doesn't
happen for another `min_eval_frequency` steps (assuming a checkpoint is
    available at that point). Thus, setting `min_eval_frequency` to 1 means
    that the model will be evaluated every time there is a new checkpoint.
    This is particularly useful for a "Master" task in the cloud, whose
responsibility it is to take checkpoints, evaluate those checkpoints,
and write out summaries. Participating in training as the supervisor
allows such a task to accomplish the first and last items, while
performing evaluation allows for the second.
Returns:
The result of the `evaluate` call to the `Estimator` as well as the
export results using the specified `ExportStrategy`.
"""
    # The directory to which evaluation summaries are written is determined
# by adding a suffix to 'eval'; that suffix is the 'name' parameter to
# the various evaluate(...) methods. By setting it to None, we force
# the directory name to simply be 'eval'.
eval_dir_suffix = None
# We set every_n_steps to 1, but evaluation only occurs when a new
# snapshot is available. If, by the time we finish evaluation
# there is a new snapshot, then we just evaluate again. Otherwise,
# we keep training until one becomes available.
with _new_attr_context(self, "_train_monitors"):
self._train_monitors = self._train_monitors or []
config = self._estimator.config
intermediate_export = self._checkpoint_and_export and (
config.save_checkpoints_secs or config.save_checkpoints_steps)
if intermediate_export:
# Create a partially specified evaluate function with the desired
# arguments. This will be executed by the _EvalAndExportListener,
# which will specify the latest checkpoint path.
eval_fn = functools.partial(
self._call_evaluate,
input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
hooks=self._eval_hooks)
export_listener = _EvalAndExportListener(
eval_fn=eval_fn,
export_fn=self._maybe_export,
model_dir=self._estimator.model_dir)
saver_hook = basic_session_run_hooks.CheckpointSaverHook(
checkpoint_dir=self._estimator.model_dir,
save_secs=config.save_checkpoints_secs,
save_steps=config.save_checkpoints_steps,
listeners=[export_listener])
self._train_monitors += [saver_hook]
else:
if self._min_eval_frequency:
# Using low min_eval_frequency (default is 1) on a non-cached file
# system requires a lot of overhead to read the checkpoint state file.
          # This is particularly bad on GCS and CNS. See also b/36498507 for
          # context. `check_interval_secs = 5` avoids polling a remote
          # filesystem too often.
self._train_monitors += [
monitors.ValidationMonitor(
input_fn=self._eval_input_fn,
eval_steps=self._eval_steps,
metrics=self._eval_metrics,
every_n_steps=self._min_eval_frequency,
check_interval_secs=self._check_interval_secs,
name=eval_dir_suffix,
hooks=self._eval_hooks)
]
self.train(delay_secs=0)
# If the checkpoint_and_export flag and appropriate estimator configuration
# parameters are set, then model evaluations and exports are done during the
# training process. In particular, this will always occur at the end of
# training, so we return the most recent results to avoid performing a
# duplicate evaluation and model export.
if intermediate_export:
return export_listener.eval_result, export_listener.export_results
else:
eval_result = self._call_evaluate(
input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name=eval_dir_suffix,
hooks=self._eval_hooks)
export_results = self._maybe_export(eval_result)
return eval_result, export_results
@experimental
def continuous_train_and_eval(self, continuous_eval_predicate_fn=None):
"""Interleaves training and evaluation.
The frequency of evaluation is controlled by the `train_steps_per_iteration`
(via constructor). The model will be first trained for
`train_steps_per_iteration`, and then be evaluated in turns.
This method is intended for single machine usage.
This differs from `train_and_evaluate` as follows:
1. The procedure will have train and evaluation in turns. The model
will be trained for a number of steps (usually smaller than `train_steps`
if provided) and then be evaluated. `train_and_evaluate` will train the
model for `train_steps` (no small training iterations).
2. Due to the different approach this schedule takes, it leads to two
differences in resource control. First, the resources (e.g., memory) used
by training will be released before evaluation (`train_and_evaluate` takes
double resources). Second, more checkpoints will be saved as a checkpoint
is generated at the end of each training iteration.
    3. As estimator.train starts from scratch (new graph, new states for
    input, etc.) at each iteration, it is recommended to use a larger
    `train_steps_per_iteration`. It is also recommended to shuffle your
    input.
Args:
continuous_eval_predicate_fn: A predicate function determining whether to
continue eval after each iteration. A `predicate_fn` has one of the
following signatures:
* (eval_results) -> boolean
* (eval_results, checkpoint_path) -> boolean
Where `eval_results` is the dictionary of metric evaluations and
checkpoint_path is the path to the checkpoint containing the parameters
on which that evaluation was based.
At the beginning of evaluation, the passed `eval_results` and
`checkpoint_path` will be None so it's expected that the predicate
function handles that gracefully.
        When `predicate_fn` is not specified, continuous eval will run in an
        infinite loop (if `train_steps` is None) or exit once the global step
        reaches `train_steps`.
Returns:
A tuple of the result of the `evaluate` call to the `Estimator` and the
export results using the specified `ExportStrategy`.
Raises:
ValueError: if `continuous_eval_predicate_fn` is neither None nor
callable.
"""
if continuous_eval_predicate_fn is not None:
if not callable(continuous_eval_predicate_fn):
raise ValueError(
"`continuous_eval_predicate_fn` must be a callable, or None.")
predicate_fn = _get_standardized_predicate_fn(
continuous_eval_predicate_fn)
else:
predicate_fn = None
export_results = None
latest_checkpoint = None
eval_result = None
# Set the default value for train_steps_per_iteration, which will be
# overridden by other settings.
train_steps_per_iteration = 1000
if self._train_steps_per_iteration is not None:
train_steps_per_iteration = self._train_steps_per_iteration
elif self._train_steps is not None:
train_steps_per_iteration = int(self._train_steps / 10)
while (not predicate_fn or predicate_fn(
eval_result, checkpoint_path=latest_checkpoint
if eval_result else None)):
if self._has_training_stopped(eval_result):
# Exits once max steps of training is satisfied.
logging.info("Stop training model as max steps reached")
break
logging.info("Training model for %s steps", train_steps_per_iteration)
self._call_train(
input_fn=self._train_input_fn,
steps=train_steps_per_iteration,
hooks=self._train_monitors,
saving_listeners=self._saving_listeners)
logging.info("Evaluating model now.")
latest_checkpoint = checkpoint_management.latest_checkpoint(
self._estimator.model_dir)
eval_result = self._call_evaluate(
input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name="one_pass",
checkpoint_path=latest_checkpoint,
hooks=self._eval_hooks)
export_results = self._maybe_export(eval_result)
return eval_result, export_results
def _maybe_export(self, eval_result, checkpoint_path=None):
"""Export the Estimator using export_fn, if defined."""
export_dir_base = os.path.join(
compat.as_bytes(self._estimator.model_dir), compat.as_bytes("export"))
export_results = []
for strategy in self._export_strategies:
export_results.append(
strategy.export(
self._estimator,
os.path.join(
compat.as_bytes(export_dir_base),
compat.as_bytes(strategy.name)),
checkpoint_path=checkpoint_path,
eval_result=eval_result))
return export_results
def run_std_server(self):
"""Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server.
"""
self._start_server().join()
def test(self):
"""Tests training, evaluating and exporting the estimator for a single step.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
self._call_train(
input_fn=self._train_input_fn,
steps=1,
hooks=self._train_monitors,
saving_listeners=self._saving_listeners)
eval_result = self._call_evaluate(
input_fn=self._eval_input_fn,
steps=1,
metrics=self._eval_metrics,
name="one_pass")
_ = self._maybe_export(eval_result)
return eval_result
def _start_server(self):
"""Creates, starts, and returns a server_lib.Server."""
config = self._estimator.config
if (not config.cluster_spec or not config.task_type or not config.master or
config.task_id is None):
raise ValueError("Could not start server; be sure to specify "
"cluster_spec, task_type, master, and task in "
"RunConfig or set the TF_CONFIG environment variable.")
server = server_lib.Server(
config.cluster_spec,
job_name=config.task_type,
task_index=config.task_id,
config=config.tf_config,
start=False)
server.start()
return server
def _call_train(
self,
      _sentinel=None,  # pylint: disable=invalid-name
input_fn=None,
steps=None,
hooks=None,
max_steps=None,
saving_listeners=None):
if _sentinel is not None:
raise ValueError("_call_train should be called with keyword args only")
# Estimator in core cannot work with monitors. We need to convert them
# to hooks. For Estimator in contrib, it is converted internally. So, it is
# safe to convert for both cases.
hooks = monitors.replace_monitors_with_hooks(hooks, self._estimator)
if self._core_estimator_used:
return self._estimator.train(
input_fn=input_fn,
steps=steps,
max_steps=max_steps,
hooks=hooks,
saving_listeners=saving_listeners)
else:
return self._estimator.fit(
input_fn=input_fn, steps=steps, max_steps=max_steps, monitors=hooks)
def _call_evaluate(
self,
      _sentinel=None,  # pylint: disable=invalid-name
input_fn=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None):
if _sentinel is not None:
raise ValueError("_call_evaluate should be called with keyword args only")
if self._core_estimator_used:
if metrics is not None:
raise ValueError(
"`eval_metrics` must be `None` with `tf.estimator.Estimator`")
return self._estimator.evaluate(
input_fn=input_fn,
steps=steps,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks)
else:
return self._estimator.evaluate(
input_fn=input_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks)
@contextlib.contextmanager
def _new_attr_context(obj, attr):
"""Creates a new context in which an object's attribute can be changed.
This creates a context in which an object's attribute can be changed.
Once the context is exited, the attribute reverts to its original value.
Args:
obj: An object whose attribute to restore at the end of the context.
attr: An attribute to remember and restore at the end of the context.
Yields:
Context.
Example:
my_obj.x = 1
with _new_attr_context(my_obj, "x"):
my_obj.x = 2
print(my_obj.x)
print(my_obj.x)
"""
saved = getattr(obj, attr)
try:
yield
finally:
setattr(obj, attr, saved)
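# Illustrative sketch, not part of the original module: a typical Experiment
# wires an estimator to its input functions; `estimator`, `train_input_fn`,
# and `eval_input_fn` are hypothetical placeholders, and the step counts are
# arbitrary example values.
def _example_experiment(estimator, train_input_fn, eval_input_fn):
  """Builds an Experiment that evaluates at most every 1000 training steps."""
  return Experiment(
      estimator=estimator,
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      train_steps=10000,
      eval_steps=100,
      min_eval_frequency=1000)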
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/experiment.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs an Experiment (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.learn.python.learn.experiment import Experiment
from tensorflow.contrib.training.python.training import hparam as hparam_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
# TODO(xiejw): Refactor the learn_runner to make code reusable.
def _execute_schedule(experiment, schedule):
"""Execute the method named `schedule` of `experiment`."""
if not hasattr(experiment, schedule):
logging.error('Schedule references non-existent task %s', schedule)
valid_tasks = [x for x in dir(experiment)
if not x.startswith('_')
and callable(getattr(experiment, x))]
logging.error('Allowed values for this experiment are: %s', valid_tasks)
raise ValueError('Schedule references non-existent task %s' % schedule)
task = getattr(experiment, schedule)
if not callable(task):
logging.error('Schedule references non-callable member %s', schedule)
valid_tasks = [x for x in dir(experiment)
if not x.startswith('_')
and callable(getattr(experiment, x))]
logging.error('Allowed values for this experiment are: %s', valid_tasks)
raise TypeError('Schedule references non-callable member %s' % schedule)
return task()
def _wrapped_experiment_fn_with_uid_check(experiment_fn, require_hparams=False):
"""Wraps the `RunConfig` uid check with `experiment_fn`.
For `experiment_fn` which takes `run_config`, it is expected that the
`run_config` is passed to the Estimator correctly. Toward that, the wrapped
`experiment_fn` compares the `uid` of the `RunConfig` instance.
Args:
experiment_fn: The original `experiment_fn` which takes `run_config` and
`hparams`.
require_hparams: If True, the `hparams` passed to `experiment_fn` cannot be
`None`.
Returns:
    An `experiment_fn` with the same signature.
"""
def wrapped_experiment_fn(run_config, hparams):
"""Calls experiment_fn and checks the uid of `RunConfig`."""
if not isinstance(run_config, run_config_lib.RunConfig):
raise ValueError(
'`run_config` must be `tf.contrib.learn.RunConfig` instance')
if not run_config.model_dir:
raise ValueError(
'Must specify a model directory `model_dir` in `run_config`.')
if hparams is not None and not isinstance(hparams, hparam_lib.HParams):
raise ValueError('`hparams` must be `HParams` instance')
if require_hparams and hparams is None:
raise ValueError('`hparams` cannot be `None`.')
expected_uid = run_config.uid()
experiment = experiment_fn(run_config, hparams)
if not isinstance(experiment, Experiment):
raise TypeError('Experiment builder did not return an Experiment '
'instance, got %s instead.' % type(experiment))
config_from_estimator = experiment.estimator.config
if not hasattr(config_from_estimator, 'uid'):
raise RuntimeError(
'Pass `run_config` argument of the `experiment_fn` to the Estimator '
'in Experiment. It is likely a different `RunConfig` is passed to '
'`Estimator` or the `config` constructor argument in `Estimator` '
'is not set.')
if config_from_estimator.uid() != expected_uid:
raise RuntimeError(
'`RunConfig` instance is expected to be used by the `Estimator` '
'inside the `Experiment`. expected {}, but got {}'.format(
expected_uid, experiment.estimator.config.uid()))
return experiment
return wrapped_experiment_fn
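# Illustrative sketch, not part of the original module: an experiment_fn of
# the checked two-argument form. `estimator_fn`, `train_input_fn`, and
# `eval_input_fn` are hypothetical; passing `run_config` through as `config`
# is exactly what the uid check above verifies.
def _example_experiment_fn_factory(estimator_fn, train_input_fn, eval_input_fn):
  """Returns an experiment_fn compatible with the uid-checking wrapper."""
  def _experiment_fn(run_config, hparams):
    del hparams  # Unused in this sketch.
    return Experiment(
        estimator=estimator_fn(config=run_config),
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn)
  return _experiment_fn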
@deprecated(None, 'Use tf.estimator.train_and_evaluate.')
def run(experiment_fn, output_dir=None, schedule=None, run_config=None,
hparams=None):
"""Make and run an experiment.
It creates an Experiment by calling `experiment_fn`. Then it calls the
function named as `schedule` of the Experiment.
If schedule is not provided, then the default schedule for the current task
type is used. The defaults are as follows:
* 'ps' maps to 'serve'
* 'worker' maps to 'train'
* 'master' maps to 'local_run'
If the experiment's config does not include a task type, then an exception
is raised.
Example with `run_config` (Recommended):
```
def _create_my_experiment(run_config, hparams):
# You can change a subset of the run_config properties as
# run_config = run_config.replace(save_checkpoints_steps=500)
return tf.contrib.learn.Experiment(
estimator=my_estimator(config=run_config, hparams=hparams),
train_input_fn=my_train_input,
eval_input_fn=my_eval_input)
learn_runner.run(
experiment_fn=_create_my_experiment,
run_config=run_config_lib.RunConfig(model_dir="some/output/dir"),
schedule="train_and_evaluate",
hparams=_create_default_hparams())
```
or simply as
```
learn_runner.run(
experiment_fn=_create_my_experiment,
run_config=run_config_lib.RunConfig(model_dir="some/output/dir"))
```
if `hparams` is not used by the `Estimator`. On a single machine, `schedule`
defaults to `train_and_evaluate`.
Example with `output_dir` (deprecated):
```
def _create_my_experiment(output_dir):
return tf.contrib.learn.Experiment(
estimator=my_estimator(model_dir=output_dir),
train_input_fn=my_train_input,
eval_input_fn=my_eval_input)
learn_runner.run(
experiment_fn=_create_my_experiment,
output_dir="some/output/dir",
schedule="train")
```
Args:
experiment_fn: A function that creates an `Experiment`. It could be one of
the two following signatures:
1) [Deprecated] It accepts an argument `output_dir` which should be used
to create the `Estimator` (passed as `model_dir` to its constructor). It
must return an `Experiment`. For this case, `run_config` and `hparams`
must be None.
2) It accepts two arguments `run_config` and `hparams`, which should be
used to create the `Estimator` (`run_config` passed as `config` to its
constructor; `hparams` used as the hyper-parameters of the model).
It must return an `Experiment`. For this case, `output_dir` must be None.
output_dir: Base output directory [Deprecated].
schedule: The name of the method in the `Experiment` to run.
run_config: `RunConfig` instance. The `run_config.model_dir` must be
non-empty. If `run_config` is set, `output_dir` must be None.
hparams: `HParams` instance. The default hyper-parameters, which will be
passed to the `experiment_fn` if `run_config` is not None.
Returns:
The return value of function `schedule`.
Raises:
ValueError: If both `output_dir` and `run_config` are empty or set,
`schedule` is None but no task type is set in the built experiment's
config, the task type has no default, `run_config.model_dir` is empty or
`schedule` doesn't reference a member of `Experiment`.
TypeError: `schedule` references non-callable member.
"""
if output_dir is not None and run_config is not None:
raise ValueError('Cannot provide both `output_dir` and `run_config`')
if output_dir is None and run_config is None:
raise ValueError('Must set value for `output_dir` or `run_config`')
if not callable(experiment_fn):
raise TypeError('Experiment builder "%s" is not callable.' %
experiment_fn)
experiment = None
if run_config is not None:
wrapped_experiment_fn = _wrapped_experiment_fn_with_uid_check(experiment_fn)
experiment = wrapped_experiment_fn(run_config=run_config, hparams=hparams)
else:
if not output_dir:
raise ValueError('Must specify an output directory')
if hparams is not None:
raise ValueError(
'Must set `hparams` as None for `experiment_fn` with `output_dir`.')
# Call the builder
experiment = experiment_fn(output_dir=output_dir)
if not isinstance(experiment, Experiment):
raise TypeError('Experiment builder did not return an Experiment '
'instance, got %s instead.' % type(experiment))
# Get the schedule
run_config = run_config or experiment.estimator.config
schedule = schedule or _get_default_schedule(run_config)
return _execute_schedule(experiment, schedule)
@deprecated(None, 'Use tf.estimator.train_and_evaluate.')
def tune(experiment_fn, tuner):
"""Tune an experiment with hyper-parameters.
It iterates trials by running the Experiment for each trial with the
corresponding hyper-parameters. For each trial, it retrieves the
hyper-parameters from `tuner`, creates an Experiment by calling experiment_fn,
and then reports the measure back to `tuner`.
Example:
```
def _create_my_experiment(run_config, hparams):
hidden_units = [hparams.unit_per_layer] * hparams.num_hidden_layers
return tf.contrib.learn.Experiment(
estimator=DNNClassifier(config=run_config, hidden_units=hidden_units),
train_input_fn=my_train_input,
eval_input_fn=my_eval_input)
tuner = create_tuner(study_configuration, objective_key)
learn_runner.tune(experiment_fn=_create_my_experiment, tuner)
```
Args:
experiment_fn: A function that creates an `Experiment`. It should accept an
argument `run_config` which should be used to create the `Estimator` (
passed as `config` to its constructor), and an argument `hparams`, which
should be used for hyper-parameters tuning. It must return an
`Experiment`.
tuner: A `Tuner` instance.
"""
while tuner.next_trial():
tuner.run_experiment(
_wrapped_experiment_fn_with_uid_check(
experiment_fn, require_hparams=True))
def _is_distributed(config):
"""Returns true if this is a distributed job."""
if not config.cluster_spec:
return False
# This is considered a distributed job if there is more than one task
# in the cluster spec.
task_count = 0
for job in config.cluster_spec.jobs:
for _ in config.cluster_spec.job_tasks(job):
task_count += 1
return task_count > 1
def _get_default_schedule(config):
"""Returns the default schedule for the provided RunConfig."""
if not config or not _is_distributed(config):
return 'train_and_evaluate'
if not config.task_type:
raise ValueError('Must specify a schedule')
if config.task_type == run_config_lib.TaskType.MASTER:
# TODO(rhaertel): handle the case where there is more than one master
# or explicitly disallow such a case.
return 'train_and_evaluate'
elif config.task_type == run_config_lib.TaskType.PS:
return 'run_std_server'
elif config.task_type == run_config_lib.TaskType.WORKER:
return 'train'
raise ValueError('No default schedule for task type: %s' % (config.task_type))
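# Illustrative sketch, not part of the original module: with no TF_CONFIG set,
# a fresh RunConfig has no cluster spec, so the job is not distributed and the
# default schedule resolves to 'train_and_evaluate'.
def _example_default_schedule():
  """Resolves the default schedule for a local, non-distributed config."""
  config = run_config_lib.RunConfig()
  return _get_default_schedule(config)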
|
tensorflow-master
|
tensorflow/contrib/learn/python/learn/learn_runner.py
|