| python_code | repo_name | file_path |
| --- | --- | --- |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for distort_image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.ops import gen_distort_image_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import resource_loader
_distort_image_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_distort_image_ops.so'))
# pylint: disable=invalid-name
def random_hsv_in_yiq(image,
max_delta_hue=0,
lower_saturation=1,
upper_saturation=1,
lower_value=1,
upper_value=1,
seed=None):
"""Adjust hue, saturation, value of an RGB image randomly in YIQ color space.
  Equivalent to `adjust_hsv_in_yiq()` but uses a `delta_hue` randomly
  picked in the interval `[-max_delta_hue, max_delta_hue]`, a
  `scale_saturation` randomly picked in the interval
  `[lower_saturation, upper_saturation]`, and a `scale_value` randomly
  picked in the interval `[lower_value, upper_value]`.
Args:
image: RGB image or images. Size of the last dimension must be 3.
max_delta_hue: float. Maximum value for the random delta_hue. Passing 0
disables adjusting hue.
lower_saturation: float. Lower bound for the random scale_saturation.
upper_saturation: float. Upper bound for the random scale_saturation.
lower_value: float. Lower bound for the random scale_value.
upper_value: float. Upper bound for the random scale_value.
seed: An operation-specific seed. It will be used in conjunction
with the graph-level seed to determine the real seeds that will be
used in this operation. Please see the documentation of
set_random_seed for its interaction with the graph-level random seed.
Returns:
3-D float tensor of shape `[height, width, channels]`.
Raises:
    ValueError: if `max_delta_hue`, `lower_saturation`, `upper_saturation`,
      `lower_value`, or `upper_value` is invalid.
"""
  if max_delta_hue < 0:
    raise ValueError('max_delta_hue must be non-negative.')
if lower_saturation < 0:
raise ValueError('lower_saturation must be non-negative.')
if lower_value < 0:
raise ValueError('lower_value must be non-negative.')
  if lower_saturation > upper_saturation:
    raise ValueError('lower_saturation must be <= upper_saturation.')
  if lower_value > upper_value:
    raise ValueError('lower_value must be <= upper_value.')
if max_delta_hue == 0:
delta_hue = 0
else:
delta_hue = random_ops.random_uniform(
[], -max_delta_hue, max_delta_hue, seed=seed)
if lower_saturation == upper_saturation:
scale_saturation = lower_saturation
else:
scale_saturation = random_ops.random_uniform(
[], lower_saturation, upper_saturation, seed=seed)
if lower_value == upper_value:
scale_value = lower_value
else:
scale_value = random_ops.random_uniform(
[], lower_value, upper_value, seed=seed)
return adjust_hsv_in_yiq(image, delta_hue, scale_saturation, scale_value)
def adjust_hsv_in_yiq(image,
delta_hue=0,
scale_saturation=1,
scale_value=1,
name=None):
"""Adjust hue, saturation, value of an RGB image in YIQ color space.
This is a convenience method that converts an RGB image to float
representation, converts it to YIQ, rotates the color around the Y channel by
delta_hue in radians, scales the chrominance channels (I, Q) by
scale_saturation, scales all channels (Y, I, Q) by scale_value,
converts back to RGB, and then back to the original data type.
`image` is an RGB image. The image hue is adjusted by converting the
image to YIQ, rotating around the luminance channel (Y) by
`delta_hue` in radians, multiplying the chrominance channels (I, Q) by
`scale_saturation`, and multiplying all channels (Y, I, Q) by
`scale_value`. The image is then converted back to RGB.
Args:
image: RGB image or images. Size of the last dimension must be 3.
delta_hue: float, the hue rotation amount, in radians.
scale_saturation: float, factor to multiply the saturation by.
scale_value: float, factor to multiply the value by.
name: A name for this operation (optional).
Returns:
Adjusted image(s), same shape and DType as `image`.
"""
with ops.name_scope(name, 'adjust_hsv_in_yiq', [image]) as name:
image = ops.convert_to_tensor(image, name='image')
    # Remember the original dtype so we can convert back if needed.
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
rgb_altered = gen_distort_image_ops.adjust_hsv_in_yiq(
flt_image, delta_hue, scale_saturation, scale_value)
return image_ops.convert_image_dtype(rgb_altered, orig_dtype)
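# A minimal usage sketch (added for illustration; not part of the original
# module). It assumes TF 1.x graph mode and an RGB `image` tensor supplied by
# the caller; the adjustment values below are arbitrary examples.
def _example_usage(image):
  """Illustrative only: apply a fixed and a random HSV-in-YIQ adjustment."""
  adjusted = adjust_hsv_in_yiq(
      image, delta_hue=0.1, scale_saturation=0.9, scale_value=1.1)
  augmented = random_hsv_in_yiq(
      image, max_delta_hue=0.2,
      lower_saturation=0.5, upper_saturation=1.5,
      lower_value=0.8, upper_value=1.2, seed=42)
  return adjusted, augmented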
| tensorflow-master | tensorflow/contrib/image/python/ops/distort_image_ops.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building quantized models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,g-bad-import-order
from tensorflow.contrib.quantization.python import array_ops as quantized_array_ops
from tensorflow.contrib.quantization.python.math_ops import *
from tensorflow.contrib.quantization.python.nn_ops import *
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import,wildcard-import,g-bad-import-order
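# A minimal usage sketch (added for illustration; not part of the original
# module). It assumes TF 1.x graph mode; `quantize_v2` and `dequantize` are
# the ops re-exported above, and the [-1, 1] range is an arbitrary example.
def _example_quantize_round_trip(tensor, min_range=-1.0, max_range=1.0):
  """Illustrative only: quantize a float tensor to quint8 and back to float."""
  from tensorflow.python.framework import dtypes  # local import for the sketch
  quantized, out_min, out_max = quantize_v2(
      tensor, min_range, max_range, dtypes.quint8)
  return dequantize(quantized, out_min, out_max)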
| tensorflow-master | tensorflow/contrib/quantization/__init__.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Array Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import
| tensorflow-master | tensorflow/contrib/quantization/python/array_ops.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.quantization.python.array_ops import *
from tensorflow.contrib.quantization.python.math_ops import *
from tensorflow.contrib.quantization.python.nn_ops import *
# pylint: enable=unused-import,wildcard-import
| tensorflow-master | tensorflow/contrib/quantization/python/__init__.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=unused-import,wildcard-import
| tensorflow-master | tensorflow/contrib/quantization/python/nn_ops.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Math Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=unused-import,wildcard-import
| tensorflow-master | tensorflow/contrib/quantization/python/math_ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Inter-process communication using MPI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
def _load_library(name, op_list=None):
"""Loads a .so file containing the specified operators.
Args:
name: The name of the .so file to load.
op_list: A list of names of operators that the library should have. If None
then the .so file's contents will not be verified.
Raises:
NameError if one of the required ops is missing.
"""
try:
filename = resource_loader.get_path_to_datafile(name)
library = load_library.load_op_library(filename)
for expected_op in (op_list or []):
for lib_op in library.OP_LIST.op:
if lib_op.name == expected_op:
break
else:
raise NameError('Could not find operator %s in dynamic library %s' %
(expected_op, name))
return library
except errors.NotFoundError:
logging.warning('%s file could not be loaded.', name)
MPI_LIB = _load_library(
'mpi_collectives.so',
['MPISize', 'MPIRank', 'MPILocalRank', 'MPIAllgather', 'MPIAllreduce'])
def size(name=None):
"""An op which returns the number of MPI processes.
This is equivalent to running `MPI_Comm_size(MPI_COMM_WORLD, ...)` to get the
size of the global communicator.
Returns:
An integer scalar containing the number of MPI processes.
"""
return MPI_LIB.mpi_size(name=name)
ops.NotDifferentiable('MPISize')
def rank(name=None):
"""An op which returns the MPI rank of the calling process.
This is equivalent to running `MPI_Comm_rank(MPI_COMM_WORLD, ...)` to get the
rank of the current process in the global communicator.
Returns:
An integer scalar with the MPI rank of the calling process.
"""
return MPI_LIB.mpi_rank(name=name)
ops.NotDifferentiable('MPIRank')
def init(name=None):
"""An op which initializes MPI on the device on which it is run.
All future MPI ops must be run on the same device that the `init` op was run
on.
"""
return MPI_LIB.mpi_init(name=name)
ops.NotDifferentiable('MPIInit')
def local_rank(name=None):
"""An op which returns the local MPI rank of the calling process, within the
node that it is running on. For example, if there are seven processes running
on a node, their local ranks will be zero through six, inclusive.
This is equivalent to running `MPI_Comm_rank(...)` on a new communicator
which only includes processes on the same node.
Returns:
An integer scalar with the local MPI rank of the calling process.
"""
return MPI_LIB.mpi_local_rank(name=name)
ops.NotDifferentiable('MPILocalRank')
def _allreduce(tensor, name=None):
"""An op which sums an input tensor over all the MPI processes.
The reduction operation is keyed by the name of the op. The tensor type and
shape must be the same on all MPI processes for a given name. The reduction
will not start until all processes are ready to send and receive the tensor.
Returns:
A tensor of the same shape and type as `tensor`, summed across all
processes.
"""
return MPI_LIB.mpi_allreduce(tensor, name=name)
ops.NotDifferentiable('MPIAllreduce')
def allgather(tensor, name=None):
"""An op which concatenates the input tensor with the same input tensor on
all other MPI processes.
The concatenation is done on the first dimension, so the input tensors on the
different processes must have the same rank and shape, except for the first
dimension, which is allowed to be different.
Returns:
A tensor of the same type as `tensor`, concatenated on dimension zero
across all processes. The shape is identical to the input shape, except for
the first dimension, which may be greater and is the sum of all first
dimensions of the tensors in different MPI processes.
"""
# Specify that first allgather is to collect the tensor gather sizes,
# indicated by passing in a scalar (0-D tensor) of value 0
sizes_flag = tf.constant(0, dtype=tf.int64, name='size_flag_const')
my_size = tf.slice(
tf.shape(tensor, out_type=tf.int64), [0], [1], name='size_slice')
if name is None:
name = 'allgather'
sizing_name = '{}_sizing'.format(name)
sizes = MPI_LIB.mpi_allgather(my_size, sizes_flag, name=sizing_name)
return MPI_LIB.mpi_allgather(tensor, sizes, name=name)
ops.NotDifferentiable('MPIAllgather')
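# A minimal usage sketch (added for illustration; not part of the original
# module). It assumes the process group was launched with `mpirun` and that
# `init()` has already been run on the chosen device; shapes are arbitrary.
def _example_gather_ranks():
  """Illustrative only: each process contributes one row tagged with its rank."""
  my_rank = tf.cast(rank(), tf.float32)
  local_row = tf.fill([1, 4], my_rank)  # shape [1, 4] on every process
  gathered = allgather(local_row)       # shape [num_processes, 4] after gather
  rank_sum = _allreduce(my_rank)        # sum of all rank ids across processes
  return gathered, rank_sum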
| tensorflow-master | tensorflow/contrib/mpi_collectives/mpi_ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.mpi_collectives as mpi
from tensorflow.python.platform import test
average_allgather = False
class AllgatherTest(test.TestCase):
def checkAllgather(self, num_ranks, all_gathered, local_gathered):
# Ensure that indices match.
all_gat_ind = np.sort(all_gathered.indices)
loc_gat_ind = np.sort(local_gathered.indices)
assert(len(loc_gat_ind) == len(all_gat_ind))
for i in range(len(loc_gat_ind)):
assert(loc_gat_ind[i] == all_gat_ind[i])
# For each index, verify same values.
local_checked = []
for i in range(len(local_gathered.indices)):
local_checked.append(False)
for i in range(len(all_gathered.indices)):
all_index = all_gathered.indices[i]
# TODO(jthestness): Make this lookup quicker using sorting.
loc_index = -1
for j in range(len(local_gathered.indices)):
if local_gathered.indices[j] == all_index and not local_checked[j]:
loc_index = j
local_checked[j] = True
break
assert(loc_index >= 0)
correct_output = local_gathered.values[loc_index][0]
if average_allgather:
correct_output = correct_output / float(num_ranks)
assert(all_gathered.values[i][0] == correct_output)
def test_mpi_allgather(self):
# Get MPI rank
my_rank = int(os.environ['PMI_RANK'])
num_ranks = int(os.environ['PMI_SIZE'])
indices_per_rank = 100
tensor_width = 10
# Create IndexedSlices for each rank, some with overlapping indices.
to_gather_indices = []
to_gather_values = []
to_gather = []
for rank_id in range(num_ranks):
indices = []
values = []
my_multiple = rank_id + 1
current_index = my_multiple
for i in range(indices_per_rank):
indices.append(current_index)
ones_tensor = tf.ones([tensor_width])
values.append(tf.multiply(ones_tensor,
tf.fill(ones_tensor.get_shape(),
float(current_index))))
current_index += my_multiple
concat_ind = tf.stack(indices)
concat_vals = tf.stack(values)
to_gather_indices.append(concat_ind)
to_gather_values.append(concat_vals)
to_gather.append(tf.IndexedSlices(concat_vals, concat_ind))
# Collect the local IndexedSlices (indices and values) to create
# correct IndexedSlices output.
correct_gather_indices = tf.concat(to_gather_indices, 0)
correct_gather_values = tf.concat(to_gather_values, 0)
correct_gather = tf.IndexedSlices(correct_gather_values,
correct_gather_indices)
all_gather = mpi.allreduce(to_gather[my_rank], average_allgather)
# NOTE: This assumes that device IDs are numbered the same as ranks.
gpu_options = tf.GPUOptions(visible_device_list=str(my_rank))
config = tf.ConfigProto(gpu_options=gpu_options)
# MPI Session to test allgather.
with mpi.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
all_gathered, local_gathered = sess.run([all_gather, correct_gather])
# Compare all_gathered with local_gathered.
self.checkAllgather(num_ranks, all_gathered, local_gathered)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/contrib/mpi_collectives/mpi_allgather_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""## Communicating Between Processes with MPI
TensorFlow natively provides inter-device communication through send and
receive ops and inter-node communication through Distributed TensorFlow, based
on the same send and receive abstractions. On HPC clusters where Infiniband or
other high-speed node interconnects are available, these can end up being
insufficient for synchronous data-parallel training (without asynchronous
gradient descent). This module implements a variety of MPI ops which can take
advantage of hardware-specific MPI libraries for efficient communication.
In order to use this module, TensorFlow must be built with an MPI library,
which can be provided to the `./configure` script at build time. As a user of
TensorFlow, you will need to build TensorFlow yourself to select the MPI
library to use; to do so, follow the [instructions for building TensorFlow from
source](https://www.tensorflow.org/get_started/os_setup#installing_from_sources).
### Utility Ops
In addition to reductions and gathers, this module provides utility operations
for detecting the running MPI configuration.
Example:
```python
import tensorflow.contrib.mpi_collectives as mpi
# Use `mpi.Session` instead of `tf.Session`
with mpi.Session() as session:
rank = session.run(mpi.rank())
print("My MPI Rank:", rank)
if rank == 0:
print("MPI Size:", session.run(mpi.size()))
```
@@init
@@size
@@rank
@@local_rank
### Ring Allreduce and Allgather
When summing or averaging tensors across many processes, communication can
easily become a bottleneck. A naive implementation will send all the tensor
values to the same process, perform the reduction, and then broadcast the
values back to all other processes, effectively creating a synchronous
parameter server in one process. However, the process responsible for
performing the reduction will have to receive and send a massive amount of data
which scales with the number of processes *and* the number of parameters in the
model.
Instead of centralizing the reduction and having one primary reducer, we can
implement a distributed allreduce or allgather. A bandwidth-optimal allreduce
will end up sending 2(N - 1) values for every value in the input tensor,
and can be implemented with a ring allreduce [1]. (Intuitively, a linear reduce
requires at least (N - 1) sends between the different nodes, and a broadcast of
the result also requires (N - 1) sends, for a total of 2 (N - 1); these two
steps cannot be combined in a clever way to reduce the number of required
sends.) This module implements bandwidth-optimal ring allreduce and ring
allgather operations using MPI; by choosing a hardware-appropriate MPI
implementation (such as OpenMPI with CUDA-IPC support), you can train large
models with synchronous gradient descent with minimal communication overhead.
In addition to the `allreduce` and `allgather` functions, a convenience
`DistributedOptimizer` wrapper is provided to simplify using these functions
for reducing model gradients.
Example:
```python
import tensorflow as tf
from tensorflow.contrib import mpi_collectives as mpi
# Construct a simple linear regression model to optimize
W = tf.get_variable("W", shape=[20, 1], dtype=tf.float32)
B = tf.get_variable("B", shape=[1, 1], dtype=tf.float32)
inputs = tf.placeholder(tf.float32, shape=[None, 20], name="Inputs")
outputs = tf.placeholder(tf.float32, shape=[None, 1], name="Outputs")
loss = tf.nn.l2_loss(tf.matmul(inputs, W) + B - outputs)
# Training using MPI allreduce with DistributedOptimizer
optimizer = mpi.DistributedOptimizer(tf.train.AdamOptimizer())
train = optimizer.minimize(loss)
# Average loss over all ranks, for printing.
# Do not pass this to an optimizer!
avg_loss = mpi.allreduce(loss)
# On different ranks, feed different input data.
with mpi.Session() as session:
rank = session.run(mpi.rank())
batch_inputs, batch_outputs = construct_batch_for_rank(rank)
feed_dict = {inputs: batch_inputs, outputs: batch_outputs}
_, l = session.run([train, avg_loss], feed_dict=feed_dict)
print("Average Loss:", l)
```
[1] Patarasuk, Pitch and Yuan, Xin. "Bandwidth Optimal All-reduce Algorithms
for Clusters of Workstations".
@@Session
@@DistributedOptimizer
@@allreduce
@@allgather
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import init
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import size
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import rank
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import local_rank
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import allgather
from tensorflow.contrib.mpi_collectives.python.ops.mpi_ops import _allreduce
def allreduce(tensor, average=True):
"""Perform an MPI allreduce on a tf.Tensor or tf.IndexedSlices.
Arguments:
tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
The shape of the input must be identical across all ranks.
average: If True, computes the average over all ranks.
Otherwise, computes the sum over all ranks.
This function performs a bandwidth-optimal ring allreduce on the input
tensor. If the input is an tf.IndexedSlices, the function instead does an
allgather on the values and the indices, effectively doing an allreduce on
the represented tensor.
"""
if isinstance(tensor, tf.IndexedSlices):
    # For IndexedSlices, do two allgathers instead of an allreduce.
mpi_size = tf.cast(size(), tensor.values.dtype)
values = allgather(tensor.values)
indices = allgather(tensor.indices)
# To make this operation into an average, divide all gathered values by
# the MPI size.
new_values = tf.div(values, mpi_size) if average else values
return tf.IndexedSlices(new_values, indices,
dense_shape=tensor.dense_shape)
else:
mpi_size = tf.cast(size(), tensor.dtype)
summed_tensor = _allreduce(tensor)
new_tensor = (tf.div(summed_tensor, mpi_size)
if average else summed_tensor)
return new_tensor
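# A minimal usage sketch (added for illustration; not part of the original
# module). It assumes the ops are run inside an MPI-enabled session (for
# example the `Session` wrapper below) so that MPI has been initialized; the
# shapes, indices, and dense_shape are arbitrary examples.
def _example_allreduce_usage():
  """Illustrative only: average a dense tensor and sum an IndexedSlices."""
  dense = tf.ones([2, 3]) * tf.cast(rank(), tf.float32)
  dense_avg = allreduce(dense, average=True)  # identical result on every rank
  sparse = tf.IndexedSlices(
      values=tf.ones([2, 3]),
      indices=tf.constant([0, 5], dtype=tf.int64),
      dense_shape=tf.constant([10, 3], dtype=tf.int64))
  sparse_sum = allreduce(sparse, average=False)  # allgathers values and indices
  return dense_avg, sparse_sum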
class DistributedOptimizer(tf.train.Optimizer):
"""An optimizer that wraps another tf.Optimizer, using an MPI allreduce to
average gradient values before applying gradients to model weights."""
def __init__(self, optimizer, name=None, use_locking=False):
"""Construct a new DistributedOptimizer, which uses another optimizer
under the hood for computing single-process gradient values and
applying gradient updates after the gradient values have been averaged
across all the MPI ranks.
Args:
optimizer: Optimizer to use for computing gradients and applying updates.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Distributed" followed by the provided
optimizer type.
use_locking: Whether to use locking when updating variables. See
Optimizer.__init__ for more info.
"""
if name is None:
name = "Distributed{}".format(type(optimizer).__name__)
self._optimizer = optimizer
super(DistributedOptimizer, self).__init__(
name=name, use_locking=use_locking)
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of all trainable variables.
See Optimizer.compute_gradients() for more info.
In DistributedOptimizer, compute_gradients() is overridden to also
allreduce the gradients before returning them.
"""
gradients = (super(DistributedOptimizer, self)
.compute_gradients(*args, **kwargs))
return [(allreduce(gradient), var) for (gradient, var) in gradients]
def _apply_dense(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._apply_dense(*args, **kwargs)
def _apply_sparse(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._apply_sparse(*args, **kwargs)
def _apply_sparse_duplicate_indices(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._apply_sparse_duplicate_indices(*args,
**kwargs)
def _prepare(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._prepare(*args, **kwargs)
def _create_slots(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._create_slots(*args, **kwargs)
def _valid_dtypes(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._valid_dtypes(*args, **kwargs)
def _finish(self, *args, **kwargs):
"""Calls this same method on the underlying optimizer."""
return self._optimizer._finish(*args, **kwargs)
class Session(tf.Session):
"""A class for running TensorFlow operations, with copies of the same graph
running distributed across different MPI nodes.
The primary difference between `tf.Session` and
`tf.contrib.mpi_collectives.Session` is that the MPI `Session` ensures that
the `Session` options are correct for use with `tf.contrib.mpi`, and
initializes MPI immediately upon the start of the session.
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow MPI session.
Unlike a normal `tf.Session`, an MPI Session may only use a single GPU,
which must be specified in advance before the session is initialized.
In addition, it only uses a single graph evaluation thread, and
initializes MPI immediately upon starting.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# Initialize MPI on the relevant device.
# TODO: Move this to library load and eliminate mpi.Session()
if graph is None:
graph = tf.get_default_graph()
with graph.as_default():
self.run(init())
| tensorflow-master | tensorflow/contrib/mpi_collectives/__init__.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.mpi_collectives as mpi
from tensorflow.python.platform import test
average_allreduce = False
max_wrong_count = -1
class AllreduceTest(test.TestCase):
def dumpFailure(self, my_rank, out_loc_red, my_correct, out_all_red,
our_correct):
# Find reduced/allreduced indices that are wrong and print all the
# values from output, slices, reduced, allreduced, so we can debug
# which is incorrect:
wrong_count = 0
red_dims = out_loc_red.shape
assert(len(red_dims) == 2)
for i in range(red_dims[0]):
for j in range(red_dims[1]):
suffix = ""
if out_loc_red[i][j] != my_correct[i][j] or \
out_all_red[i][j] != our_correct[i][j]:
suffix = "WRONG"
wrong_count += 1
print("{}\t{}\t{}\t{}\t{}\t{}"
.format(my_rank, i, j, out_loc_red[i][j],
out_all_red[i][j], suffix), flush=True)
if max_wrong_count > 0 and wrong_count >= max_wrong_count:
return
def test_mpi_allreduce(self):
# Get MPI rank
my_rank = int(os.environ['PMI_RANK'])
num_ranks = int(os.environ['PMI_SIZE'])
stages = 13
batch_size = 1331
hidden_size = batch_size
out_size = batch_size
# Input placeholder (batch_size x hidden) - init to 1s
inputs = tf.placeholder(tf.float32, shape=(batch_size, hidden_size),
name="Input")
    # Large matrices (hidden x out_dim) - initialized to constant powers of two
weights = []
for i in range(stages):
initer = tf.constant_initializer(pow(2.0, i + 1.0))
weights.append(tf.get_variable("weights_{}".format(i),
shape=(hidden_size, out_size),
dtype=tf.float32,
initializer=initer))
# Calculate output through dependent allreduces
stage_input = inputs
for i in range(stages):
inter_output = tf.add(stage_input, weights[i],
name="add_red_{}".format(i))
stage_input = mpi.allreduce(inter_output,
average=average_allreduce)
all_reduced = stage_input
# Local reduced output for verification
local_input = inputs
for i in range(stages):
inter_output = tf.add(local_input, weights[i],
name="addin_loc_{}".format(i))
my_reducer = tf.Variable(initial_value=np.ones((hidden_size, out_size)),
dtype=tf.float32, name="loc_redr_{}".format(i))
for r in range(num_ranks):
my_reducer = tf.add(my_reducer, inter_output,
name="add_loc_{}_{}".format(i, r))
if average_allreduce:
local_input = tf.div(my_reducer, num_ranks,
name="div_loc_{}".format(i))
else:
local_input = my_reducer
local_reduced = local_input
# NOTE: This assumes that device IDs are numbered the same as ranks
gpu_options = tf.GPUOptions(visible_device_list=str(my_rank))
config = tf.ConfigProto(gpu_options=gpu_options)
# MPI Session to test allreduce
with mpi.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
input_feed = np.ones((batch_size, hidden_size), dtype=np.float32)
our_output = input_feed[0][0]
spread_var = 100
input_feed = input_feed + my_rank * spread_var
my_output = input_feed[0][0]
for i in range(stages):
curr_feed = my_output + pow(2.0, i + 1.0)
my_output = curr_feed * num_ranks + 1
curr_our_feed = our_output + pow(2.0, i + 1.0)
if i == 0:
sum_ranks = num_ranks * (num_ranks - 1) / 2
our_output = curr_our_feed * num_ranks + \
spread_var * sum_ranks
else:
our_output = curr_our_feed * num_ranks
print("rank {}: My output is {}".format(my_rank, my_output))
my_correct = np.zeros((batch_size, hidden_size), dtype=np.float32)
my_correct = my_correct + my_output
print("rank {}: Our output is {}".format(my_rank, our_output))
our_correct = np.zeros((batch_size, hidden_size), dtype=np.float32)
our_correct = our_correct + our_output
for i in range(1000):
if i % 100 == 0:
print("{}: iter {}".format(my_rank, i), flush=True)
feed_dict = {inputs: input_feed}
out_all_red, out_loc_red \
= sess.run([all_reduced, local_reduced],
feed_dict=feed_dict)
if not np.allclose(out_loc_red, my_correct) or \
not np.allclose(out_all_red, our_correct):
print("Test incorrect on iter {}".format(i), flush=True)
self.dumpFailure(my_rank, out_loc_red, my_correct, out_all_red,
our_correct)
assert(np.allclose(out_loc_red, my_correct) and
np.allclose(out_all_red, our_correct))
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/contrib/mpi_collectives/mpi_allreduce_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.contrib.mpi_collectives.mpi_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import itertools
import tensorflow as tf
import tensorflow.contrib.mpi_collectives as mpi
def mpi_env_rank_and_size():
"""Get MPI rank and size from environment variables and return them as a
tuple of integers.
Most MPI implementations have an `mpirun` or `mpiexec` command that will
run an MPI executable and set up all communication necessary between the
different processors. As part of that set up, they will set environment
variables that contain the rank and size of the MPI_COMM_WORLD
communicator. We can read those environment variables from Python in order
to ensure that `mpi.rank()` and `mpi.size()` return the expected values.
Since MPI is just a standard, not an implementation, implementations
typically choose their own environment variable names. This function tries
  to support several different implementations, but really it only needs to
support whatever implementation we want to use for the TensorFlow test
suite.
If this is not running under MPI, then defaults of rank zero and size one
are returned. (This is appropriate because when you call MPI_Init in an
application not started with mpirun, it will create a new independent
communicator with only one process in it.)
"""
rank_env = "PMI_RANK OMPI_COMM_WORLD_RANK".split()
size_env = "PMI_SIZE OMPI_COMM_WORLD_SIZE".split()
for rank_var, size_var in zip(rank_env, size_env):
rank = os.environ.get(rank_var)
size = os.environ.get(size_var)
if rank is not None and size is not None:
return int(rank), int(size)
# Default to rank zero and size one if there are no environment variables
return 0, 1
class MPITests(tf.test.TestCase):
"""
Tests for MPI ops in tensorflow.contrib.mpi_collectives.
"""
def test_mpi_rank(self):
"""Test that the rank returned by mpi.rank() is correct."""
true_rank, _ = mpi_env_rank_and_size()
with self.test_session() as session:
rank = session.run(mpi.rank())
self.assertEqual(true_rank, rank)
def test_mpi_size(self):
"""Test that the size returned by mpi.size() is correct."""
_, true_size = mpi_env_rank_and_size()
with self.test_session() as session:
size = session.run(mpi.size())
self.assertEqual(true_size, size)
def test_mpi_allreduce_cpu(self):
"""Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors."""
with self.test_session() as session:
size = session.run(mpi.size())
dtypes = [tf.int32, tf.float32]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tf.set_random_seed(1234)
tensor = tf.random_uniform([17] * dim, -100, 100, dtype=dtype)
summed = mpi.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
diff = session.run(max_difference)
self.assertTrue(diff <= threshold,
"mpi.allreduce produces incorrect results")
def test_mpi_allreduce_gpu(self):
"""Test that the allreduce works on GPUs.
This test will crash badly if used with an MPI implementation that does
not support GPU memory transfers directly, as it will call MPI_Send on
a GPU data pointer."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
return
no_gpus = tf.GPUOptions(visible_device_list="")
cpu_config = tf.ConfigProto(gpu_options=no_gpus)
with self.test_session(config=cpu_config) as session:
local_rank = session.run(mpi.local_rank())
one_gpu = tf.GPUOptions(visible_device_list=str(local_rank))
gpu_config = tf.ConfigProto(gpu_options=one_gpu)
with self.test_session(config=gpu_config) as session:
size = session.run(mpi.size())
dtype = tf.float32
dim = 3
with tf.device("/gpu:0"):
tf.set_random_seed(1234)
tensor = tf.random_uniform([17] * dim, -100, 100, dtype=dtype)
summed = mpi.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
return
diff = session.run(max_difference)
self.assertTrue(diff <= threshold,
"mpi.allreduce on GPU produces incorrect results")
def test_mpi_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
# Same rank, different dimension
tf.set_random_seed(1234)
dims = [17 + rank] * 3
tensor = tf.random_uniform(dims, -1.0, 1.0)
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allreduce(tensor))
# Same number of elements, different rank
tf.set_random_seed(1234)
if rank == 0:
dims = [17, 23 * 57]
else:
dims = [17, 23, 57]
tensor = tf.random_uniform(dims, -1.0, 1.0)
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allreduce(tensor))
def test_mpi_allreduce_type_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different type."""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
# Same rank, different dimension
dims = [17] * 3
tensor = tf.ones(dims, dtype=tf.int32 if rank % 2 == 0 else tf.float32)
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allreduce(tensor))
def test_mpi_allgather(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
with self.test_session() as session:
size = session.run(mpi.size())
rank = session.run(mpi.rank())
dtypes = tf.int32, tf.float32
dims = 1, 2, 3
for dtype, dim in itertools.product(dtypes, dims):
tensor = tf.ones([17] * dim, dtype=dtype) * rank
gathered = mpi.allgather(tensor)
gathered_tensor = session.run(gathered)
self.assertEqual(list(gathered_tensor.shape),
[17 * size] + [17] * (dim - 1))
for i in range(size):
rank_tensor = tf.slice(gathered_tensor, [i * 17] + [0] * (dim - 1),
[17] + [-1] * (dim - 1))
self.assertEqual(list(rank_tensor.shape), [17] * dim)
self.assertTrue(session.run(tf.reduce_all(tf.equal(rank_tensor, i))),
"mpi.allgather produces incorrect gathered tensor")
def test_mpi_allgather_variable_size(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors,
even if those tensors have different sizes along the first dim."""
with self.test_session() as session:
size = session.run(mpi.size())
rank = session.run(mpi.rank())
dtypes = tf.int32, tf.float32
dims = 1, 2, 3
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1),
dtype=dtype) * rank
gathered = mpi.allgather(tensor)
gathered_tensor = session.run(gathered)
expected_size = sum(tensor_sizes)
self.assertEqual(list(gathered_tensor.shape),
[expected_size] + [17] * (dim - 1))
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = tf.slice(gathered,
[sum(tensor_sizes[:i])] + [0] * (dim - 1),
rank_size)
self.assertEqual(list(rank_tensor.shape), rank_size)
self.assertTrue(session.run(tf.reduce_all(tf.equal(rank_tensor, i))),
"mpi.allgather produces incorrect gathered tensor")
def test_mpi_allgather_error(self):
"""Test that the allgather returns an error if any dimension besides
the first is different among the tensors being gathered."""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
tensor_size = [17] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = tf.ones(tensor_size, dtype=tf.float32) * rank
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allgather(tensor))
def test_mpi_allgather_type_error(self):
"""Test that the allgather returns an error if the types being gathered
differ among the processes"""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
tensor_size = [17] * 3
dtype = tf.int32 if rank % 2 == 0 else tf.float32
tensor = tf.ones(tensor_size, dtype=dtype) * rank
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allgather(tensor))
if __name__ == '__main__':
tf.test.main()
| tensorflow-master | tensorflow/contrib/mpi_collectives/mpi_ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Inter-process communication using MPI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.mpi_collectives.ops import gen_mpi_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_mpi_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile('_mpi_ops.so'))
def size(name=None):
"""An op which returns the number of MPI processes.
This is equivalent to running `MPI_Comm_size(MPI_COMM_WORLD, ...)` to get the
size of the global communicator.
Returns:
An integer scalar containing the number of MPI processes.
"""
return gen_mpi_ops.mpi_size(name=name)
ops.NotDifferentiable('MPISize')
def rank(name=None):
"""An op which returns the MPI rank of the calling process.
This is equivalent to running `MPI_Comm_rank(MPI_COMM_WORLD, ...)` to get the
rank of the current process in the global communicator.
Returns:
An integer scalar with the MPI rank of the calling process.
"""
return gen_mpi_ops.mpi_rank(name=name)
ops.NotDifferentiable('MPIRank')
def init(name=None):
"""An op which initializes MPI on the device on which it is run.
All future MPI ops must be run on the same device that the `init` op was run
on.
"""
return gen_mpi_ops.mpi_init(name=name)
ops.NotDifferentiable('MPIInit')
def local_rank(name=None):
"""An op which returns the local MPI rank of the calling process, within the
node that it is running on. For example, if there are seven processes running
on a node, their local ranks will be zero through six, inclusive.
This is equivalent to running `MPI_Comm_rank(...)` on a new communicator
which only includes processes on the same node.
Returns:
An integer scalar with the local MPI rank of the calling process.
"""
return gen_mpi_ops.mpi_local_rank(name=name)
ops.NotDifferentiable('MPILocalRank')
def _allreduce(tensor, name=None):
"""An op which sums an input tensor over all the MPI processes.
The reduction operation is keyed by the name of the op. The tensor type and
shape must be the same on all MPI processes for a given name. The reduction
will not start until all processes are ready to send and receive the tensor.
Returns:
A tensor of the same shape and type as `tensor`, summed across all
processes.
"""
return gen_mpi_ops.mpi_allreduce(tensor, name=name)
ops.NotDifferentiable('MPIAllreduce')
def allgather(tensor, name=None):
"""An op which concatenates the input tensor with the same input tensor on
all other MPI processes.
The concatenation is done on the first dimension, so the input tensors on the
different processes must have the same rank and shape, except for the first
dimension, which is allowed to be different.
Returns:
A tensor of the same type as `tensor`, concatenated on dimension zero
across all processes. The shape is identical to the input shape, except for
the first dimension, which may be greater and is the sum of all first
dimensions of the tensors in different MPI processes.
"""
# Specify that first allgather is to collect the tensor gather sizes,
# indicated by passing in a scalar (0-D tensor) of value 0
sizes_flag = tf.constant(0, dtype=tf.int64, name='size_flag_const')
my_size = tf.slice(
tf.shape(tensor, out_type=tf.int64), [0], [1], name='size_slice')
if name is None:
name = 'allgather'
sizing_name = '{}_sizing'.format(name)
sizes = gen_mpi_ops.mpi_allgather(my_size, sizes_flag, name=sizing_name)
return gen_mpi_ops.mpi_allgather(tensor, sizes, name=name)
ops.NotDifferentiable('MPIAllgather')
| tensorflow-master | tensorflow/contrib/mpi_collectives/python/ops/mpi_ops.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.testing.python.framework.fake_summary_writer import *
from tensorflow.contrib.testing.python.framework.util_test import *
# pylint: enable=unused-import,wildcard-import
| tensorflow-master | tensorflow/contrib/testing/__init__.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fake summary writer for unit tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.summary.writer import writer
from tensorflow.python.summary.writer import writer_cache
# TODO(ptucker): Replace with mock framework.
class FakeSummaryWriter(object):
"""Fake summary writer."""
_replaced_summary_writer = None
@classmethod
def install(cls):
if cls._replaced_summary_writer:
raise ValueError('FakeSummaryWriter already installed.')
cls._replaced_summary_writer = writer.FileWriter
writer.FileWriter = FakeSummaryWriter
writer_cache.FileWriter = FakeSummaryWriter
@classmethod
def uninstall(cls):
if not cls._replaced_summary_writer:
raise ValueError('FakeSummaryWriter not installed.')
writer.FileWriter = cls._replaced_summary_writer
writer_cache.FileWriter = cls._replaced_summary_writer
cls._replaced_summary_writer = None
def __init__(self, logdir, graph=None):
self._logdir = logdir
self._graph = graph
self._summaries = {}
self._added_graphs = []
self._added_meta_graphs = []
self._added_session_logs = []
self._added_run_metadata = {}
@property
def summaries(self):
return self._summaries
def assert_summaries(self,
test_case,
expected_logdir=None,
expected_graph=None,
expected_summaries=None,
expected_added_graphs=None,
expected_added_meta_graphs=None,
expected_session_logs=None):
"""Assert expected items have been added to summary writer."""
if expected_logdir is not None:
test_case.assertEqual(expected_logdir, self._logdir)
if expected_graph is not None:
test_case.assertTrue(expected_graph is self._graph)
expected_summaries = expected_summaries or {}
for step in expected_summaries:
test_case.assertTrue(
step in self._summaries,
msg='Missing step %s from %s.' % (step, self._summaries.keys()))
actual_simple_values = {}
for step_summary in self._summaries[step]:
for v in step_summary.value:
# Ignore global_step/sec since it's written by Supervisor in a
# separate thread, so it's non-deterministic how many get written.
if 'global_step/sec' != v.tag:
actual_simple_values[v.tag] = v.simple_value
test_case.assertEqual(expected_summaries[step], actual_simple_values)
if expected_added_graphs is not None:
test_case.assertEqual(expected_added_graphs, self._added_graphs)
if expected_added_meta_graphs is not None:
test_case.assertEqual(len(expected_added_meta_graphs),
len(self._added_meta_graphs))
for expected, actual in zip(expected_added_meta_graphs,
self._added_meta_graphs):
test_util.assert_meta_graph_protos_equal(test_case, expected, actual)
if expected_session_logs is not None:
test_case.assertEqual(expected_session_logs, self._added_session_logs)
def add_summary(self, summ, current_global_step):
"""Add summary."""
if isinstance(summ, bytes):
summary_proto = summary_pb2.Summary()
summary_proto.ParseFromString(summ)
summ = summary_proto
if current_global_step in self._summaries:
step_summaries = self._summaries[current_global_step]
else:
step_summaries = []
self._summaries[current_global_step] = step_summaries
step_summaries.append(summ)
# NOTE: Ignore global_step since its value is non-deterministic.
def add_graph(self, graph, global_step=None, graph_def=None):
"""Add graph."""
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
if graph_def is not None:
raise ValueError('Unexpected graph_def %s.' % graph_def)
self._added_graphs.append(graph)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Add metagraph."""
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
self._added_meta_graphs.append(meta_graph_def)
# NOTE: Ignore global_step since its value is non-deterministic.
def add_session_log(self, session_log, global_step=None):
# pylint: disable=unused-argument
self._added_session_logs.append(session_log)
def add_run_metadata(self, run_metadata, tag, global_step=None):
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
self._added_run_metadata[tag] = run_metadata
def flush(self):
pass
def reopen(self):
pass
def close(self):
pass
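# A minimal usage sketch (added for illustration; not part of the original
# module). The logdir string and the bare `add_session_log` call are arbitrary
# examples; `test_case` is whatever tf.test.TestCase instance the test uses.
def _example_fake_writer_usage(test_case):
  """Illustrative only: capture writes that go through writer.FileWriter."""
  FakeSummaryWriter.install()
  try:
    # Any code under test that constructs writer.FileWriter(...) now gets the
    # fake, so its recorded writes can be inspected afterwards.
    fake = writer.FileWriter('/tmp/fake_logdir')
    fake.add_session_log(session_log=None, global_step=1)
    fake.assert_summaries(test_case, expected_logdir='/tmp/fake_logdir')
  finally:
    FakeSummaryWriter.uninstall()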
| tensorflow-master | tensorflow/contrib/testing/python/framework/fake_summary_writer.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python.training import summary_io
def assert_summary(expected_tags, expected_simple_values, summary_proto):
"""Asserts summary contains the specified tags and values.
Args:
expected_tags: All tags in summary.
    expected_simple_values: Simple values for some tags.
summary_proto: Summary to validate.
Raises:
ValueError: if expectations are not met.
"""
actual_tags = set()
for value in summary_proto.value:
actual_tags.add(value.tag)
if value.tag in expected_simple_values:
expected = expected_simple_values[value.tag]
actual = value.simple_value
np.testing.assert_almost_equal(
actual, expected, decimal=2, err_msg=value.tag)
expected_tags = set(expected_tags)
if expected_tags != actual_tags:
raise ValueError('Expected tags %s, got %s.' % (expected_tags, actual_tags))
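# Illustrative sketch (not in the original module): shows how `assert_summary`
# might be called with a hand-built proto; the tag names are hypothetical.
def _example_assert_summary():
  summary = summary_pb2.Summary()
  summary.value.add(tag='loss', simple_value=0.5)
  summary.value.add(tag='accuracy', simple_value=0.9)
  # Passes: both tags are present and 'loss' matches to 2 decimal places.
  assert_summary(
      expected_tags=['loss', 'accuracy'],
      expected_simple_values={'loss': 0.5},
      summary_proto=summary)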
def to_summary_proto(summary_str):
"""Create summary based on latest stats.
Args:
summary_str: Serialized summary.
Returns:
summary_pb2.Summary.
Raises:
    ValueError: if `summary_str` is not a valid serialized summary.
"""
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
return summary
# TODO(ptucker): Move to a non-test package?
def latest_event_file(base_dir):
"""Find latest event file in `base_dir`.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
File path, or `None` if none exists.
"""
file_paths = glob.glob(os.path.join(base_dir, 'events.*'))
return sorted(file_paths)[-1] if file_paths else None
def latest_events(base_dir):
"""Parse events from latest event file in base_dir.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
Iterable of event protos.
Raises:
ValueError: if no event files exist under base_dir.
"""
file_path = latest_event_file(base_dir)
return summary_io.summary_iterator(file_path) if file_path else []
def latest_summaries(base_dir):
"""Parse summary events from latest event file in base_dir.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
List of event protos.
Raises:
ValueError: if no event files exist under base_dir.
"""
return [e for e in latest_events(base_dir) if e.HasField('summary')]
def simple_values_from_events(events, tags):
"""Parse summaries from events with simple_value.
Args:
events: List of tensorflow.Event protos.
tags: List of string event tags corresponding to simple_value summaries.
Returns:
dict of tag:value.
Raises:
ValueError: if a summary with a specified tag does not contain simple_value.
"""
step_by_tag = {}
value_by_tag = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
tag = v.tag
if tag in tags:
if not v.HasField('simple_value'):
raise ValueError('Summary for %s is not a simple_value.' % tag)
# The events are mostly sorted in step order, but we explicitly check
# just in case.
if tag not in step_by_tag or e.step > step_by_tag[tag]:
step_by_tag[tag] = e.step
value_by_tag[tag] = v.simple_value
return value_by_tag
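# Hypothetical end-to-end sketch (not in the original module): reads the most
# recent event file under a log directory and extracts selected scalar values.
# The directory and tag names below are placeholders.
def _example_read_latest_scalars(base_dir='/tmp/train_logs'):
  events = latest_summaries(base_dir)
  # Returns e.g. {'loss': 0.12, 'accuracy': 0.97}. Tags written without a
  # simple_value raise ValueError; tags that never appear are simply omitted.
  return simple_values_from_events(events, tags=['loss', 'accuracy'])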
|
tensorflow-master
|
tensorflow/contrib/testing/python/framework/util_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A distributed computation library for TF.
See [tensorflow/contrib/distribute/README.md](
https://www.tensorflow.org/code/tensorflow/contrib/distribute/README.md)
for overview and examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.distribute.python.collective_all_reduce_strategy import CollectiveAllReduceStrategy
from tensorflow.contrib.distribute.python.mirrored_strategy import MirroredStrategy
from tensorflow.contrib.distribute.python.monitor import Monitor
from tensorflow.contrib.distribute.python.one_device_strategy import OneDeviceStrategy
from tensorflow.contrib.distribute.python.parameter_server_strategy import ParameterServerStrategy
from tensorflow.contrib.distribute.python.tpu_strategy import initialize_tpu_system
from tensorflow.contrib.distribute.python.tpu_strategy import TPUStrategy
from tensorflow.python.distribute.cross_device_ops import *
from tensorflow.python.distribute.distribute_config import DistributeConfig
from tensorflow.python.distribute.distribute_coordinator import run_standard_tensorflow_server
from tensorflow.python.distribute.distribute_lib import *
from tensorflow.python.distribute.distribution_strategy_context import *
from tensorflow.python.distribute.step_fn import *
from tensorflow.python.util.all_util import remove_undocumented
DistributionStrategy = StrategyV1
_allowed_symbols = [
'AllReduceCrossDeviceOps',
'CollectiveAllReduceStrategy',
'CrossDeviceOps',
'DistributeConfig',
'DistributionStrategy',
'DistributionStrategyExtended',
'MirroredStrategy',
'Monitor',
'MultiWorkerAllReduce',
'OneDeviceStrategy',
'ParameterServerStrategy',
'ReductionToOneDeviceCrossDeviceOps',
'Step',
'StandardInputStep',
'StandardSingleLossStep',
'ReplicaContext',
'TPUStrategy',
'initialize_tpu_system',
'get_cross_replica_context',
'get_distribution_strategy',
'get_loss_reduction',
'get_replica_context',
'get_strategy',
'has_distribution_strategy',
'has_strategy',
'in_cross_replica_context',
'require_replica_context',
'run_standard_tensorflow_server',
'UpdateContext',
]
remove_undocumented(__name__, _allowed_symbols)
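# Minimal usage sketch (illustrative only, not part of this module). It assumes
# the contrib-era TF 1.x `tensorflow` package with `tf.keras`, and uses the
# `distribute=` compile argument exercised by the backward-compatibility tests
# in tensorflow/contrib/distribute/python.
def _example_mirrored_strategy_usage():
  import tensorflow as tf  # local import to keep this sketch self-contained
  strategy = MirroredStrategy()
  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(10,))])
  model.compile(optimizer='sgd', loss='mse', distribute=strategy)
  return model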
|
tensorflow-master
|
tensorflow/contrib/distribute/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitor is responsible for training, checkpointing and recovery."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.ops import variables
class Monitor(object):
"""Executes training steps, recovers and checkpoints.
Note that this class is particularly preliminary, experimental, and
expected to change.
"""
# TODO(isaprykin): Support step functions that need multiple session calls.
# TODO(isaprykin): Support extra arguments to the step function.
# TODO(isaprykin): Support recovery, checkpointing and summaries.
def __init__(self, step_callable, session=None):
"""Initialize the Monitor with components for executing training steps.
Args:
step_callable: a training `Step` that's capable of signaling when done.
session: a `Session` instance that's needed for graph mode.
Raises:
ValueError: if `session` was provided for eager mode or not provided for
graph mode.
"""
if context.executing_eagerly():
if session is not None:
raise ValueError("Should not provide a `session` in Eager mode.")
self._run_step = step_callable
else:
if session is None:
raise ValueError("Should provide a `session` in Graph mode.")
session.run(step_callable.initialize())
self._run_step = session.make_callable(step_callable())
session.run(variables.global_variables_initializer())
def run_steps(self, num_steps=None):
step = 0
while num_steps is None or step < num_steps:
try:
self._run_step()
step += 1
except errors.OutOfRangeError:
break
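# Illustrative usage sketch (not part of the original file). `step` stands in
# for any training `Step` implementation (e.g. `StandardSingleLossStep` from
# tensorflow.contrib.distribute); that choice is an assumption, not an import.
def _example_monitor_usage(step, session):
  """Runs up to 100 training steps, stopping early if the input is exhausted."""
  monitor = Monitor(step, session=session)  # graph mode requires a session
  monitor.run_steps(num_steps=100)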
|
tensorflow-master
|
tensorflow/contrib/distribute/python/monitor.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.
def simple_sequential_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
model.add(keras.layers.Dropout(0.1))
model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
return model
def simple_functional_model():
a = keras.layers.Input(shape=_INPUT_SIZE)
b = keras.layers.Dense(16, activation='relu')(a)
b = keras.layers.Dropout(0.1)(b)
b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
model = keras.models.Model(inputs=[a], outputs=[b])
return model
def multi_inputs_multi_outputs_model():
input_a = keras.layers.Input(shape=(16,), name='input_a')
input_b = keras.layers.Input(shape=(16,), name='input_b')
input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
dense = keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
  # Convert the string input m to numeric values.
interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
interm_b = dense(input_b)
merged = keras.layers.concatenate([interm_s, interm_b], name='merge')
output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
model = keras.models.Model(
inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
metrics={
'dense_2': 'categorical_accuracy',
'dense_3': 'categorical_accuracy'
})
return model
def get_ds_train_input_fn():
np.random.seed(_RANDOM_SEED)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_train = keras.utils.to_categorical(y_train)
dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.batch(32)
return dataset
def get_ds_test_input_fn():
np.random.seed(_RANDOM_SEED)
_, (x_test, y_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_test = keras.utils.to_categorical(y_test)
dataset = dataset_ops.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.batch(32)
return dataset
def get_multi_inputs_multi_outputs_data():
(a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=3,
random_seed=_RANDOM_SEED)
(b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=2,
random_seed=_RANDOM_SEED)
(m_train, _), (m_test, _) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(8,),
num_classes=2,
random_seed=_RANDOM_SEED)
c_train = keras.utils.to_categorical(c_train)
c_test = keras.utils.to_categorical(c_test)
d_train = keras.utils.to_categorical(d_train)
d_test = keras.utils.to_categorical(d_test)
train_data = {
'input_a': a_train,
'input_b': b_train,
'input_m': m_train,
'output_c': c_train,
'output_d': d_train
}
test_data = {
'input_a': a_test,
'input_b': b_test,
'input_m': m_test,
'output_c': c_test,
'output_d': d_test
}
return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
if repeat:
dataset = dataset.repeat(repeat)
  # TPUs currently require fully defined input shapes; drop_remainder ensures
  # the input will have fully defined shapes.
if isinstance(distribution, (tpu_strategy.TPUStrategy,
tpu_strategy.TPUStrategyV1)):
return dataset.batch(batch_size, drop_remainder=True)
else:
return dataset.batch(batch_size)
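# Quick illustration (not part of the original test) of why `drop_remainder`
# matters on TPU: with drop_remainder=True every batch has a fully defined
# static shape, at the cost of dropping the final partial batch.
def _example_drop_remainder():
  ds = dataset_ops.Dataset.range(10)
  # Yields 3 batches of shape [3]; the trailing element is dropped.
  return ds.batch(3, drop_remainder=True)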
def get_model():
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
return model
def get_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def get_predict_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def multi_input_output_model():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(5,), name='input_b')
# TODO(anjalisridhar): Change the output dimension of the second Dense layer
# once the iterator output validation issue has been fixed.
dense_1 = keras.layers.Dense(7, name='dense_1')
dense_2 = keras.layers.Dense(7, name='dense_2')
c = dense_1(a)
d = dense_2(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
return model
def get_correctness_test_inputs(use_numpy, use_validation_data,
with_distribution,
x_train, y_train, x_predict):
"""Generates the inputs for correctness check when enable Keras with DS."""
training_epochs = 2
global_batch_size = 64
batch_size = global_batch_size
# TODO(b/118776054): Use global batch size for Keras/DS support.
use_per_core_batch_size = (
with_distribution and
not distributed_training_utils.global_batch_size_supported(
with_distribution))
if use_per_core_batch_size:
batch_size //= with_distribution.num_replicas_in_sync
if use_numpy:
training_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
'epochs': training_epochs,
'shuffle': False,
}
if use_validation_data:
eval_inputs = None
training_inputs['validation_data'] = (x_train, y_train)
else:
eval_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
}
predict_inputs = {
'x': np.array(x_predict, dtype=np.float32),
}
else:
# For dataset inputs, we do not pass batch_size to
# keras.fit/evaluate/predict. The batch size is part of the dataset.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(x_train, y_train))
x = batch_wrapper(
train_dataset, batch_size, with_distribution, repeat=training_epochs)
training_inputs = {
'batch_size': None,
'x': x,
'y': None,
'epochs': training_epochs,
'shuffle': False,
'steps_per_epoch': len(x_train) // global_batch_size,
}
if use_validation_data:
eval_inputs = None # Remove the eval_inputs
eval_dataset = dataset_ops.Dataset.from_tensor_slices(
(x_train, y_train))
x = batch_wrapper(eval_dataset, batch_size, with_distribution)
training_inputs['validation_data'] = x
training_inputs['validation_steps'] = 5
else:
eval_inputs = {
'batch_size': None,
'x': x,
'y': None,
'steps': 20,
}
predict_batch_size = len(x_predict)
if use_per_core_batch_size:
predict_batch_size //= with_distribution.num_replicas_in_sync
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
predict_dataset = batch_wrapper(predict_dataset,
predict_batch_size, with_distribution)
predict_inputs = {
'steps': 1,
'x': predict_dataset,
}
return training_inputs, eval_inputs, predict_inputs
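# Sketch (not part of the original test) of how the three dicts returned above
# are consumed; this mirrors `fit_eval_and_predict` in the correctness test
# further below, where `model` is any compiled Keras model:
#   training_inputs, eval_inputs, predict_inputs = get_correctness_test_inputs(...)
#   history = model.fit(**training_inputs)
#   if eval_inputs is not None:
#     eval_result = model.evaluate(**eval_inputs)
#   predictions = model.predict(**predict_inputs)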
strategies_minus_tpu = [
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
]
tpu_strategies = [
strategy_combinations.tpu_strategy, # steps_per_run=2
strategy_combinations.tpu_strategy_one_step
]
def strategy_minus_tpu_combinations():
return combinations.combine(
distribution=strategies_minus_tpu,
mode=['graph', 'eager'])
def tpu_strategy_combinations():
return combinations.combine(
distribution=tpu_strategies,
mode=['graph'])
def all_strategy_combinations():
return strategy_minus_tpu_combinations() + tpu_strategy_combinations()
def strategy_and_optimizer_combinations():
return combinations.times(
all_strategy_combinations(),
combinations.combine(optimizer=[
strategy_combinations.adagrad_optimizer_v1_fn,
strategy_combinations.adagrad_optimizer_keras_v2_fn,
strategy_combinations.adam_optimizer_v1_fn,
strategy_combinations.adam_optimizer_keras_v2_fn,
strategy_combinations.gradient_descent_optimizer_v1_fn,
strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
strategy_combinations.rmsprop_optimizer_v1_fn,
strategy_combinations.rmsprop_optimizer_keras_v2_fn
]))
def strategy_and_input_combinations():
return (
combinations.times(
combinations.combine(distribution=strategies_minus_tpu),
combinations.combine(mode=['graph'],
use_numpy=[True, False],
use_validation_data=[True, False])
+ combinations.combine(mode=['eager'],
use_numpy=[False],
use_validation_data=[False])) +
combinations.times(
combinations.combine(distribution=tpu_strategies),
combinations.combine(mode=['graph'],
use_numpy=[True, False],
use_validation_data=[True, False])))
def strategy_for_numpy_input_combinations():
return combinations.combine(
distribution=strategies_minus_tpu + tpu_strategies,
mode=['graph'])
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyWithNumpyArrays(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calling_model_with_numpy_arrays(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0,
validation_data=(inputs, targets))
# TODO(anjalisridhar): We need tests for when the batch size and steps are
# smaller and results in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calling_model_with_nested_numpy_arrays(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
targets = [output_d_np, output_e_np]
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)
# TODO(anjalisridhar): We need tests for when the batch size and steps are
# smaller and results in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(combinations.combine(
distribution=strategies_minus_tpu, mode=['graph']))
def test_numpy_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
inputs = np.zeros((20, 3), np.float32)
targets = np.zeros((20, 4), np.float32)
sample_weights = np.ones((20), np.float32)
model.fit(inputs, targets, sample_weight=sample_weights, epochs=1,
steps_per_epoch=2, verbose=1)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_flatten_predict_outputs(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
# We take 6 input samples with each input having a dimension of 3 or 5.
input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
outs = model.predict(inputs, steps=1)
      # `predict` returns a list equal in length to the number of model outputs.
# In this test our model has two outputs and each element of `outs`
# corresponds to all the samples of one of the model outputs.
self.assertLen(outs, 2)
      # Each of the output samples has a dimension of 7. We should process all
      # the available input samples (6).
self.assertAllEqual([6, 7], outs[0].shape)
self.assertAllEqual([6, 7], outs[1].shape)
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyWithDatasets(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_calling_model_on_same_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(all_strategy_combinations())
def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
with self.cached_session():
user_controlled_model = get_model()
user_controlled_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution)
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
interleaved_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution)
dataset = get_dataset(distribution)
# Call fit with validation interleaved
interleaved_output = interleaved_model.fit(
dataset, epochs=2, steps_per_epoch=2, verbose=1,
validation_data=dataset, validation_steps=2, shuffle=False)
# Manually control the validation running after each epoch.
user_controlled_output = []
for _ in range(2):
user_controlled_model.fit(
dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
user_controlled_output.append(
user_controlled_model.evaluate(dataset, steps=2))
self.assertEqual(interleaved_output.history['val_loss'],
[x[0] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_mean_absolute_error'],
[x[1] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
[x[2] for x in user_controlled_output])
# TODO(priyag): Enable this test for TPU. Currently tuples/dict don't work
# as clone_model's input_tensors argument only seems to accept list and not
# tuples or dict.
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 5))
output_d_np = np.random.random((10, 7))
output_e_np = np.random.random((10, 7))
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
# Test with dict
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
{'input_a': input_a_np, 'input_b': input_b_np},
(output_d_np, output_e_np)))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(all_strategy_combinations())
def test_fit_eval_and_predict_methods_on_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
with self.cached_session():
model = get_model()
loss = 'mse'
model.compile(optimizer(), loss, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_minus_tpu_combinations())
def test_dataset_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat()
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_wrong_input_shape(self, distribution):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
# Wrong input shape
inputs = np.zeros((10, 5), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_no_batch_input_validation(self, distribution):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
# User forgets to batch the dataset
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=['graph', 'eager']))
def test_learning_phase_value(self, distribution):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
# meaningful values. Currently we don't pass the learning phase if the
# Lambda layer uses the learning phase.
with self.cached_session():
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
z = keras.layers.Dropout(0.9999)(y)
model = keras.Model(x, z)
initial_weights = model.get_weights()
optimizer = gradient_descent.GradientDescentOptimizer(0.005)
loss = 'mse'
metrics = ['acc']
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
batch_size = 8
if isinstance(distribution, mirrored_strategy.CoreMirroredStrategy):
# CoreMirroredStrategy uses global batch size.
batch_size = 8 * distribution.num_replicas_in_sync
inputs = np.ones((10, 1), dtype=np.float32)
targets = np.ones((10, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat().batch(batch_size)
hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
self.assertAlmostEqual(hist.history['acc'][0], 0, 0)
model.set_weights(initial_weights)
# TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
# evaluate_output = model.evaluate(dataset, steps=20)
# self.assertAlmostEqual(evaluate_output[1], 1, 0)
inputs = np.ones((10, 1), dtype=np.float32)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.repeat().batch(batch_size)
output = model.predict(predict_dataset, steps=10)
# `predict` runs for 10 steps
ref_output = np.ones((160, 1), dtype=np.float32)
self.assertArrayNear(output, ref_output, 1e-1)
@combinations.generate(strategy_minus_tpu_combinations())
def testOptimizerWithCallbacks(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent_keras.SGD(0.01)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
dataset = get_dataset(distribution)
def schedule(_):
return 0.001
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
grouped_models = distribution.experimental_local_results(
distributed_training_utils.get_distributed_model(
model, ModeKeys.TRAIN))
with distribution.scope():
for m in grouped_models:
self.assertAllClose(0.001, keras.backend.get_value(
m.optimizer.lr), atol=1e-05, rtol=1e-05)
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_unsupported_features(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not '
'supported when input `x` is a dataset or a '
'dataset iterator.+'):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported when input '
'`x` is a dataset or a dataset iterator.'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
      # Test without specifying the `steps` argument for a dataset with
      # infinite cardinality.
dataset = dataset.repeat()
with self.assertRaisesRegexp(ValueError, 'When passing an infinitely '
'repeating dataset, you must specify the '
'`steps_per_epoch` argument'):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError, 'When passing an infinitely '
'repeating dataset, you must specify the '
'`steps` argument'):
model.evaluate(dataset, verbose=0)
with self.assertRaisesRegexp(ValueError, 'When passing an infinitely '
'repeating dataset, you must specify the '
'`steps` argument'):
model.predict(dataset, verbose=0)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_calling_with_unsupported_predefined_callbacks(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
def schedule(_):
return 0.001
with self.assertRaisesRegexp(ValueError,
'You must specify a Keras Optimizer V2 when '
'using'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
with self.assertRaisesRegexp(ValueError,
'You must specify a Keras Optimizer V2 when '
'using'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.ReduceLROnPlateau()])
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyWithLossMasking(test.TestCase,
parameterized.TestCase):
# TODO(priyag): Enable all strategies for this test. Currently it does not
# work for TPU due to some invalid datatype.
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_masking(self, distribution):
with self.cached_session():
np.random.seed(1337)
x = np.array([[[1], [1]], [[0], [0]]])
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution)
y = np.array([[[1], [1]], [[1], [1]]])
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2)
self.assertEqual(hist.history['loss'][0], 0)
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyWithNormalizationLayer(
test.TestCase, parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_batchnorm_correctness(self, distribution):
with self.cached_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution)
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
x = x.astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, x))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 32, distribution)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x)
predict_dataset = predict_dataset.repeat(100)
predict_dataset = batch_wrapper(predict_dataset, 32, distribution)
model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)
out = model.predict(predict_dataset, steps=2)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyCorrectness(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_metric_correctness(self, distribution):
with self.cached_session():
keras.backend.set_image_data_format('channels_last')
num_samples = 10000
x_train = np.random.randint(0, 2, num_samples)
x_train = np.reshape(x_train, (num_samples, 1))
y_train = x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
# Create identity model.
model = keras.Sequential()
model.add(
keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent.GradientDescentOptimizer(0.5),
metrics=[keras.metrics.BinaryAccuracy()],
distribute=distribution)
batch_size = 64
if not distributed_training_utils.global_batch_size_supported(
distribution):
batch_size //= distribution.num_replicas_in_sync
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = batch_wrapper(train_dataset, batch_size, distribution)
history = model.fit(x=train_dataset, epochs=2, steps_per_epoch=10)
self.assertEqual(history.history['binary_accuracy'], [1.0, 1.0])
@combinations.generate(all_strategy_combinations())
def test_eval_metrics_correctness(self, distribution):
with self.cached_session():
model = keras.Sequential()
model.add(
keras.layers.Dense(
3, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(
1, activation='sigmoid', kernel_initializer='ones'))
model.compile(
loss='mae',
metrics=['accuracy', keras.metrics.BinaryAccuracy()],
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
distribute=distribution)
# verify correctness of stateful and stateless metrics.
x = np.ones((100, 4)).astype('float32')
y = np.ones((100, 1)).astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat()
dataset = batch_wrapper(dataset, 4, distribution)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 1.)
self.assertEqual(outs[2], 1.)
y = np.zeros((100, 1)).astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat()
dataset = batch_wrapper(dataset, 4, distribution)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 0.)
self.assertEqual(outs[2], 0.)
@combinations.generate(strategy_and_input_combinations())
def test_correctness(self, distribution, use_numpy, use_validation_data):
with self.cached_session():
default_tolerance = 1e-5
tol_table = {}
if isinstance(distribution, (
mirrored_strategy.MirroredStrategy,
mirrored_strategy.CoreMirroredStrategy,
distribute_lib._DefaultDistributionStrategy)): # pylint: disable=protected-access
# TODO(b/119257215): Weights are not exactly the same, so use larger
# tolerance for now. Predict should be related to weights.
tol_table = {
'weights_1': 1e-4,
'weights_2': 1e-4,
'predict_result_1': 1e-4,
}
keras.backend.set_image_data_format('channels_last')
np.random.seed(_RANDOM_SEED)
random_seed.set_random_seed(_RANDOM_SEED)
# Train, eval, and predict datasets are created with the same input numpy
# arrays.
# TODO(xiejw): Change this back to 10000, once we support final partial
# batch.
num_samples = 9984
x_train = np.random.rand(num_samples, 1)
y_train = 3 * x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
x_predict = [[1.], [2.], [3.], [4.]]
# The model is built once and the initial weights are saved.
# This is used to initialize the model for both the distribution and
      # non-distribution run. In addition, we add a few non-linear layers to make
# it non-trivial.
def _create_model():
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1))
return model
model = _create_model()
initial_weights = model.get_weights()
del model # avoid accident usage.
def fit_eval_and_predict(with_distribution=None):
model = _create_model()
        # We have initialized the model to the same weights for the distribution
        # and non-distribution runs.
model.set_weights(initial_weights)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.5),
metrics=['mse'],
distribute=with_distribution)
training_inputs, eval_inputs, predict_inputs = (
get_correctness_test_inputs(use_numpy, use_validation_data,
with_distribution,
x_train, y_train, x_predict))
result = {}
result['training_history_1'] = model.fit(**training_inputs).history
if eval_inputs is not None:
result['eval_result_1'] = model.evaluate(**eval_inputs)
result['weights_1'] = model.get_weights()
result['predict_result_1'] = model.predict(**predict_inputs)
# Train and eval again to mimic user's flow.
result['training_history_2'] = model.fit(**training_inputs).history
if eval_inputs is not None:
result['eval_result_2'] = model.evaluate(**eval_inputs)
result['weights_2'] = model.get_weights()
return result
results_with_ds = fit_eval_and_predict(with_distribution=distribution)
results_without_ds = fit_eval_and_predict(with_distribution=None)
# Verify that the weights, training history, eval results, predict outputs
# are the same within some limits of tolerance.
for key in results_with_ds:
if (key.startswith('training_history') and
isinstance(distribution, tpu_strategy.TPUStrategyV1) and
distribution.extended.steps_per_run > 1):
# TODO(b/119894254): Enable this test for all cases once the
# underlying bug is fixed.
continue
tolerance = tol_table.get(key, default_tolerance)
self.assertAllClose(
results_with_ds[key],
results_without_ds[key],
atol=tolerance,
rtol=tolerance,
msg='Fail to assert {}.'.format(key))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/distribute/python/keras_backward_compat_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contrib version of MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
# pylint: disable=protected-access,invalid-name
_call_for_each_replica = mirrored_strategy._call_for_each_replica
_create_mirrored_variable = mirrored_strategy._create_mirrored_variable
all_local_devices = mirrored_strategy.all_local_devices
CoreMirroredStrategy = mirrored_strategy.MirroredStrategy
CoreMirroredExtended = mirrored_strategy.MirroredExtended
# pylint: enable=protected-access,invalid-name
class MirroredStrategy(distribute_lib.StrategyV1):
"""Mirrors vars to distribute across multiple devices and machines.
*** contrib version ***
This strategy uses one replica per device and sync replication for its
multi-GPU version.
  When `cluster_spec` is given by the `configure` method, it turns into the
  multi-worker version that works on multiple workers with in-graph replication.
Note: `configure` will be called by higher-level APIs if running in
distributed environment.
There are several important concepts for distributed TensorFlow, e.g.
`client`, `job`, `task`, `cluster`, `in-graph replication` and
`synchronous training` and they have already been defined in the
[TensorFlow's documentation](https://www.tensorflow.org/deploy/distributed).
The distribution strategy inherits these concepts as well and in addition to
that we also clarify several more concepts:
* **In-graph replication**: the `client` creates a single `tf.Graph` that
specifies tasks for devices on all workers. The `client` then creates a
client session which will talk to the `master` service of a `worker`. Then
the `master` will partition the graph and distribute the work to all
participating workers.
* **Worker**: A `worker` is a TensorFlow `task` that usually maps to one
physical machine. We will have multiple `worker`s with different `task`
index. They all do similar things except for one worker checkpointing model
variables, writing summaries, etc. in addition to its ordinary work.
The multi-worker version of this class maps one replica to one device on a
worker. It mirrors all model variables on all replicas. For example, if you
have two `worker`s and each `worker` has 4 GPUs, it will create 8 copies of
  the model variables on these 8 GPUs. Then, like in MirroredStrategy, each
  replica performs its computation with its own copy of the variables unless in
  cross-replica mode, where variable or tensor reduction happens.
Args:
devices: a list of device strings.
num_gpus: number of GPUs. For local training, either specify `devices` or
`num_gpus`. In distributed training, this must be specified as number of
GPUs on each worker.
num_gpus_per_worker: number of GPUs per worker. This is the same as
`num_gpus` and only one of `num_gpus` and `num_gpus_per_worker` can be
specified.
    cross_device_ops: optional, a descendant of `CrossDeviceOps`. If this is not
set, the `configure` method will try to find the best one.
auto_shard_dataset: whether to auto-shard the dataset when there are
multiple workers.
cross_tower_ops: Deprecated alias for `cross_device_ops`.
"""
def __init__(self,
devices=None,
num_gpus=None,
num_gpus_per_worker=None,
cross_device_ops=None,
auto_shard_dataset=False,
cross_tower_ops=None):
assert not (cross_device_ops and cross_tower_ops)
if num_gpus is not None and num_gpus_per_worker is not None:
raise ValueError(
"You cannot specify both `num_gpus` and `num_gpus_per_worker`.")
if num_gpus is None:
num_gpus = num_gpus_per_worker
extended = MirroredExtended(self, devices, num_gpus,
cross_device_ops or cross_tower_ops,
auto_shard_dataset)
super(MirroredStrategy, self).__init__(extended)
# Override to change the documentation to reflect the different handling of
# global vs. local batch size between core and contrib.
def make_dataset_iterator(self, dataset): # pylint: disable=useless-super-delegation
"""Makes an iterator for input provided via `dataset`.
NOTE: The batch size of the `dataset` argument is treated differently for
this contrib version of `MirroredStrategy`.
Data from the given dataset will be distributed evenly across all the
compute replicas. We will assume that the input dataset is batched by the
per-replica batch size.
The user could also use `make_input_fn_iterator` if they want to
customize which input is fed to which replica/worker etc.
Args:
dataset: `tf.data.Dataset` that will be distributed evenly across all
replicas.
Returns:
      A `tf.distribute.InputIterator` which returns inputs for each step of the
      computation. The user should call `initialize` on the returned iterator.
"""
return super(MirroredStrategy, self).make_dataset_iterator(dataset)
class MirroredExtended(CoreMirroredExtended):
"""Implementation of (contrib) MirroredStrategy."""
def __init__(self,
container_strategy,
devices=None,
num_gpus_per_worker=None,
cross_device_ops=None,
auto_shard_dataset=False):
if devices is None:
devices = mirrored_strategy.all_local_devices(num_gpus_per_worker)
elif num_gpus_per_worker is not None:
raise ValueError(
"Must only specify one of `devices` and `num_gpus_per_worker`.")
super(MirroredExtended, self).__init__(container_strategy, devices,
cross_device_ops)
self._auto_shard_dataset = auto_shard_dataset
def _make_dataset_iterator(self, dataset):
"""Make iterator from dataset without splitting the batch.
This implementation is different than the one in
`tf.distribute.MirroredStrategy` for purposes of backward compatibility.
We treat the incoming dataset's batch size as per replica batch size.
Args:
dataset: `tf.data.Dataset` for input.
Returns:
An `InputIterator` which returns inputs for each step of the computation.
"""
return input_lib.DatasetIterator(dataset, self._input_workers,
self._container_strategy())
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""The contrib version of Mirrored strategy uses per-replica batch size."""
return False
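# Illustrative sketch (not part of the original file): because this contrib
# strategy treats the dataset's batch size as the *per-replica* batch size,
# batching by 32 below feeds 32 examples to each replica per step (a global
# batch of 64 on a two-GPU MirroredStrategy). The dataset contents are
# placeholders.
def _example_per_replica_batching():
  from tensorflow.python.data.ops import dataset_ops
  strategy = MirroredStrategy(num_gpus=2)
  dataset = dataset_ops.Dataset.from_tensor_slices(list(range(128))).batch(32)
  return strategy.make_dataset_iterator(dataset)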
|
tensorflow-master
|
tensorflow/contrib/distribute/python/mirrored_strategy.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes implementing a multi-worker ps DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
# pylint: disable=protected-access,invalid-name,line-too-long
CoreParameterServerStrategy = parameter_server_strategy.ParameterServerStrategy
CoreParameterServerExtended = parameter_server_strategy.ParameterServerStrategyExtended
# pylint: enable=protected-access,invalid-name,line-too-long
class ParameterServerStrategy(distribute_lib.StrategyV1):
"""A parameter server DistributionStrategy.
*** contrib version ***
This strategy class works for both local training and between-graph replicated
training for multiple workers. If `cluster_spec` is specified, either passed
in to __init__() method or parsed from the
["TF_CONFIG" environment
variable](https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig),
variables and updates to those variables are assigned to parameter servers and
other operations are assigned to workers. If `cluster_spec` is not set, it
becomes local training where variables are assigned to local CPU or the only
GPU. When each worker has more than one GPU, operations will be replicated on
these GPUs. In both cases, operations are replicated but variables are not and
these workers share a common view for which parameter server a variable is
assigned to.
This class assumes between-graph replication will be used and works on a graph
for a particular worker. Note that each graph and worker is independent.
This means that while each worker will synchronously compute a single gradient
update across all GPUs, updates between workers proceed asynchronously.
Operations that occur only on the first replica (such as incrementing the
  global step) will occur on the first replica *of every worker*.
It is expected to call `call_for_each_replica(fn, ...)` for any
operations which potentially can be replicated across replicas (i.e. multiple
GPUs) even if there is only CPU or one GPU. When defining the `fn`, extra
caution needs to be taken:
1) Always use `tf.compat.v1.get_variable` instead of `tf.Variable` which
is not able to refer to the same variable on different replicas.
2) It is generally not recommended to open a device scope under the strategy's
scope. A device scope (i.e. calling `tf.device`) will be merged with or
override the device for operations but will not change the device for
variables.
3) It is also not recommended to open a colocation scope (i.e. calling
`tf.compat.v1.colocate_with`) under the strategy's scope. For colocating
variables, use `strategy.extended.colocate_vars_with` instead. Colocation of
ops will possibly create conflicts of device assignment.
"""
def __init__(self, num_gpus_per_worker=0):
"""Initializes this strategy.
Args:
num_gpus_per_worker: number of local GPUs or GPUs per worker, the default
is 0 meaning CPU only.
Raises:
ValueError: if `cluster_spec` is given but `task_type` or `task_id` is
not.
"""
super(ParameterServerStrategy, self).__init__(
ParameterServerExtended(self, num_gpus_per_worker))
# Override to change the documentation to reflect the different handling of
# global vs. local batch size between core and contrib.
def make_dataset_iterator(self, dataset): # pylint: disable=useless-super-delegation
"""Makes an iterator for input provided via `dataset`.
NOTE: The batch size of the `dataset` argument is treated differently for
this contrib version of `ParameterServerStrategy`.
Data from the given dataset will be distributed evenly across all the
compute replicas. We will assume that the input dataset is batched by the
per-replica batch size.
The user could also use `make_input_fn_iterator` if they want to
customize which input is fed to which replica/worker etc.
Args:
dataset: `tf.data.Dataset` that will be distributed evenly across all
replicas.
Returns:
      A `tf.distribute.InputIterator` which returns inputs for each step of the
      computation. The user should call `initialize` on the returned iterator.
"""
return super(ParameterServerStrategy, self).make_dataset_iterator(dataset)
class ParameterServerExtended(CoreParameterServerExtended):
"""Implementation of ParameterServerStrategy."""
def __init__(self, container_strategy, num_gpus_per_worker):
# Use TFConfigClusterResolver to parse TF_CONFIG. We don't want to change
# the constructor's interface to allow customized cluster resolver. Use
# SimpleClusterResolver to override num_accelerators.
tfconfig = TFConfigClusterResolver()
cluster_resolver = SimpleClusterResolver(
cluster_spec=tfconfig.cluster_spec(),
task_type=tfconfig.task_type,
task_id=tfconfig.task_id,
num_accelerators={'GPU': num_gpus_per_worker})
super(ParameterServerExtended, self).__init__(
container_strategy, cluster_resolver=cluster_resolver)
def _make_dataset_iterator(self, dataset):
return input_lib.DatasetIterator(dataset, self._input_workers,
self._container_strategy())
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""The contrib version of PS strategy uses per-replica batch size."""
return False
|
tensorflow-master
|
tensorflow/contrib/distribute/python/parameter_server_strategy.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for accuracy and mathematical correctness of tf.keras multi-worker."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl.testing import parameterized
import numpy as np
# pylint: disable=g-direct-tensorflow-import
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy as collective_strategy
from tensorflow.contrib.distribute.python import keras_multi_worker_test_base
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.platform import test
np.random.seed(99)
EMBED_INPUTS = np.random.randint(0, 10, (6400, 1)).astype(np.int32)
EMBED_TARGETS = np.random.normal(0, 0.1, (6400, 1)).astype(np.float32)
IMAGE_INPUTS = np.random.normal(0, 0.1, (6400, 28, 28, 3)).astype(np.float32)
IMAGE_TARGETS = np.random.randint(0, 10, (6400, 1))
LSTM_INPUTS = np.random.normal(0, 0.1, (6400, 10, 20)).astype(np.float32)
LSTM_TARGETS = np.random.normal(0, 0.1, (6400, 1)).astype(np.float32)
def get_num_workers():
cluster_resolver = TFConfigClusterResolver()
cluster_spec = cluster_resolver.cluster_spec().as_dict()
if cluster_spec:
task_type = cluster_resolver.task_type
return int(multi_worker_util.worker_count(cluster_spec, task_type))
return 1
class Bias(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_weight(shape=(), initializer='zeros', name='bias')
def call(self, inputs):
return inputs + self.bias
class SimpleBiasTest(
keras_multi_worker_test_base.KerasIndependentWorkerTestBase,
parameterized.TestCase):
@keras_multi_worker_test_base.run_sync_strategies
def test_multi_worker_simple_bias_fit(self, strategy_cls):
def _worker_fn(results_without_ds=None):
# Make sure Session is cleared at the start of each run.
keras.backend._SESSION.session = None
x = ops.convert_to_tensor([[0.], [1.], [2.], [0.], [1.], [2.], [0.],
[1.]])
y = ops.convert_to_tensor([[0.5], [2.], [3.5], [0.5], [2.], [3.5], [0.5],
[2.]])
ds = dataset_ops.Dataset.from_tensor_slices((x, y))
ds = ds.batch(8)
model = keras.Sequential([Bias(input_shape=(1,))])
model.compile(
keras.optimizer_v2.gradient_descent.SGD(0.1), 'mae', metrics=['mae'])
history = model.fit(ds, epochs=5)
self.assertAllClose(history.history['loss'],
[0.9375, 0.8375, 0.7375, 0.6375, 0.5375])
self.assertAllClose(history.history['mean_absolute_error'],
[0.9375, 0.8375, 0.7375, 0.6375, 0.5375])
results = {'training': history.history}
if results_without_ds:
for key in results:
self.assertAllClose(
results[key],
results_without_ds[key],
msg='Fail to assert {}'.format(key))
return results
results_without_ds = _worker_fn()
self.run_independent_workers(
_worker_fn,
strategy_cls,
num_workers=2,
results_without_ds=results_without_ds)
def make_image_model(initial_weights=None):
image = keras.layers.Input(shape=(28, 28, 3), name='image')
c1 = keras.layers.Conv2D(
name='conv1',
filters=16,
kernel_size=(3, 3),
strides=(4, 4),
kernel_regularizer=keras.regularizers.l2(1e-4))(
image)
c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1)
c1 = keras.layers.Flatten()(c1)
logits = keras.layers.Dense(10, activation='softmax', name='pred')(c1)
model = keras.Model(inputs=[image], outputs=[logits])
if initial_weights:
model.set_weights(initial_weights)
model.compile(
'sgd',
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
return model, IMAGE_INPUTS, IMAGE_TARGETS
def make_lstm_model(initial_weights=None):
inputs = keras.layers.Input(shape=(10, 20))
rnn_out = keras.layers.LSTM(4)(inputs)
outputs = keras.layers.Dense(1)(rnn_out)
model = keras.Model(inputs, outputs)
if initial_weights:
model.set_weights(initial_weights)
model.compile(
gradient_descent.SGD(0.1),
'sparse_categorical_crossentropy',
metrics=['sparse_categorical_crossentropy'])
return model, LSTM_INPUTS, LSTM_TARGETS
def make_embedding_model(initial_weights=None):
inputs = keras.layers.Input(shape=(1,), dtype='int32')
embeddings = keras.layers.Embedding(100, 5)(inputs)
outputs = keras.layers.Dense(1, activation='softmax')(embeddings)
model = keras.Model(inputs, outputs)
if initial_weights:
model.set_weights(initial_weights)
model.compile('rmsprop', 'mae', metrics=['binary_crossentropy'])
return model, EMBED_INPUTS, EMBED_TARGETS
class ModelCorrectnessTest(
keras_multi_worker_test_base.KerasIndependentWorkerTestBase,
parameterized.TestCase):
def make_dataset(self, inputs, targets, batch_size=64):
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.batch(batch_size)
return dataset
@combinations.generate(
combinations.combine(
mode=['graph'],
strategy_cls=[
collective_strategy.CollectiveAllReduceStrategy,
],
make_model=[make_image_model, make_lstm_model, make_embedding_model],
required_gpus=[0, 1]))
def test_correctness(self, strategy_cls, make_model):
def _worker_fn(initial_weights=None, results_without_ds=None):
# Make sure Session is cleared at each run
# so that it can be configured properly for the DistributionStrategy.
keras.backend._SESSION.session = None
results = {}
model, inputs, targets = make_model(initial_weights)
data = self.make_dataset(inputs, targets)
# TODO(b/129363441): Remove `steps_per_epoch`.
results['training'] = model.fit(
data, steps_per_epoch=50, epochs=2).history
results['trained_weights'] = model.get_weights()
eval_data = self.make_dataset(inputs, targets)
results['evaluation'] = model.evaluate(eval_data, steps=50)
if results_without_ds:
for key in results:
self.assertAllClose(
results[key],
results_without_ds[key],
rtol=1e-5,
atol=1e-5,
msg='Fail to assert {}'.format(key))
return results
model, _, _ = make_model()
initial_weights = model.get_weights()
results_without_ds = _worker_fn(initial_weights=initial_weights)
self.run_independent_workers(
_worker_fn,
strategy_cls,
num_workers=2,
initial_weights=initial_weights,
results_without_ds=results_without_ds)
if __name__ == '__main__':
with test.mock.patch.object(sys, 'exit', os._exit):
test.main()
|
tensorflow-master
|
tensorflow/contrib/distribute/python/keras_multi_worker_correctness_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ParameterServerStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import threading
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import parameter_server_strategy as core_parameter_server_strategy
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.estimator import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
CHIEF = run_config.TaskType.CHIEF
WORKER = run_config.TaskType.WORKER
PS = run_config.TaskType.PS
def _get_replica_id_integer():
replica_id = ds_context.get_replica_context().replica_id_in_sync_group
if isinstance(replica_id, ops.Tensor):
replica_id = tensor_util.constant_value(replica_id)
return replica_id
def create_test_objects(cluster_spec=None,
task_type=None,
task_id=None,
num_gpus=None,
sess_config=None,
use_core_strategy=False):
sess_config = sess_config or config_pb2.ConfigProto()
if num_gpus is None:
num_gpus = context.num_gpus()
if use_core_strategy:
if cluster_spec and task_type and task_id is not None:
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={'GPU': num_gpus})
distribution = core_parameter_server_strategy.ParameterServerStrategy(
cluster_resolver)
target = 'grpc://' + cluster_spec[WORKER][task_id]
else:
distribution = (
central_storage_strategy.CentralStorageStrategy._from_num_gpus(
num_gpus))
target = ''
sess_config = copy.deepcopy(sess_config)
sess_config = distribution.update_config_proto(sess_config)
else:
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=num_gpus)
if task_type:
sess_config = copy.deepcopy(sess_config)
distribution.configure(
session_config=sess_config,
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id)
target = 'grpc://' + cluster_spec[WORKER][task_id]
else:
target = ''
return distribution, target, sess_config
class ParameterServerStrategyTestBase(
multi_worker_test_base.MultiWorkerTestBase):
def setUp(self):
self._result = 0
self._lock = threading.Lock()
self._init_condition = threading.Condition()
self._init_reached = 0
self._finish_condition = threading.Condition()
self._finish_reached = 0
self._sess_config = config_pb2.ConfigProto(allow_soft_placement=True)
super(ParameterServerStrategyTestBase, self).setUp()
def _get_test_objects(self,
task_type,
task_id,
num_gpus,
use_core_strategy=False):
return create_test_objects(
cluster_spec=self._cluster_spec,
task_type=task_type,
task_id=task_id,
num_gpus=num_gpus,
sess_config=self._sess_config,
use_core_strategy=use_core_strategy)
def _test_device_assignment_distributed(self,
task_type,
task_id,
num_gpus,
use_core_strategy=False):
worker_device = '/job:%s/replica:0/task:%d' % (task_type, task_id)
d, _, sess_config = self._get_test_objects(
task_type, task_id, num_gpus, use_core_strategy=use_core_strategy)
with ops.Graph().as_default(), \
self.cached_session(target=self._default_target,
config=sess_config) as sess, \
d.scope():
# Define a variable outside the call_for_each_replica scope.
n = variable_scope.get_variable('n', initializer=10.0)
self.assertEqual(n.device, '/job:ps/task:0')
def model_fn():
if num_gpus == 0:
last_part_device = 'device:CPU:0'
else:
replica_id = _get_replica_id_integer()
last_part_device = ('device:GPU:%d' % replica_id)
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
self.assertEqual(a.device, worker_device + '/' + last_part_device)
self.assertEqual(b.device, worker_device + '/' + last_part_device)
self.assertEqual(c.device, worker_device + '/' + last_part_device)
# The device scope is ignored for variables but not for normal ops.
with ops.device('/job:worker/task:0'):
x = variable_scope.get_variable(
'x', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
x_add = x.assign_add(c)
e = a + c
        # The variable x is on task 1 since the device function has already
        # been called once before the model_fn runs.
self.assertEqual(x.device, '/job:ps/task:1')
self.assertEqual(x_add.device, x.device)
self.assertEqual(e.device,
'/job:worker/replica:0/task:0/%s' % last_part_device)
# The colocate_vars_with can override the distribution's device.
with d.extended.colocate_vars_with(x):
y = variable_scope.get_variable(
'y', initializer=20.0,
aggregation=variable_scope.VariableAggregation.SUM)
# We add an identity here to avoid complaints about summing
# non-distributed values.
y_add = y.assign_add(array_ops.identity(x_add))
self.assertEqual(y.device, '/job:ps/task:1')
self.assertEqual(y_add.device, y.device)
self.assertEqual(y.device, x.device)
z = variable_scope.get_variable(
'z', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertEqual(z.device, '/job:ps/task:0')
self.assertNotEqual(z.device, x.device)
with ops.control_dependencies([y_add]):
# We add an identity here to avoid complaints about summing
# non-distributed values.
z_add = z.assign_add(array_ops.identity(y))
with ops.control_dependencies([z_add]):
f = z + c
self.assertEqual(f.device, worker_device + '/' + last_part_device)
# The device scope would merge with the default worker device.
with ops.device('/CPU:1'):
g = e + 1.0
self.assertEqual(g.device, worker_device + '/device:CPU:1')
        # The ops.colocate_with will be ignored when defining a variable but
        # not for a normal tensor.
with ops.colocate_with(x):
u = variable_scope.get_variable('u', initializer=30.0)
v = variable_scope.get_variable('v', initializer=30.0)
h = f + 1.0
self.assertIn('/job:ps/', u.device)
self.assertIn('/job:ps/', v.device)
# u and v are on different parameter servers.
self.assertTrue(u.device != x.device or v.device != x.device)
self.assertTrue(u.device == x.device or v.device == x.device)
        # Here h is not on the worker; because of ops.colocate_with(x) it is
        # placed on a parameter server. Note h.device is canonical while
        # x.device is not.
self.assertIn('/job:ps/', h.device)
return y_add, z_add, f
y, z, f = d.extended.call_for_each_replica(model_fn)
self.assertNotEqual(y, None)
self.assertNotEqual(z, None)
self.assertNotEqual(f, None)
if context.num_gpus() >= 1 and num_gpus <= 1:
variables.global_variables_initializer().run()
y_val, z_val, f_val = sess.run([y, z, f])
self.assertEqual(y_val, 33.0)
self.assertEqual(z_val, 43.0)
self.assertEqual(f_val, 46.0)
def _test_device_assignment_distributed_enable_partitioner(
self, task_type, task_id, num_gpus, use_core_strategy=False):
d, _, sess_config = self._get_test_objects(
task_type, task_id, num_gpus, use_core_strategy=use_core_strategy)
num_shards = len(d.extended.parameter_devices)
partitioner = partitioned_variables.fixed_size_partitioner(num_shards)
with ops.Graph().as_default(), \
self.cached_session(target=self._default_target,
config=sess_config) as sess, \
d.scope():
n = variable_scope.get_variable(
'n',
initializer=constant_op.constant([10.0, 20.0]),
aggregation=variable_scope.VariableAggregation.SUM,
partitioner=partitioner)
for part_id, var in enumerate(n):
self.assertEqual(var.device, '/job:ps/task:%d' % part_id)
def model_fn():
a = constant_op.constant([3.0, 5.0])
# The device scope is ignored for variables but not for normal ops.
with ops.device('/job:worker/task:0'):
x = variable_scope.get_variable(
'x',
initializer=constant_op.constant([10.0, 20.0]),
aggregation=variable_scope.VariableAggregation.SUM,
partitioner=partitioner)
x_add = x.assign_add(a, name='x_add')
        # Each partition of the variable x is placed on a parameter server
        # since the device function has already been called before the
        # model_fn runs.
for part_id, var in enumerate(x):
self.assertEqual(var.device, '/job:ps/task:%d' % part_id)
self.assertEqual(var.device, x_add[part_id].device)
return x_add
x = d.extended.call_for_each_replica(model_fn)
if context.num_gpus() >= 1:
variables.global_variables_initializer().run()
x_val = sess.run(x)
if num_gpus < 1:
self.assertEqual(x_val, [13.0, 25.0])
else:
x_expect = [10.0 + 3 * num_gpus, 20.0 + 5 * num_gpus]
self.assertEqual(x_val, x_expect)
def _test_device_assignment_local(self,
d,
compute_device='CPU',
variable_device='CPU',
num_gpus=0):
with ops.Graph().as_default(), \
self.cached_session(target=self._default_target,
config=self._sess_config) as sess, \
d.scope():
def model_fn():
if 'CPU' in compute_device:
replica_compute_device = '/device:CPU:0'
else:
replica_id = _get_replica_id_integer()
replica_compute_device = ('/device:GPU:%d' % replica_id)
replica_compute_device = device_util.canonicalize(
replica_compute_device)
if 'CPU' in variable_device:
replica_variable_device = '/device:CPU:0'
else:
replica_id = _get_replica_id_integer()
replica_variable_device = ('/device:GPU:%d' % replica_id)
replica_variable_device = device_util.canonicalize(
replica_variable_device)
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
self.assertEqual(a.device, replica_compute_device)
self.assertEqual(b.device, replica_compute_device)
self.assertEqual(c.device, replica_compute_device)
# The device scope is ignored for variables but not for normal ops.
with ops.device('/device:GPU:2'):
x = variable_scope.get_variable(
'x', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
x_add = x.assign_add(c)
e = a + c
self.assertEqual(
device_util.canonicalize(x.device), replica_variable_device)
self.assertEqual(x_add.device, x.device)
self.assertEqual(e.device, device_util.canonicalize('/device:GPU:2'))
# The colocate_vars_with can override the distribution's device.
with d.extended.colocate_vars_with(x):
y = variable_scope.get_variable(
'y', initializer=20.0,
aggregation=variable_scope.VariableAggregation.SUM)
# We add an identity here to avoid complaints about summing
# non-distributed values.
y_add = y.assign_add(array_ops.identity(x_add))
self.assertEqual(
device_util.canonicalize(y.device), replica_variable_device)
self.assertEqual(y_add.device, y.device)
self.assertEqual(y.device, x.device)
z = variable_scope.get_variable(
'z', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertEqual(
device_util.canonicalize(z.device), replica_variable_device)
with ops.control_dependencies([y_add]):
# We add an identity here to avoid complaints about summing
# non-distributed values.
z_add = z.assign_add(array_ops.identity(y))
with ops.control_dependencies([z_add]):
f = z + c
self.assertEqual(f.device, replica_compute_device)
# The device scope would merge with the default worker device.
with ops.device('/CPU:1'):
g = e + 1.0
self.assertEqual(g.device, device_util.canonicalize('/device:CPU:1'))
        # The ops.colocate_with will be ignored when defining a variable but
        # not for a normal tensor.
with ops.colocate_with(x):
u = variable_scope.get_variable('u', initializer=30.0)
h = f + 1.0
self.assertEqual(
device_util.canonicalize(u.device), replica_variable_device)
self.assertEqual(
device_util.canonicalize(x.device),
device_util.canonicalize(h.device))
return y_add, z_add, f
y, z, f = d.extended.call_for_each_replica(model_fn)
self.assertNotEqual(y, None)
self.assertNotEqual(z, None)
self.assertNotEqual(f, None)
if context.num_gpus() >= 1 and num_gpus <= 1:
variables.global_variables_initializer().run()
y_val, z_val, f_val = sess.run([y, z, f])
self.assertEqual(y_val, 33.0)
self.assertEqual(z_val, 43.0)
self.assertEqual(f_val, 46.0)
def _test_simple_increment(self,
task_type,
task_id,
num_gpus,
use_core_strategy=False):
d, master_target, sess_config = self._get_test_objects(
task_type, task_id, num_gpus, use_core_strategy=use_core_strategy)
if d.extended._cluster_spec:
num_workers = len(d.extended._cluster_spec.as_dict().get(WORKER))
if 'chief' in d.extended._cluster_spec.as_dict():
num_workers += 1
else:
num_workers = 1
with ops.Graph().as_default(), \
self.cached_session(target=master_target,
config=sess_config) as sess, \
d.scope():
def model_fn():
x = variable_scope.get_variable(
'x', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
y = variable_scope.get_variable(
'y', initializer=20.0,
aggregation=variable_scope.VariableAggregation.SUM)
z = variable_scope.get_variable(
'z', initializer=30.0,
aggregation=variable_scope.VariableAggregation.ONLY_FIRST_REPLICA)
# We explicitly make a constant tensor here to avoid complaints about
# summing non-distributed values.
one = constant_op.constant(1.0)
x_add = x.assign_add(one, use_locking=True)
y_add = y.assign_add(one, use_locking=True)
z_add = z.assign_add(one, use_locking=True)
train_op = control_flow_ops.group(x_add, y_add, z_add)
return x, y, z, train_op
x, y, z, train_op = d.extended.call_for_each_replica(model_fn)
train_op = d.group(train_op)
if context.num_gpus() < sum(
1 for d in d.extended.worker_devices if 'GPU' in d.upper()):
return True
if task_id == 0:
variables.global_variables_initializer().run()
      # Workers wait for the chief worker to finish initializing the variables.
self._init_condition.acquire()
self._init_reached += 1
while self._init_reached != num_workers:
self._init_condition.wait()
self._init_condition.notify_all()
self._init_condition.release()
sess.run(train_op)
# Wait for other workers to finish training.
self._finish_condition.acquire()
self._finish_reached += 1
while self._finish_reached != num_workers:
self._finish_condition.wait()
self._finish_condition.notify_all()
self._finish_condition.release()
x_val, y_val, z_val = sess.run([x, y, z])
self.assertEqual(x_val, 10.0 + 1.0 * num_workers * d.num_replicas_in_sync)
self.assertEqual(y_val, 20.0 + 1.0 * num_workers * d.num_replicas_in_sync)
self.assertEqual(z_val, 30.0 + 1.0 * num_workers)
return (x_val == 10.0 + 1.0 * num_workers * d.num_replicas_in_sync and
y_val == 20.0 + 1.0 * num_workers * d.num_replicas_in_sync and
z_val == 30.0 + 1.0 * num_workers)
def _test_minimize_loss_graph(self,
task_type,
task_id,
num_gpus,
use_core_strategy=False):
d, master_target, sess_config = self._get_test_objects(
task_type, task_id, num_gpus, use_core_strategy=use_core_strategy)
if task_type:
# Multi-worker
assert hasattr(d.extended, '_cluster_spec') and d.extended._cluster_spec
num_workers = len(d.extended._cluster_spec.as_dict().get(WORKER))
if CHIEF in d.extended._cluster_spec.as_dict():
num_workers += 1
else:
# local
num_workers = 1
with ops.Graph().as_default(), \
self.cached_session(target=master_target,
config=sess_config) as sess, \
d.scope():
l = core.Dense(1, use_bias=False)
def loss_fn(x):
y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
return y * y
# TODO(yuefengz, apassos): eager.backprop.implicit_grad is not safe for
# multiple graphs (b/111216820).
def grad_fn(x):
loss = loss_fn(x)
var_list = (
variables.trainable_variables() + ops.get_collection(
ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
grads = gradients.gradients(loss, var_list)
ret = list(zip(grads, var_list))
return ret
def update(v, g):
return v.assign_sub(0.05 * g, use_locking=True)
one = constant_op.constant([[1.]])
def step():
"""Perform one optimization step."""
# Run forward & backward to get gradients, variables list.
g_v = d.extended.call_for_each_replica(grad_fn, args=(one,))
# Update the variables using the gradients and the update() function.
before_list = []
after_list = []
for g, v in g_v:
fetched = d.extended.read_var(v)
before_list.append(fetched)
with ops.control_dependencies([fetched]):
# TODO(yuefengz): support non-Mirrored variable as destinations.
g = d.extended.reduce_to(
reduce_util.ReduceOp.SUM, g, destinations=v)
with ops.control_dependencies(
d.extended.update(v, update, args=(g,), group=False)):
after_list.append(d.extended.read_var(v))
return before_list, after_list
before_out, after_out = step()
if context.num_gpus() < sum(
1 for d in d.extended.worker_devices if 'GPU' in d.upper()):
return True
if (not task_type or
multi_worker_util.is_chief(
d.extended._cluster_spec, task_type, task_id)):
variables.global_variables_initializer().run()
      # Workers wait for the chief worker to finish initializing the variables.
self._init_condition.acquire()
self._init_reached += 1
while self._init_reached != num_workers:
self._init_condition.wait()
self._init_condition.notify_all()
self._init_condition.release()
for i in range(10):
b, a = sess.run((before_out, after_out))
if i == 0:
before, = b
after, = a
error_before = abs(before - 1)
error_after = abs(after - 1)
# Error should go down
self.assertLess(error_after, error_before)
return error_after < error_before
def _test_input_fn_iterator(self,
task_type,
task_id,
num_gpus,
input_fn,
expected_values,
test_reinitialize=True,
ignore_order=False,
use_core_strategy=False):
distribution, master_target, config = self._get_test_objects(
task_type, task_id, num_gpus, use_core_strategy=use_core_strategy)
devices = distribution.extended.worker_devices
with ops.Graph().as_default(), \
self.cached_session(config=config,
target=master_target) as sess:
iterator = distribution.make_input_fn_iterator(input_fn)
sess.run(iterator.initialize())
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = sess.run([values.select_replica(r, next_element)
for r in range(len(devices))])
if ignore_order:
self.assertCountEqual(expected_value, computed_value)
else:
self.assertEqual(expected_value, computed_value)
with self.assertRaises(errors.OutOfRangeError):
next_element = iterator.get_next()
sess.run([values.select_replica(r, next_element)
for r in range(len(devices))])
      # After re-initializing the iterator, we should be able to iterate again.
if test_reinitialize:
sess.run(iterator.initialize())
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = sess.run([values.select_replica(r, next_element)
for r in range(len(devices))])
if ignore_order:
self.assertCountEqual(expected_value, computed_value)
else:
self.assertEqual(expected_value, computed_value)
class ParameterServerStrategyTest(
ParameterServerStrategyTestBase,
strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=2)
cls._default_target = 'grpc://' + cls._cluster_spec[WORKER][0]
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def test_num_replicas_in_sync(self, use_core_strategy):
strategy, _, _ = create_test_objects(
num_gpus=2, use_core_strategy=use_core_strategy)
    # All the devices on a given worker are in sync, so the number of replicas
    # in sync equals the number of GPUs on each worker.
self.assertEqual(2, strategy.num_replicas_in_sync)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testDeviceAssignmentLocalCPU(self, use_core_strategy):
strategy, _, _ = create_test_objects(
num_gpus=0, use_core_strategy=use_core_strategy)
self._test_device_assignment_local(
strategy, compute_device='CPU', variable_device='CPU', num_gpus=0)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testDeviceAssignmentLocalOneGPU(self, use_core_strategy):
strategy, _, _ = create_test_objects(
num_gpus=1, use_core_strategy=use_core_strategy)
self._test_device_assignment_local(
strategy, compute_device='GPU', variable_device='GPU', num_gpus=1)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testDeviceAssignmentLocalTwoGPUs(self, use_core_strategy):
strategy, _, _ = create_test_objects(
num_gpus=2, use_core_strategy=use_core_strategy)
self._test_device_assignment_local(
strategy, compute_device='GPU', variable_device='CPU', num_gpus=2)
@combinations.generate(
combinations.combine(
mode=['graph'], num_gpus=[0, 1, 2], use_core_strategy=[True, False]))
def testDeviceAssignmentDistributed(self, num_gpus, use_core_strategy):
self._test_device_assignment_distributed(
'worker', 1, num_gpus, use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(
mode=['graph'], num_gpus=[0, 1, 2], use_core_strategy=[True, False]))
def testDeviceAssignmentDistributedEnablePartitioner(self, num_gpus,
use_core_strategy):
self._test_device_assignment_distributed_enable_partitioner(
'worker', 1, num_gpus, use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testSimpleBetweenGraph(self, use_core_strategy):
self._run_between_graph_clients(
self._test_simple_increment,
self._cluster_spec,
context.num_gpus(),
use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(
mode=['graph'], num_gpus=[0, 1, 2], use_core_strategy=[True, False]))
def testLocalSimpleIncrement(self, num_gpus, use_core_strategy):
self._test_simple_increment(None, 0, num_gpus, use_core_strategy)
@combinations.generate(
combinations.combine(
mode=['graph'], num_gpus=[0, 1, 2], use_core_strategy=[True, False]))
def testMinimizeLossGraphDistributed(self, num_gpus, use_core_strategy):
self._run_between_graph_clients(
self._test_minimize_loss_graph,
self._cluster_spec,
num_gpus,
use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(
mode=['graph'], num_gpus=[0, 1, 2], use_core_strategy=[True, False]))
def testMinimizeLossGraphLocal(self, num_gpus, use_core_strategy):
self._test_minimize_loss_graph(None, None, num_gpus, use_core_strategy)
# TODO(priyag): Refactor this and other multi worker tests.
@combinations.generate(
combinations.combine(
mode=['graph'],
num_gpus=[1, 2],
required_gpus=1,
use_core_strategy=[True, False],
use_dataset=[True, False]))
def testMakeInputFnIteratorDistributed(
self, num_gpus, use_core_strategy, use_dataset):
if context.num_gpus() < num_gpus:
self.skipTest('Not enough GPUs')
if use_dataset:
fn = lambda: dataset_ops.Dataset.range(100)
else:
def fn():
dataset = dataset_ops.Dataset.range(100)
it = dataset.make_one_shot_iterator()
return it.get_next
expected_values = [[i+j for j in range(num_gpus)]
for i in range(0, 100, num_gpus)]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=num_gpus,
expected_num_input_pipelines=3,
expected_input_pipeline_id=1) # because task_id = 1
self._test_input_fn_iterator(
'worker',
1,
num_gpus,
input_fn,
expected_values,
test_reinitialize=use_dataset,
ignore_order=not use_dataset,
use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(
mode=['graph'],
num_gpus=[1, 2],
required_gpus=1,
use_core_strategy=[True, False],
use_dataset=[True, False]))
def testMakeInputFnIteratorLocal(self, num_gpus, use_core_strategy,
use_dataset):
if context.num_gpus() < num_gpus:
self.skipTest('Not enough GPUs')
if use_dataset:
fn = lambda: dataset_ops.Dataset.range(100)
else:
def fn():
dataset = dataset_ops.Dataset.range(100)
it = dataset.make_one_shot_iterator()
return it.get_next
expected_values = [[i+j for j in range(num_gpus)]
for i in range(0, 100, num_gpus)]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=num_gpus,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0) # only one worker and pipeline for local.
self._test_input_fn_iterator(
None,
None,
num_gpus,
input_fn,
expected_values,
test_reinitialize=use_dataset,
ignore_order=not use_dataset,
use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testGlobalStepUpdate(self, use_core_strategy):
strategy, _, _ = create_test_objects(use_core_strategy=use_core_strategy)
self._test_global_step_update(strategy)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testUpdateConfigProtoMultiWorker(self, use_core_strategy):
strategy, _, _ = create_test_objects(
cluster_spec=self._cluster_spec,
task_type='worker',
task_id=1,
num_gpus=2,
use_core_strategy=use_core_strategy)
config_proto = config_pb2.ConfigProto(device_filters=['to_be_overridden'])
new_config = strategy.update_config_proto(config_proto)
# Verify device filters.
self.assertEqual(['/job:worker/task:1', '/job:ps'],
new_config.device_filters)
# Verify isolate_session_state
self.assertFalse(new_config.isolate_session_state)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testUpdateConfigProtoLocal(self, use_core_strategy):
strategy, _, _ = create_test_objects(
num_gpus=2, use_core_strategy=use_core_strategy)
config_proto = config_pb2.ConfigProto()
new_config = strategy.update_config_proto(config_proto)
# Verify isolate_session_state
self.assertTrue(new_config.isolate_session_state)
@combinations.generate(combinations.combine(required_gpus=[2]))
def testAllReduceSum(self):
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2)
self._test_all_reduce_sum(distribution)
@combinations.generate(combinations.combine(required_gpus=[2]))
def testAllReduceSumGradients(self):
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2)
self._test_all_reduce_sum_gradients(distribution)
@combinations.generate(combinations.combine(required_gpus=[2]))
def testAllReduceSumGradientTape(self):
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2)
self._test_all_reduce_sum_gradient_tape(distribution)
@combinations.generate(combinations.combine(required_gpus=[2]))
def testAllReduceMean(self):
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2)
self._test_all_reduce_mean(distribution)
@combinations.generate(combinations.combine(required_gpus=[2]))
def testAllReduceMeanGradients(self):
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2)
self._test_all_reduce_mean_gradients(distribution)
@combinations.generate(combinations.combine(required_gpus=[2]))
def testAllReduceMeanGradientTape(self):
distribution = parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2)
self._test_all_reduce_mean_gradient_tape(distribution)
def testTrainableVariables(self):
distribution = parameter_server_strategy.ParameterServerStrategy()
self._test_trainable_variable(distribution)
class ParameterServerStrategyWithChiefTest(ParameterServerStrategyTestBase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=2, has_chief=True)
cls._default_target = 'grpc://' + cls._cluster_spec[CHIEF][0]
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testSimpleBetweenGraph(self, use_core_strategy):
self._run_between_graph_clients(
self._test_simple_increment,
self._cluster_spec,
context.num_gpus(),
use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(
mode=['graph'], num_gpus=[0, 1, 2], use_core_strategy=[True, False]))
def testMinimizeLossGraph(self, num_gpus, use_core_strategy):
self._run_between_graph_clients(
self._test_minimize_loss_graph,
self._cluster_spec,
num_gpus,
use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testGlobalStepIsWrappedOnTwoGPUs(self, use_core_strategy):
strategy, _, _ = create_test_objects(
num_gpus=2, use_core_strategy=use_core_strategy)
with ops.Graph().as_default(), strategy.scope():
created_step = training_util.create_global_step()
get_step = training_util.get_global_step()
self.assertEqual(created_step, get_step,
msg=('created_step %s type %s vs. get_step %s type %s' %
(id(created_step), created_step.__class__.__name__,
id(get_step), get_step.__class__.__name__)))
self.assertIs(values.AggregatingVariable, type(created_step))
self.assertIs(values.AggregatingVariable, type(get_step))
self.assertIs(strategy, created_step.distribute_strategy)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testGlobalStepIsNotWrappedOnOneGPU(self, use_core_strategy):
strategy, _, _ = create_test_objects(
num_gpus=1, use_core_strategy=use_core_strategy)
with ops.Graph().as_default(), strategy.scope():
created_step = training_util.create_global_step()
get_step = training_util.get_global_step()
self.assertEqual(created_step, get_step,
msg=('created_step %s type %s vs. get_step %s type %s' %
(id(created_step), created_step.__class__.__name__,
id(get_step), get_step.__class__.__name__)))
self.assertIs(resource_variable_ops.ResourceVariable, type(created_step))
self.assertIs(resource_variable_ops.ResourceVariable, type(get_step))
      # All variables have a _distribute_strategy attribute. Only the variable
      # subclasses created by distribution strategies expose it publicly.
self.assertFalse(hasattr(strategy, 'distribute_strategy'))
self.assertIs(strategy, created_step._distribute_strategy)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testValueContainer(self, use_core_strategy):
strategy, _, _ = create_test_objects(
num_gpus=2, use_core_strategy=use_core_strategy)
with ops.Graph().as_default(), strategy.scope():
def f():
with backprop.GradientTape() as tape:
v = variable_scope.get_variable('v', initializer=10.0)
_ = v * v
v, = tape.watched_variables()
w = strategy.extended.value_container(v)
self.assertIs(values.AggregatingVariable, type(w))
strategy.extended.call_for_each_replica(f)
class CentralStorageStrategyTest(strategy_test_lib.DistributionTestBase,
parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager'],
use_core_strategy=[True, False],
required_gpus=2))
def testNumpyDataset(self, use_core_strategy):
strategy, _, _ = create_test_objects(
num_gpus=2, use_core_strategy=use_core_strategy)
self._test_numpy_dataset(strategy)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/distribute/python/parameter_server_strategy_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class CollectiveAllReduceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
# TODO(yuefengz): support in-graph replication.
class CollectiveAllReduceStrategy(distribute_lib.StrategyV1):
"""Distribution strategy that uses collective ops for all-reduce.
*** contrib version ***
It is similar to the MirroredStrategy but it uses collective ops for
reduction.
  When `cluster_spec` is given by the `configure` method, it turns into the
  multi-worker version that works on multiple workers with between-graph
  replication.
  Note: `configure` will be called by higher-level APIs if running in a
  distributed environment.
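  A minimal sketch of how this is typically wired up (the cluster spec values
  below are placeholders, and higher-level APIs normally call `configure`):
    strategy = CollectiveAllReduceStrategy(num_gpus_per_worker=1)
    strategy.configure(
        cluster_spec={"worker": ["host1:2222", "host2:2222"]},
        task_type="worker",
        task_id=0)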
"""
def __init__(self,
num_gpus_per_worker=0,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO):
"""Initializes the object.
Args:
      num_gpus_per_worker: number of local GPUs or GPUs per worker; the default
        is 0, meaning CPU only.
communication: optional Enum of type
`distribute.experimental.CollectiveCommunication`. This provides a way
for the user to override the choice of collective op communication.
Possible values include `AUTO`, `RING`, and `NCCL`.
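    For example, to force NCCL collectives (a sketch mirroring this
    constructor's signature):
      CollectiveAllReduceStrategy(
          num_gpus_per_worker=2,
          communication=cross_device_ops_lib.CollectiveCommunication.NCCL)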
"""
super(CollectiveAllReduceStrategy, self).__init__(
CollectiveAllReduceExtended(
self,
num_gpus_per_worker=num_gpus_per_worker,
communication=communication))
class CollectiveAllReduceExtended(
collective_all_reduce_strategy.CollectiveAllReduceExtended):
"""Implementation of CollectiveAllReduceStrategy."""
def __init__(self,
container_strategy,
num_gpus_per_worker,
communication):
# Use TFConfigClusterResolver to parse TF_CONFIG. We don't want to change
# the constructor's interface to allow customized cluster resolver. Use
# SimpleClusterResolver to override num_accelerators.
tfconfig = TFConfigClusterResolver()
cluster_resolver = SimpleClusterResolver(
cluster_spec=tfconfig.cluster_spec(),
task_type=tfconfig.task_type,
task_id=tfconfig.task_id,
num_accelerators={"GPU": num_gpus_per_worker},
rpc_layer=tfconfig.rpc_layer)
super(CollectiveAllReduceExtended, self).__init__(
container_strategy,
communication=communication,
cluster_resolver=cluster_resolver)
|
tensorflow-master
|
tensorflow/contrib/distribute/python/collective_all_reduce_strategy.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the contrib MirroredStrategy specific features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.eager import test
contrib_mirrored_strategies = [
combinations.NamedDistribution(
"ContribMirrored1CPU",
lambda: mirrored_strategy.MirroredStrategy(["/cpu:0"])),
combinations.NamedDistribution(
"ContribMirrored1GPU",
lambda: mirrored_strategy.MirroredStrategy(["/gpu:0"]),
required_gpus=1),
combinations.NamedDistribution(
"ContribMirroredCPUAndGPU",
lambda: mirrored_strategy.MirroredStrategy(["/cpu:0", "/gpu:0"]),
required_gpus=1),
combinations.NamedDistribution(
"ContribMirrored2GPU",
lambda: mirrored_strategy.MirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2),
]
def all_strategy_and_eager_plus_graph():
return combinations.times(
combinations.combine(distribution=contrib_mirrored_strategies),
combinations.combine(mode=["eager", "graph"]))
class ContribMirroredStrategyTest(test.TestCase, parameterized.TestCase):
def _initialize_and_evaluate_iterator(self, iterator):
if context.executing_eagerly():
iterator.initialize()
res = iterator.get_next()
if isinstance(res, values.PerReplica):
res = res.values
else:
with self.cached_session() as sess:
sess.run(iterator.initialize())
res = iterator.get_next()
if isinstance(res, values.PerReplica):
res = sess.run(res.values)
else:
res = sess.run(res)
return res
@combinations.generate(all_strategy_and_eager_plus_graph())
def test_dataset_iterator(self, distribution):
data = np.array([[1, 1], [2, 1], [3, 1], [4, 1]])
dataset = dataset_ops.Dataset.from_tensors(data).repeat()
iterator = distribution.make_dataset_iterator(dataset)
res = self._initialize_and_evaluate_iterator(iterator)
if isinstance(res, tuple):
self.assertLen(res, 2)
self.assertAllEqual(data, res[0])
self.assertAllEqual(data, res[1])
else:
self.assertAllEqual(data, res)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distribute/python/contrib_mirrored_strategy_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for class Monitor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy
from tensorflow.contrib.distribute.python import monitor as monitor_lib
from tensorflow.python.client import session
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import one_device_strategy
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute.single_loss_example import single_loss_example
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
from tensorflow.python.training import gradient_descent
class MonitorTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(
mode=strategy_combinations.graph_and_eager_modes)))
def testTrainNetwork(self, distribution, optimizer_fn):
with distribution.scope():
single_loss_step, layer = single_loss_example(optimizer_fn, distribution)
if context.executing_eagerly():
monitor = monitor_lib.Monitor(single_loss_step, None)
else:
with self.cached_session() as sess:
monitor = monitor_lib.Monitor(single_loss_step, sess)
monitor.run_steps(1)
self.assertEqual(1, len(layer.trainable_variables))
mirrored_weight_variable = layer.trainable_variables[0]
start_error = self.evaluate(mirrored_weight_variable)
start_error = abs(numpy.array(start_error) - 1)
monitor.run_steps(9)
end_error = self.evaluate(mirrored_weight_variable)
end_error = abs(numpy.array(end_error) - 1)
self.assertGreaterEqual(start_error, end_error)
def testPassingASessionInEager(self):
distribution = one_device_strategy.OneDeviceStrategy(
"/device:CPU:0")
step_function, _ = single_loss_example(
lambda: gradient_descent.GradientDescentOptimizer(0.2), distribution)
with session.Session() as sess, context.eager_mode():
with self.assertRaisesRegexp(ValueError, "Should not provide"):
_ = monitor_lib.Monitor(step_function, sess)
def testNotPassingASessionInGraph(self):
distribution = one_device_strategy.OneDeviceStrategy(
"/device:CPU:0")
step_function, _ = single_loss_example(
lambda: gradient_descent.GradientDescentOptimizer(0.2), distribution)
with context.graph_mode(), ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "Should provide"):
_ = monitor_lib.Monitor(step_function, session=None)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distribute/python/monitor_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for running legacy optimizer code with DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute.single_loss_example import minimize_loss_example
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"MirroredCPUAndGPU",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
"Mirrored2GPUs",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
# pylint: disable=g-long-lambda
gradient_descent_optimizer_v2_fn = combinations.NamedObject(
"GradientDescentV2", lambda: gradient_descent_v2.GradientDescentOptimizer(
0.2))
adagrad_optimizer_v2_fn = combinations.NamedObject(
"AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]
def distributions_and_v2_optimizers():
"""DistributionStrategies and V2 Optimizers."""
return combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
optimizer_fn=optimizers_v2)
class MinimizeLossOptimizerV2Test(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
distributions_and_v2_optimizers(),
combinations.combine(mode=["graph"], use_callable_loss=[True, False])
+ combinations.combine(mode=["eager"], use_callable_loss=[True])))
def testTrainNetwork(self, distribution, optimizer_fn,
use_callable_loss=True):
with distribution.scope():
optimizer = optimizer_fn()
model_fn, dataset_fn, layer = minimize_loss_example(
optimizer, use_bias=True, use_callable_loss=use_callable_loss)
iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())
def run_step():
return control_flow_ops.group(
distribution.experimental_local_results(
distribution.extended.call_for_each_replica(
model_fn, args=(iterator.get_next(),))))
if not context.executing_eagerly():
with self.cached_session() as sess:
sess.run(iterator.initialize())
run_step = sess.make_callable(run_step())
self.evaluate(variables.global_variables_initializer())
weights, biases = [], []
for _ in range(10):
run_step()
weights.append(self.evaluate(layer.kernel))
biases.append(self.evaluate(layer.bias))
error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
self.assertTrue(is_not_increasing)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distribute/python/optimizer_v2_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CollectiveAllReduceStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import collective_all_reduce_strategy as core_collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import training_util
from tensorflow.python.training.server_lib import ClusterSpec
class MockCollectiveAllReduceStrategy(distribute_lib.StrategyV1):
"""Mock the strategy to allow cluster resolver as an argument."""
def __init__(self, cluster_resolver):
super(MockCollectiveAllReduceStrategy, self).__init__(
core_collective_all_reduce_strategy.CollectiveAllReduceExtended(
self,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO,
cluster_resolver=cluster_resolver))
def create_test_objects(cluster_spec=None,
task_type=None,
task_id=None,
num_gpus=None,
use_core_strategy=False):
sess_config = config_pb2.ConfigProto()
if num_gpus is None:
num_gpus = context.num_gpus()
if use_core_strategy:
if cluster_spec and task_type and task_id is not None:
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={'GPU': num_gpus})
target = 'grpc://' + cluster_spec[task_type][task_id]
else:
cluster_resolver = SimpleClusterResolver(
ClusterSpec({}), num_accelerators={'GPU': num_gpus})
target = ''
strategy = MockCollectiveAllReduceStrategy(cluster_resolver)
sess_config = strategy.update_config_proto(sess_config)
else:
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy(
num_gpus_per_worker=num_gpus)
if task_type and task_id is not None:
strategy.configure(
session_config=sess_config,
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id)
target = 'grpc://' + cluster_spec[task_type][task_id]
else:
target = ''
return strategy, target, sess_config
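# Illustrative usage sketch (the cluster addresses below are hypothetical
# placeholders): `create_test_objects` can be driven with an explicit cluster
# spec for a multi-worker run, or with no cluster arguments for a single
# local worker.
#
#   cluster_spec = {'worker': ['localhost:2222', 'localhost:2223']}
#   strategy, target, sess_config = create_test_objects(
#       cluster_spec=cluster_spec, task_type='worker', task_id=0,
#       num_gpus=0, use_core_strategy=True)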
class CollectiveAllReduceStrategyTestBase(
multi_worker_test_base.MultiWorkerTestBase):
collective_key_base = 0
def setUp(self):
# We use a different key_base for each test so that collective keys won't be
# reused.
# TODO(yuefengz, ayushd): enable it to reuse collective keys in different
# tests.
CollectiveAllReduceStrategyTestBase.collective_key_base += 100000
super(CollectiveAllReduceStrategyTestBase, self).setUp()
def _get_test_object(self,
task_type,
task_id,
num_gpus=0,
use_core_strategy=False):
strategy, target, session_config = create_test_objects(
cluster_spec=self._cluster_spec,
task_type=task_type,
task_id=task_id,
num_gpus=num_gpus,
use_core_strategy=use_core_strategy)
collective_keys = cross_device_utils.CollectiveKeys(
group_key_start=10 +
CollectiveAllReduceStrategyTestBase.collective_key_base,
op_instance_key_start=100 +
CollectiveAllReduceStrategyTestBase.collective_key_base,
variable_instance_key_start=10000 +
CollectiveAllReduceStrategyTestBase.collective_key_base)
strategy.extended._collective_keys = collective_keys
strategy.extended._cross_device_ops._collective_keys = collective_keys
return strategy, target, session_config
def _test_minimize_loss_graph(self,
task_type,
task_id,
num_gpus,
use_core_strategy=False):
d, master_target, config = self._get_test_object(
    task_type, task_id, num_gpus, use_core_strategy=use_core_strategy)
with ops.Graph().as_default(), \
self.cached_session(config=config,
target=master_target) as sess, \
d.scope():
l = core.Dense(1, use_bias=False,
name='gpu_%d' % d.extended._num_gpus_per_worker)
def loss_fn(x):
y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
return y * y
# TODO(yuefengz, apassos): eager.backprop.implicit_grad is not safe for
# multiple graphs (b/111216820).
def grad_fn(x):
loss = loss_fn(x)
var_list = (
variables.trainable_variables() + ops.get_collection(
ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
grads = gradients.gradients(loss, var_list)
ret = list(zip(grads, var_list))
return ret
def update(v, g):
return v.assign_sub(0.05 * g, use_locking=True)
one = constant_op.constant([[1.]])
def step():
"""Perform one optimization step."""
# Run forward & backward to get gradients, variables list.
g_v = d.extended.call_for_each_replica(grad_fn, args=[one])
# Update the variables using the gradients and the update() function.
before_list = []
after_list = []
for g, v in g_v:
fetched = d.extended.read_var(v)
before_list.append(fetched)
with ops.control_dependencies([fetched]):
# TODO(yuefengz): support non-Mirrored variable as destinations.
g = d.extended.reduce_to(
reduce_util.ReduceOp.SUM, g, destinations=v)
with ops.control_dependencies(
d.extended.update(v, update, args=(g,), group=False)):
after_list.append(d.extended.read_var(v))
return before_list, after_list
before_out, after_out = step()
if context.num_gpus() < d.extended._num_gpus_per_worker:
return True
sess.run(variables.global_variables_initializer())
for i in range(10):
b, a = sess.run((before_out, after_out))
if i == 0:
before, = b
after, = a
error_before = abs(before - 1)
error_after = abs(after - 1)
# Error should go down
self.assertLess(error_after, error_before)
return error_after < error_before
def _test_complex_model(self,
task_type,
task_id,
num_gpus,
use_core_strategy=False):
d, master_target, config = self._get_test_object(
    task_type, task_id, num_gpus, use_core_strategy=use_core_strategy)
def model_fn():
"""Mnist model with synthetic input."""
data_format = 'channels_last'
input_shape = [28, 28, 1]
l = keras.layers
max_pool = l.MaxPooling2D((2, 2), (2, 2),
padding='same',
data_format=data_format)
model = keras.Sequential([
l.Reshape(target_shape=input_shape, input_shape=(28 * 28,)),
l.Conv2D(
32,
5,
padding='same',
data_format=data_format,
activation=nn.relu), max_pool,
l.Conv2D(
64,
5,
padding='same',
data_format=data_format,
activation=nn.relu), max_pool,
l.Flatten(),
l.Dense(1024, activation=nn.relu),
l.Dropout(0.4),
l.Dense(10)
])
image = random_ops.random_uniform([2, 28, 28])
label = random_ops.random_uniform([2, 1], maxval=10, dtype=dtypes.int32)
logits = model(image, training=True)
# TODO(yuefengz): make loss a callable for eager mode.
loss = losses.sparse_softmax_cross_entropy(labels=label, logits=logits)
optimizer = adam.AdamOptimizer(learning_rate=1e-4)
train_op = optimizer.minimize(loss,
training_util.get_or_create_global_step())
return train_op
with ops.Graph().as_default(), \
self.cached_session(config=config,
target=master_target) as sess:
with d.scope():
train_op = d.extended.call_for_each_replica(model_fn)
train_op = d.group(d.experimental_local_results(train_op))
sess.run(variables.global_variables_initializer())
sess.run(train_op)
return True
def _test_variable_initialization(self,
task_type,
task_id,
num_gpus,
use_core_strategy=False):
distribution, master_target, config = self._get_test_object(
    task_type, task_id, num_gpus, use_core_strategy=use_core_strategy)
with ops.Graph().as_default(), \
self.cached_session(config=config,
target=master_target) as sess, \
distribution.scope():
def model_fn():
x = variable_scope.get_variable(
'x',
shape=(2, 3),
initializer=init_ops.random_uniform_initializer(
1.0, 10.0, dtype=dtypes.float32))
return array_ops.identity(x)
x = distribution.extended.call_for_each_replica(model_fn)
reduced_x = distribution.reduce(reduce_util.ReduceOp.MEAN, x, axis=None)
x = distribution.experimental_local_results(x)[0]
sess.run(variables.global_variables_initializer())
x_value, reduced_x_value = sess.run([x, reduced_x])
self.assertTrue(
np.allclose(x_value, reduced_x_value, atol=1e-5),
msg=('x_value = %r, reduced_x_value = %r' % (x_value,
reduced_x_value)))
return np.allclose(x_value, reduced_x_value, atol=1e-5)
def _test_input_fn_iterator(self,
task_type,
task_id,
num_gpus,
input_fn,
expected_values,
test_reinitialize=True,
ignore_order=False,
use_core_strategy=False):
distribution, master_target, config = self._get_test_object(
    task_type, task_id, num_gpus, use_core_strategy=use_core_strategy)
devices = distribution.extended.worker_devices
with ops.Graph().as_default(), \
self.cached_session(config=config,
target=master_target) as sess:
iterator = distribution.make_input_fn_iterator(input_fn)
sess.run(iterator.initialize())
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = sess.run([values.select_replica(r, next_element)
for r in range(len(devices))])
if ignore_order:
self.assertCountEqual(expected_value, computed_value)
else:
self.assertEqual(expected_value, computed_value)
with self.assertRaises(errors.OutOfRangeError):
next_element = iterator.get_next()
sess.run([values.select_replica(r, next_element)
for r in range(len(devices))])
# After re-initializing the iterator, we should be able to iterate again.
if test_reinitialize:
sess.run(iterator.initialize())
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = sess.run([values.select_replica(r, next_element)
for r in range(len(devices))])
if ignore_order:
self.assertCountEqual(expected_value, computed_value)
else:
self.assertEqual(expected_value, computed_value)
class DistributedCollectiveAllReduceStrategyTest(
CollectiveAllReduceStrategyTestBase,
strategy_test_lib.DistributionTestBase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
"""Create a local cluster with 3 workers."""
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=0)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def test_num_replicas_in_sync(self, use_core_strategy):
distribution, _, _ = create_test_objects(
cluster_spec=self._cluster_spec,
task_type='worker',
task_id=0,
num_gpus=2,
use_core_strategy=use_core_strategy)
num_workers = len(self._cluster_spec.get('chief', []) +
self._cluster_spec.get('worker', []))
self.assertEqual(2 * num_workers,
distribution.num_replicas_in_sync)
@combinations.generate(
combinations.combine(
mode=['graph'],
num_gpus=[0, 1, 2],
required_gpus=1,
use_core_strategy=[True, False]))
def testMinimizeLossGraph(self, num_gpus, use_core_strategy):
self._run_between_graph_clients(
self._test_minimize_loss_graph,
self._cluster_spec,
num_gpus,
use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(
mode=['graph'],
num_gpus=[0, 1, 2],
required_gpus=1,
use_core_strategy=[True, False]))
def testVariableInitialization(self, num_gpus, use_core_strategy):
if context.num_gpus() < num_gpus:
self.skipTest('Not enough GPUs')
self._run_between_graph_clients(
self._test_variable_initialization,
self._cluster_spec,
num_gpus=num_gpus,
use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(
mode=['graph'],
num_gpus=[0, 1, 2],
required_gpus=1,
use_core_strategy=[True, False]))
def testComplexModel(self, num_gpus, use_core_strategy):
if context.num_gpus() < num_gpus:
self.skipTest('Not enough GPUs')
self._run_between_graph_clients(
self._test_complex_model,
self._cluster_spec,
num_gpus=num_gpus,
use_core_strategy=use_core_strategy)
# TODO(yuefengz): Update how we use num_gpus and required_gpus
@combinations.generate(
combinations.combine(
mode=['graph'],
num_gpus=[0, 1, 2],
required_gpus=1,
use_dataset=[True, False],
use_core_strategy=[True, False]))
def testMakeInputFnIterator(self, num_gpus, use_dataset, use_core_strategy):
if context.num_gpus() < num_gpus:
self.skipTest('Not enough GPUs')
if use_dataset:
fn = lambda: dataset_ops.Dataset.range(100)
else:
def fn():
dataset = dataset_ops.Dataset.range(100)
it = dataset.make_one_shot_iterator()
return it.get_next
# We use CPU as the device when num_gpus = 0
devices_per_worker = max(1, num_gpus)
expected_values = [[i+j for j in range(devices_per_worker)]
for i in range(0, 100, devices_per_worker)]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=3*devices_per_worker,
expected_num_input_pipelines=3,
expected_input_pipeline_id=1) # because task_id = 1
self._test_input_fn_iterator(
'worker',
1,
num_gpus,
input_fn,
expected_values,
test_reinitialize=use_dataset,
ignore_order=not use_dataset,
use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testUpdateConfigProto(self, use_core_strategy):
strategy, _, _ = self._get_test_object(
task_type='worker',
task_id=1,
num_gpus=2,
use_core_strategy=use_core_strategy)
config_proto = config_pb2.ConfigProto(device_filters=['to_be_overridden'])
rewrite_options = config_proto.graph_options.rewrite_options
rewrite_options.scoped_allocator_opts.enable_op.append('to_be_removed')
new_config = strategy.update_config_proto(config_proto)
# Verify group leader
self.assertEqual('/job:worker/replica:0/task:0',
new_config.experimental.collective_group_leader)
# Verify device filters.
self.assertEqual(['/job:worker/task:1'], new_config.device_filters)
# Verify rewrite options.
new_rewrite_options = new_config.graph_options.rewrite_options
self.assertEqual(rewriter_config_pb2.RewriterConfig.ON,
new_rewrite_options.scoped_allocator_optimization)
self.assertEqual(['CollectiveReduce'],
new_rewrite_options.scoped_allocator_opts.enable_op)
@combinations.generate(combinations.combine(mode=['eager']))
def testEnableCollectiveOps(self):
mock_called = [False]
# pylint: disable=dangerous-default-value
def mock_enable_collective_ops(server_def, mock_called=mock_called):
self.assertEqual('worker', server_def.job_name)
self.assertEqual(1, server_def.task_index)
self.assertEqual('grpc', server_def.protocol)
mock_called[0] = True
def mock_configure_collective_ops(*args, **kwargs):
del args, kwargs
with test.mock.patch.object(context.context(), 'enable_collective_ops',
mock_enable_collective_ops), \
test.mock.patch.object(context.context(), 'configure_collective_ops',
mock_configure_collective_ops):
strategy, _, _ = self._get_test_object(
task_type='worker', task_id=1, num_gpus=2, use_core_strategy=True)
self.assertTrue(strategy.extended._std_server_started)
self.assertTrue(mock_called[0])
class DistributedCollectiveAllReduceStrategyTestWithChief(
CollectiveAllReduceStrategyTestBase, parameterized.TestCase):
@classmethod
def setUpClass(cls):
"""Create a local cluster with 3 workers and 1 chief."""
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=0, has_chief=True)
@combinations.generate(
combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
def testMinimizeLossGraph(self, num_gpus):
self._run_between_graph_clients(self._test_minimize_loss_graph,
self._cluster_spec, num_gpus)
@combinations.generate(
combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
def testVariableInitialization(self, num_gpus):
if context.num_gpus() < num_gpus:
return
self._run_between_graph_clients(
self._test_variable_initialization,
self._cluster_spec,
num_gpus=num_gpus)
@combinations.generate(
combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
def testComplexModel(self, num_gpus):
if context.num_gpus() < num_gpus:
return
self._run_between_graph_clients(
self._test_complex_model, self._cluster_spec, num_gpus=num_gpus)
class LocalCollectiveAllReduceStrategy(
CollectiveAllReduceStrategyTestBase,
strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.combine(
mode=['graph', 'eager'],
num_gpus=[2, 4],
required_gpus=2,
use_core_strategy=[True, False]))
def testMinimizeLoss(self, num_gpus, use_core_strategy):
# Collective ops don't support a strategy with only one device.
if context.num_gpus() < num_gpus:
self.skipTest('Not enough GPUs')
if context.executing_eagerly():
strategy, _, _ = self._get_test_object(
None, None, num_gpus, use_core_strategy=use_core_strategy)
self._test_minimize_loss_eager(strategy)
else:
self._test_minimize_loss_graph(
None, None, num_gpus, use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(
mode=['graph'],
num_gpus=[2, 4],
required_gpus=2,
use_core_strategy=[True, False]))
def testComplexModel(self, num_gpus, use_core_strategy):
if context.num_gpus() < num_gpus:
self.skipTest('Not enough GPUs')
self._test_complex_model(
None, None, num_gpus, use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(
mode=['graph', 'eager'],
required_gpus=2,
use_dataset=[True, False],
use_core_strategy=[True, False]))
def testMakeInputFnIterator(self, use_dataset, use_core_strategy):
num_gpus = 2
if use_dataset:
fn = lambda: dataset_ops.Dataset.range(5 * num_gpus)
else:
def fn():
dataset = dataset_ops.Dataset.range(5 * num_gpus)
it = dataset.make_one_shot_iterator()
return it.get_next
expected_values = [range(i, i + num_gpus) for i in range(0, 10, num_gpus)]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=num_gpus,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
self._test_input_fn_iterator(
None,
None,
num_gpus,
input_fn,
expected_values,
test_reinitialize=use_dataset,
ignore_order=not use_dataset,
use_core_strategy=use_core_strategy)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testAllReduceSum(self, use_core_strategy):
if context.num_gpus() < 2: self.skipTest('Not enough GPUs')
distribution, target, config = self._get_test_object(
None, None, num_gpus=2, use_core_strategy=use_core_strategy)
with self.cached_session(config=config, target=target):
self._test_all_reduce_sum(distribution)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testAllReduceSumGradients(self, use_core_strategy):
if context.num_gpus() < 2: self.skipTest('Not enough GPUs')
distribution, target, config = self._get_test_object(
None, None, num_gpus=2, use_core_strategy=use_core_strategy)
with self.cached_session(config=config, target=target):
self._test_all_reduce_sum_gradients(distribution)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testAllReduceSumGradientTape(self, use_core_strategy):
if context.num_gpus() < 2: self.skipTest('Not enough GPUs')
distribution, target, config = self._get_test_object(
None, None, num_gpus=2, use_core_strategy=use_core_strategy)
with self.cached_session(config=config, target=target):
self._test_all_reduce_sum_gradient_tape(distribution)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testAllReduceMean(self, use_core_strategy):
if context.num_gpus() < 2: self.skipTest('Not enough GPUs')
distribution, target, config = self._get_test_object(
None, None, num_gpus=2, use_core_strategy=use_core_strategy)
with self.cached_session(config=config, target=target):
self._test_all_reduce_mean(distribution)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testAllReduceMeanGradients(self, use_core_strategy):
if context.num_gpus() < 2: self.skipTest('Not enough GPUs')
distribution, target, config = self._get_test_object(
None, None, num_gpus=2, use_core_strategy=use_core_strategy)
with self.cached_session(config=config, target=target):
self._test_all_reduce_mean_gradients(distribution)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testAllReduceMeanGradientTape(self, use_core_strategy):
if context.num_gpus() < 2: self.skipTest('Not enough GPUs')
distribution, target, config = self._get_test_object(
None, None, num_gpus=2, use_core_strategy=use_core_strategy)
with self.cached_session(config=config, target=target):
self._test_all_reduce_mean_gradient_tape(distribution)
@combinations.generate(
combinations.combine(mode=['graph'], use_core_strategy=[True, False]))
def testNumpyIterator(self, use_core_strategy):
num_gpus = 2
if context.num_gpus() < num_gpus:
self.skipTest('Not enough GPUs')
strategy, _, _ = self._get_test_object(
None, None, num_gpus=num_gpus, use_core_strategy=use_core_strategy)
self._test_numpy_iterator(strategy)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/distribute/python/collective_all_reduce_strategy_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test base for tf.keras Models in multi-worker mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
# pylint: disable=g-direct-tensorflow-import
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy as collective_strategy
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import context
from tensorflow.python.platform import test
_original_run_std_server = dc._run_std_server # pylint: disable=protected-access
# Used as a decorator on test methods.
run_sync_strategies = combinations.generate(
combinations.combine(
mode=['graph'],
strategy_cls=[
collective_strategy.CollectiveAllReduceStrategy,
],
required_gpus=[0, 1]))
# Used as a decorator on test methods.
run_async_strategies = combinations.generate(
combinations.combine(
mode=['graph'],
strategy_cls=[parameter_server_strategy.ParameterServerStrategy],
required_gpus=[0, 1]))
def get_strategy_object(strategy_cls):
return strategy_cls(num_gpus_per_worker=context.num_gpus())
# TODO(omalleyt): Merge with keras_multiworker_callback_test
class KerasIndependentWorkerTestBase(
multi_worker_test_base.IndependentWorkerTestBase):
"""Test base for simulating Keras Multi-Worker in threads."""
def _make_mock_run_std_server(self):
thread_local = threading.local()
def _mock_run_std_server(*args, **kwargs):
ret = _original_run_std_server(*args, **kwargs)
# Wait for all std servers to be brought up in order to reduce the chance
# of remote sessions taking local ports that have been assigned to std
# servers. Only call this barrier the first time this function is run for
# each thread.
if not getattr(thread_local, 'server_started', False):
self._barrier.wait()
thread_local.server_started = True
return ret
return _mock_run_std_server
def run_independent_workers(self,
worker_fn,
strategy_cls,
num_workers,
num_ps=None,
**kwargs):
cluster_spec = multi_worker_test_base.create_cluster_spec(
num_workers=num_workers, num_ps=num_ps, test_obj=self)
self._barrier = dc._Barrier(num_workers + (num_ps or 0)) # pylint: disable=protected-access
def _worker_fn(**kwargs):
"""Runs the worker function in a thread."""
with test.mock.patch.object(dc, '_run_std_server',
self._make_mock_run_std_server()):
strategy = get_strategy_object(strategy_cls)
with strategy.scope():
return worker_fn(**kwargs)
threads = self.run_multiple_tasks_in_threads(_worker_fn, cluster_spec,
**kwargs)
strategy = get_strategy_object(strategy_cls)
if strategy.extended.experimental_between_graph:
threads_to_join = threads.get('chief', []) + threads.get('worker', [])
else:
threads_to_join = [
threads['chief'][0] if 'chief' in threads else threads['worker'][0]
]
self.join_independent_workers(threads_to_join)
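# Illustrative usage sketch (`_hypothetical_worker_fn` is a placeholder, not
# defined in this module): a subclass test builds and fits a tf.keras model
# inside its worker_fn, and this base class runs one copy of it per simulated
# worker, each in its own thread under the chosen strategy's scope.
#
#   self.run_independent_workers(
#       _hypothetical_worker_fn,
#       collective_strategy.CollectiveAllReduceStrategy,
#       num_workers=2)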
|
tensorflow-master
|
tensorflow/contrib/distribute/python/keras_multi_worker_test_base.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Distribution Strategy.
This is experimental. It's not ready for general use.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.distribute.tpu_strategy import TPUStrategyV1 as TPUStrategy
from tensorflow.python.tpu.tpu_strategy_util import initialize_tpu_system
|
tensorflow-master
|
tensorflow/contrib/distribute/python/tpu_strategy.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class OneDeviceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import one_device_strategy
OneDeviceStrategy = one_device_strategy.OneDeviceStrategyV1
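# Illustrative usage sketch (the device string below is only an example):
# OneDeviceStrategy places all variables and computation on a single device.
#
#   strategy = OneDeviceStrategy('/device:GPU:0')
#   with strategy.scope():
#     ...  # variables and ops created here are placed on that device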
|
tensorflow-master
|
tensorflow/contrib/distribute/python/one_device_strategy.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple example to test the a DistributionStrategy with Estimators.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.keras import metrics as metrics_module
def build_model_fn_optimizer():
"""Simple model_fn with optimizer."""
# TODO(anjalisridhar): Move this inside the model_fn once OptimizerV2 is
# done?
optimizer = tf.train.GradientDescentOptimizer(0.2)
def model_fn(features, labels, mode): # pylint: disable=unused-argument
"""model_fn which uses a single unit Dense layer."""
# You can also use the Flatten layer if you want to test a model without any
# weights.
layer = tf.layers.Dense(1, use_bias=True)
logits = layer(features)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"logits": logits}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
def loss_fn():
y = tf.reshape(logits, []) - tf.constant(1.)
return y * y
if mode == tf.estimator.ModeKeys.EVAL:
acc_obj = metrics_module.BinaryAccuracy()
acc_obj.update_state(labels, labels)
return tf.estimator.EstimatorSpec(
mode, loss=loss_fn(), eval_metric_ops={"Accuracy": acc_obj})
assert mode == tf.estimator.ModeKeys.TRAIN
global_step = tf.train.get_global_step()
train_op = optimizer.minimize(loss_fn(), global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss_fn(), train_op=train_op)
return model_fn
def main(_):
distribution = tf.contrib.distribute.MirroredStrategy(
["/device:GPU:0", "/device:GPU:1"])
config = tf.estimator.RunConfig(train_distribute=distribution,
eval_distribute=distribution)
# Since there are 2 devices and 10 samples, we set steps=5.
steps = 5
def train_input_fn():
features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)
labels = tf.data.Dataset.from_tensors([1.]).repeat(10)
return tf.data.Dataset.zip((features, labels))
estimator = tf.estimator.Estimator(
model_fn=build_model_fn_optimizer(), config=config)
estimator.train(input_fn=train_input_fn, steps=steps)
def eval_input_fn():
features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)
labels = tf.data.Dataset.from_tensors([1.]).repeat(10)
return tf.data.Dataset.zip((features, labels))
eval_result = estimator.evaluate(input_fn=eval_input_fn, steps=steps)
print("Eval result: {}".format(eval_result))
assert eval_result["Accuracy"] == 1.0
def predict_input_fn():
predict_features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)
return predict_features
prediction_iterable = estimator.predict(input_fn=predict_input_fn)
# Create a list containing each of the prediction dictionaries that map
# the key 'logits' to an array of model outputs.
predictions = [next(prediction_iterable) for _ in range(10)]
print("Prediction results: {}".format(predictions))
if __name__ == "__main__":
tf.app.run()
|
tensorflow-master
|
tensorflow/contrib/distribute/python/examples/simple_estimator_example.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training tf.keras Model using MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
def input_fn():
x = np.random.random((1024, 10))
y = np.random.randint(2, size=(1024, 1))
x = tf.cast(x, tf.float32)
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(10)
dataset = dataset.batch(32)
return dataset
def main(args):
if len(args) < 2:
print('You must specify model_dir for checkpoints such as'
' /tmp/tfkeras_example/.')
return
model_dir = args[1]
print('Using %s to store checkpoints.' % model_dir)
# Define a Keras Model.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(16, activation='relu', input_shape=(10,)))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
# Compile the model.
optimizer = tf.train.GradientDescentOptimizer(0.2)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
model.summary()
tf.keras.backend.set_learning_phase(True)
# Define a DistributionStrategy and convert the Keras Model to an
# Estimator that utilizes the DistributionStrategy.
strategy = tf.contrib.distribute.MirroredStrategy(
['/device:GPU:0', '/device:GPU:1'])
config = tf.estimator.RunConfig(
train_distribute=strategy, eval_distribute=strategy)
keras_estimator = tf.keras.estimator.model_to_estimator(
keras_model=model, config=config, model_dir=model_dir)
# Train and evaluate the model.
keras_estimator.train(input_fn=input_fn, steps=10)
eval_result = keras_estimator.evaluate(input_fn=input_fn)
print('Eval result: {}'.format(eval_result))
if __name__ == '__main__':
tf.app.run(argv=sys.argv)
|
tensorflow-master
|
tensorflow/contrib/distribute/python/examples/keras_model_with_estimator.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example training a Keras Model using MirroredStrategy and native APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.keras.optimizer_v2 import rmsprop
NUM_CLASSES = 10
def get_input_datasets(use_bfloat16=False):
"""Downloads the MNIST dataset and creates train and eval dataset objects.
Args:
use_bfloat16: Boolean to determine if input should be cast to bfloat16
Returns:
Train dataset, eval dataset and input shape.
"""
# input image dimensions
img_rows, img_cols = 28, 28
cast_dtype = tf.bfloat16 if use_bfloat16 else tf.float32
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
if tf.keras.backend.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES)
# train dataset
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = train_ds.repeat()
train_ds = train_ds.map(lambda x, y: (tf.cast(x, cast_dtype), y))
train_ds = train_ds.batch(64, drop_remainder=True)
# eval dataset
eval_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
eval_ds = eval_ds.repeat()
eval_ds = eval_ds.map(lambda x, y: (tf.cast(x, cast_dtype), y))
eval_ds = eval_ds.batch(64, drop_remainder=True)
return train_ds, eval_ds, input_shape
def get_model(input_shape):
"""Builds a Sequential CNN model to recognize MNIST digits.
Args:
input_shape: Shape of the input depending on the `image_data_format`.
Returns:
a Keras model
"""
# Define a CNN model to recognize MNIST digits.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'))
return model
def main(_):
# Build the train and eval datasets from the MNIST data. Also return the
# input shape which is constructed based on the `image_data_format`
# i.e. channels_first or channels_last.
tf.enable_eager_execution()
train_ds, eval_ds, input_shape = get_input_datasets()
# Instantiate the MirroredStrategy object. If we don't specify `num_gpus` or
# the `devices` argument then all the GPUs available on the machine are used.
# TODO(priyag): Use `tf.distribute.MirroredStrategy` once available.
strategy = mirrored_strategy.MirroredStrategy(['/gpu:0', '/cpu:0'])
# Create and compile the model under Distribution strategy scope.
# `fit`, `evaluate` and `predict` will be distributed based on the strategy
# the model was compiled with.
with strategy.scope():
model = get_model(input_shape)
optimizer = rmsprop.RMSProp(learning_rate=0.001)
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer=optimizer,
metrics=['accuracy'])
# Train the model with the train dataset.
model.fit(x=train_ds, epochs=20, steps_per_epoch=468)
# Evaluate the model with the eval dataset.
score = model.evaluate(eval_ds, steps=10, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
if __name__ == '__main__':
tf.app.run()
|
tensorflow-master
|
tensorflow/contrib/distribute/python/examples/keras_mnist.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run MNIST on multiple GPUs on using MirroredStrategy with eager execution.
By default, runs on all available GPUs, or CPU if no GPUs are available.
NOTE: Currently, this takes more time than when running MNIST in eager without
MirroredStrategy because of a number overheads. Therefore, this is just a
proof of concept right now and cannot be used to actually scale up training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
flags.DEFINE_string("tpu", None, "Name of the TPU to use.")
flags.DEFINE_integer("batch_size", 64,
"What should be the size of each batch?")
flags.DEFINE_integer("num_epochs", 10, "How many epochs to run?")
flags.DEFINE_float("learning_rate", 0.01, "Learning Rate")
flags.DEFINE_float("momentum", 0.5, "SGD momentum")
FLAGS = flags.FLAGS
NUM_TRAIN_IMAGES = 60000
def create_model():
max_pool = tf.keras.layers.MaxPooling2D((2, 2), (2, 2), padding="same")
# The model consists of a sequential chain of layers, so tf.keras.Sequential
# (a subclass of tf.keras.Model) makes for a compact description.
return tf.keras.Sequential([
tf.keras.layers.Reshape(
target_shape=[28, 28, 1],
input_shape=(28, 28,)),
tf.keras.layers.Conv2D(2, 5, padding="same", activation=tf.nn.relu),
max_pool,
tf.keras.layers.Conv2D(4, 5, padding="same", activation=tf.nn.relu),
max_pool,
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(32, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.4),
tf.keras.layers.Dense(10)])
def compute_loss(logits, labels):
loss = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
# Scale loss by global batch size.
return loss * (1. / FLAGS.batch_size)
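# Note on the scaling above: each replica sums its per-example losses, so
# dividing by the global batch size makes the sum of the replica gradients
# equal to the gradient of the mean loss over the whole batch. For example,
# with batch_size=64 split across 8 replicas, each replica sums 8 per-example
# losses and scales the result by 1/64.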
def mnist_datasets():
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Numpy defaults to dtype=float64; TF defaults to float32. Stick with float32.
x_train, x_test = x_train / np.float32(255), x_test / np.float32(255)
y_train, y_test = y_train.astype(np.int64), y_test.astype(np.int64)
# TODO(priyag): `strategy.make_numpy_iterator` can be used directly instead of
# converting to datasets.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
return train_dataset, test_dataset
def main(argv):
"""Run a CNN model on MNIST data to demonstrate DistributedStrategies."""
del argv # Unused.
tf.disable_v2_behavior()
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu)
strategy = tf.contrib.distribute.TPUStrategy(cluster_resolver)
with strategy.scope():
train_ds, test_ds = mnist_datasets()
train_ds = train_ds.shuffle(NUM_TRAIN_IMAGES).batch(FLAGS.batch_size)
test_ds = test_ds.batch(FLAGS.batch_size)
model = create_model()
optimizer = tf.keras.optimizers.SGD(FLAGS.learning_rate, FLAGS.momentum)
training_loss = tf.keras.metrics.Mean("training_loss", dtype=tf.float32)
training_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
"training_accuracy", dtype=tf.float32)
test_loss = tf.keras.metrics.Mean("test_loss", dtype=tf.float32)
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
"test_accuracy", dtype=tf.float32)
def train_step(inputs): # pylint: disable=missing-docstring
images, labels = inputs
with tf.GradientTape() as tape:
logits = model(images, training=True)
loss = compute_loss(logits, labels)
grads = tape.gradient(loss, model.variables)
update_vars = optimizer.apply_gradients(zip(grads, model.variables))
update_loss = training_loss.update_state(loss)
update_accuracy = training_accuracy.update_state(labels, logits)
with tf.control_dependencies([update_vars, update_loss, update_accuracy]):
return tf.identity(loss)
def test_step(inputs):
images, labels = inputs
logits = model(images, training=False)
loss = compute_loss(logits, labels)
update_loss = test_loss.update_state(loss)
update_accuracy = test_accuracy.update_state(labels, logits)
with tf.control_dependencies([update_loss, update_accuracy]):
return tf.identity(loss)
train_iterator = strategy.make_dataset_iterator(train_ds)
test_iterator = strategy.make_dataset_iterator(test_ds)
dist_train = strategy.unwrap(
strategy.experimental_run(train_step, train_iterator))
dist_test = strategy.unwrap(
strategy.experimental_run(test_step, test_iterator))
training_loss_result = training_loss.result()
training_accuracy_result = training_accuracy.result()
test_loss_result = test_loss.result()
test_accuracy_result = test_accuracy.result()
tf.contrib.distribute.initialize_tpu_system(cluster_resolver)
train_iterator_init = train_iterator.initialize()
test_iterator_init = test_iterator.initialize()
all_variables = (
tf.global_variables() +
training_loss.variables +
training_accuracy.variables +
test_loss.variables +
test_accuracy.variables)
with tf.Session(cluster_resolver.master()) as session:
session.run([v.initializer for v in all_variables])
for epoch in range(0, FLAGS.num_epochs):
# Train
print("Starting epoch {}".format(epoch))
session.run(train_iterator_init)
while True:
try:
session.run(dist_train)
except tf.errors.OutOfRangeError:
break
print("Training loss: {:0.4f}, accuracy: {:0.2f}%".format(
session.run(training_loss_result),
session.run(training_accuracy_result) * 100))
training_loss.reset_states()
training_accuracy.reset_states()
# Test
session.run(test_iterator_init)
while True:
try:
session.run(dist_test)
except tf.errors.OutOfRangeError:
break
print("Test loss: {:0.4f}, accuracy: {:0.2f}%".format(
session.run(test_loss_result),
session.run(test_accuracy_result) * 100))
test_loss.reset_states()
test_accuracy.reset_states()
if __name__ == "__main__":
flags.mark_flag_as_required("tpu")
app.run(main)
|
tensorflow-master
|
tensorflow/contrib/distribute/python/examples/mnist_tf1_tpu.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run MNIST on multiple GPUs on using MirroredStrategy with eager execution.
By default, runs on all available GPUs, or CPU if no GPUs are available.
NOTE: Currently, this takes more time than when running MNIST in eager without
MirroredStrategy because of a number overheads. Therefore, this is just a
proof of concept right now and cannot be used to actually scale up training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
flags.DEFINE_integer("num_gpus", None, "How many GPUs should we run on?"
"Defaults to all available GPUs, otherwise CPU.")
flags.DEFINE_integer("batch_size", 64,
"What should be the size of each batch?")
flags.DEFINE_integer("num_epochs", 10, "How many epochs to run?")
flags.DEFINE_float("learning_rate", 0.01, "Learning Rate")
flags.DEFINE_float("momentum", 0.5, "SGD momentum")
FLAGS = flags.FLAGS
NUM_TRAIN_IMAGES = 60000
def create_model():
max_pool = tf.keras.layers.MaxPooling2D((2, 2), (2, 2), padding="same")
# The model consists of a sequential chain of layers, so tf.keras.Sequential
# (a subclass of tf.keras.Model) makes for a compact description.
return tf.keras.Sequential([
tf.keras.layers.Reshape(
target_shape=[28, 28, 1],
input_shape=(28, 28,)),
tf.keras.layers.Conv2D(2, 5, padding="same", activation=tf.nn.relu),
max_pool,
tf.keras.layers.Conv2D(4, 5, padding="same", activation=tf.nn.relu),
max_pool,
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(32, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.4),
tf.keras.layers.Dense(10)])
def compute_loss(logits, labels):
loss = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
# Scale loss by global batch size.
return loss * (1. / FLAGS.batch_size)
def mnist_datasets(strategy):
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Numpy defaults to dtype=float64; TF defaults to float32. Stick with float32.
x_train, x_test = x_train / np.float32(255), x_test / np.float32(255)
y_train, y_test = y_train.astype(np.int64), y_test.astype(np.int64)
train_dataset = strategy.experimental_make_numpy_dataset((x_train, y_train))
test_dataset = strategy.experimental_make_numpy_dataset((x_test, y_test))
return train_dataset, test_dataset
def main(unused_argv):
"""Run a CNN model on MNIST data to demonstrate DistributedStrategies."""
tf.enable_v2_behavior()
num_gpus = FLAGS.num_gpus
if num_gpus is None:
devices = None
elif num_gpus == 0:
devices = ["/device:CPU:0"]
else:
devices = ["/device:GPU:{}".format(i) for i in range(num_gpus)]
strategy = tf.distribute.MirroredStrategy(devices)
with strategy.scope():
train_ds, test_ds = mnist_datasets(strategy)
train_ds = train_ds.shuffle(NUM_TRAIN_IMAGES).batch(FLAGS.batch_size)
test_ds = test_ds.batch(FLAGS.batch_size)
model = create_model()
optimizer = tf.keras.optimizers.SGD(FLAGS.learning_rate, FLAGS.momentum)
training_loss = tf.keras.metrics.Mean("training_loss", dtype=tf.float32)
training_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
"training_accuracy", dtype=tf.float32)
test_loss = tf.keras.metrics.Mean("test_loss", dtype=tf.float32)
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
"test_accuracy", dtype=tf.float32)
@tf.function
def train_epoch(train_dist_dataset):
"""Training Step."""
def step_fn(images, labels):
with tf.GradientTape() as tape:
logits = model(images, training=True)
loss = compute_loss(logits, labels)
grads = tape.gradient(loss, model.variables)
optimizer.apply_gradients(zip(grads, model.variables))
training_loss.update_state(loss)
training_accuracy.update_state(labels, logits)
for images, labels in train_dist_dataset:
strategy.experimental_run_v2(step_fn, args=(images, labels))
@tf.function
def test_epoch(test_dist_dataset):
"""Testing Step."""
def step_fn(images, labels):
logits = model(images, training=False)
loss = compute_loss(logits, labels)
test_loss.update_state(loss)
test_accuracy.update_state(labels, logits)
for images, labels in test_dist_dataset:
strategy.experimental_run_v2(step_fn, args=(images, labels))
train_dist_dataset = strategy.experimental_distribute_dataset(train_ds)
test_dist_dataset = strategy.experimental_distribute_dataset(test_ds)
for epoch in range(FLAGS.num_epochs):
# Train
print("Starting epoch {}".format(epoch))
train_epoch(train_dist_dataset)
print("Training loss: {:0.4f}, accuracy: {:0.2f}%".format(
training_loss.result(), training_accuracy.result() * 100))
training_loss.reset_states()
training_accuracy.reset_states()
# Test
test_epoch(test_dist_dataset)
print("Test loss: {:0.4f}, accuracy: {:0.2f}%".format(
test_loss.result(), test_accuracy.result() * 100))
test_loss.reset_states()
test_accuracy.reset_states()
if __name__ == "__main__":
app.run(main)
|
tensorflow-master
|
tensorflow/contrib/distribute/python/examples/mnist_eager_multigpu.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for exporter.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import exporter
from tensorflow.contrib.session_bundle import gc
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import saver
FLAGS = flags.FLAGS
GLOBAL_STEP = 222
def tearDownModule():
gfile.DeleteRecursively(test.get_temp_dir())
class SaveRestoreShardedTest(test.TestCase):
def doBasicsOneExportPath(self,
export_path,
clear_devices=False,
global_step=GLOBAL_STEP,
sharded=True,
export_count=1):
# Build a graph with 2 parameter nodes on different devices.
ops.reset_default_graph()
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
# v2 is an unsaved variable derived from v0 and v1. It is used to
# exercise the ability to run an init op when restoring a graph.
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(10, name="v0")
with sess.graph.device("/cpu:1"):
v1 = variables.VariableV1(20, name="v1")
v2 = variables.VariableV1(1, name="v2", trainable=False, collections=[])
assign_v2 = state_ops.assign(v2, math_ops.add(v0, v1))
init_op = control_flow_ops.group(assign_v2, name="init_op")
ops.add_to_collection("v", v0)
ops.add_to_collection("v", v1)
ops.add_to_collection("v", v2)
named_tensor_bindings = {"logical_input_A": v0, "logical_input_B": v1}
signatures = {
"foo":
exporter.regression_signature(
input_tensor=v0, output_tensor=v1),
"generic":
exporter.generic_signature(named_tensor_bindings)
}
asset_filepath_orig = os.path.join(test.get_temp_dir(), "hello42.txt")
asset_file = constant_op.constant(asset_filepath_orig, name="filename42")
ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset_file)
with gfile.GFile(asset_filepath_orig, "w") as f:
f.write("your data here")
assets_collection = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
ignored_asset = os.path.join(test.get_temp_dir(), "ignored.txt")
with gfile.GFile(ignored_asset, "w") as f:
f.write("additional data here")
variables.global_variables_initializer().run()
# Run an export.
save = saver.Saver(
{
"v0": v0,
"v1": v1
},
restore_sequentially=True,
sharded=sharded,
write_version=saver_pb2.SaverDef.V1)
export = exporter.Exporter(save)
compare_def = ops.get_default_graph().as_graph_def()
export.init(
compare_def,
init_op=init_op,
clear_devices=clear_devices,
default_graph_signature=exporter.classification_signature(
input_tensor=v0),
named_graph_signatures=signatures,
assets_collection=assets_collection)
for x in range(export_count):
export.export(
export_path,
constant_op.constant(global_step + x),
sess,
exports_to_keep=gc.largest_export_versions(2))
# Set global_step to the last exported version, as the rest of the test
# uses it to construct the model export path, load the model from it, and
# run verifications. We want to make sure to always use the last exported
# version, as old ones may have been garbage-collected.
global_step += export_count - 1
# Restore graph.
ops.reset_default_graph()
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
save = saver.import_meta_graph(
os.path.join(export_path, constants.VERSION_FORMAT_SPECIFIER %
global_step, constants.META_GRAPH_DEF_FILENAME))
self.assertIsNotNone(save)
meta_graph_def = save.export_meta_graph()
collection_def = meta_graph_def.collection_def
# Validate custom graph_def.
graph_def_any = collection_def[constants.GRAPH_KEY].any_list.value
self.assertEquals(len(graph_def_any), 1)
graph_def = graph_pb2.GraphDef()
graph_def_any[0].Unpack(graph_def)
if clear_devices:
for node in compare_def.node:
node.device = ""
self.assertProtoEquals(compare_def, graph_def)
# Validate init_op.
init_ops = collection_def[constants.INIT_OP_KEY].node_list.value
self.assertEquals(len(init_ops), 1)
self.assertEquals(init_ops[0], "init_op")
# Validate signatures.
signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
default_signature = signatures.default_signature
self.assertEqual(
default_signature.classification_signature.input.tensor_name, "v0:0")
bindings = signatures.named_signatures["generic"].generic_signature.map
self.assertEquals(bindings["logical_input_A"].tensor_name, "v0:0")
self.assertEquals(bindings["logical_input_B"].tensor_name, "v1:0")
read_foo_signature = (
signatures.named_signatures["foo"].regression_signature)
self.assertEquals(read_foo_signature.input.tensor_name, "v0:0")
self.assertEquals(read_foo_signature.output.tensor_name, "v1:0")
# Validate the assets.
assets_any = collection_def[constants.ASSETS_KEY].any_list.value
self.assertEquals(len(assets_any), 1)
asset = manifest_pb2.AssetFile()
assets_any[0].Unpack(asset)
assets_path = os.path.join(export_path,
constants.VERSION_FORMAT_SPECIFIER %
global_step, constants.ASSETS_DIRECTORY,
"hello42.txt")
asset_contents = gfile.GFile(assets_path).read()
self.assertEqual(asset_contents, "your data here")
self.assertEquals("hello42.txt", asset.filename)
self.assertEquals("filename42:0", asset.tensor_binding.tensor_name)
ignored_asset_path = os.path.join(export_path,
constants.VERSION_FORMAT_SPECIFIER %
global_step, constants.ASSETS_DIRECTORY,
"ignored.txt")
self.assertFalse(gfile.Exists(ignored_asset_path))
# Validate graph restoration.
if sharded:
save.restore(sess,
os.path.join(export_path,
constants.VERSION_FORMAT_SPECIFIER %
global_step,
constants.VARIABLES_FILENAME_PATTERN))
else:
save.restore(sess,
os.path.join(export_path,
constants.VERSION_FORMAT_SPECIFIER %
global_step, constants.VARIABLES_FILENAME))
self.assertEqual(10, ops.get_collection("v")[0].eval())
self.assertEqual(20, ops.get_collection("v")[1].eval())
ops.get_collection(constants.INIT_OP_KEY)[0].run()
self.assertEqual(30, ops.get_collection("v")[2].eval())
def testDuplicateExportRaisesError(self):
export_path = os.path.join(test.get_temp_dir(), "export_duplicates")
self.doBasicsOneExportPath(export_path)
self.assertRaises(RuntimeError, self.doBasicsOneExportPath, export_path)
def testBasics(self):
export_path = os.path.join(test.get_temp_dir(), "export")
self.doBasicsOneExportPath(export_path)
def testBasicsNoShard(self):
export_path = os.path.join(test.get_temp_dir(), "export_no_shard")
self.doBasicsOneExportPath(export_path, sharded=False)
def testClearDevice(self):
export_path = os.path.join(test.get_temp_dir(), "export_clear_device")
self.doBasicsOneExportPath(export_path, clear_devices=True)
def testGC(self):
export_path = os.path.join(test.get_temp_dir(), "gc")
self.doBasicsOneExportPath(export_path, global_step=100)
self.assertEquals(gfile.ListDirectory(export_path), ["00000100"])
self.doBasicsOneExportPath(export_path, global_step=101)
self.assertEquals(
sorted(gfile.ListDirectory(export_path)), ["00000100", "00000101"])
self.doBasicsOneExportPath(export_path, global_step=102)
self.assertEquals(
sorted(gfile.ListDirectory(export_path)), ["00000101", "00000102"])
def testExportMultipleTimes(self):
export_path = os.path.join(test.get_temp_dir(), "export_multiple_times")
self.doBasicsOneExportPath(export_path, export_count=10)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/session_bundle/exporter_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# create the directories
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# create a simple parser that pulls the export_version from the directory
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print(every_fifth(path_list)) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
  print(largest_three(path_list))  # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
  print(both(path_list))  # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# delete everything not in 'both'
to_delete = gc.negation(both)
  for p in to_delete(path_list):
gfile.rmtree(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
Path = collections.namedtuple('Path', 'path export_version')
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def largest_export_versions(n):
"""Creates a filter that keeps the largest n export versions.
Args:
n: number of versions to keep.
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
heap = []
for idx, path in enumerate(paths):
if path.export_version is not None:
heapq.heappush(heap, (path.export_version, idx))
keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
return sorted(keepers)
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def one_of_every_n_export_versions(n):
r"""Creates a filter that keeps one of every n export versions.
Args:
n: interval size.
Returns:
A filter function that keeps exactly one path from each interval
[0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
interval the largest is kept.
"""
def keep(paths):
keeper_map = {} # map from interval to largest path seen in that interval
for p in paths:
if p.export_version is None:
# Skip missing export_versions.
continue
# Find the interval (with a special case to map export_version = 0 to
      # interval 0).
interval = math.floor(
(p.export_version - 1) / n) if p.export_version else 0
existing = keeper_map.get(interval, None)
if (not existing) or (existing.export_version < p.export_version):
keeper_map[interval] = p
return sorted(keeper_map.values())
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def mod_export_version(n):
"""Creates a filter that keeps every export that is a multiple of n.
Args:
n: step size.
Returns:
A filter function that keeps paths where export_version % n == 0.
"""
def keep(paths):
keepers = []
for p in paths:
if p.export_version % n == 0:
keepers.append(p)
return sorted(keepers)
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def union(lf, rf):
"""Creates a filter that keeps the union of two filters.
Args:
    lf: first filter.
    rf: second filter.
  Returns:
    A filter function that keeps the union of the paths kept by both filters.
"""
def keep(paths):
l = set(lf(paths))
r = set(rf(paths))
return sorted(list(l|r))
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def negation(f):
"""Negate a filter.
Args:
f: filter function to invert
Returns:
A filter function that returns the negation of f.
"""
def keep(paths):
l = set(paths)
r = set(f(paths))
return sorted(list(l-r))
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def get_paths(base_dir, parser):
"""Gets a list of Paths in a given directory.
Args:
base_dir: directory.
parser: a function which gets the raw Path and can augment it with
information such as the export_version, or ignore the path by returning
None. An example parser may extract the export version from a path
such as "/tmp/exports/100" an another may extract from a full file
name such as "/tmp/checkpoint-99.out".
Returns:
A list of Paths contained in the base directory with the parsing function
applied.
By default the following fields are populated,
- Path.path
The parsing function is responsible for populating,
- Path.export_version
"""
raw_paths = gfile.ListDirectory(base_dir)
paths = []
for r in raw_paths:
p = parser(Path(os.path.join(base_dir, r), None))
if p:
paths.append(p)
return sorted(paths)
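# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of combining the filters above into a retention policy.
# The directory layout ("<base_dir>/<version>") and the helper name
# `_example_paths_to_delete` are hypothetical and exist only for illustration.
def _example_paths_to_delete(base_dir):
  """Returns the Paths under base_dir that a sample GC policy would drop."""
  import re  # Local import; `re` is not needed by the rest of this module.
  def parser(path):
    # Interpret the last path component as the export version, e.g.
    # "<base_dir>/42" -> export_version=42. Non-matching paths are skipped.
    match = re.match(r"^" + re.escape(base_dir) + r"/(\d+)$", path.path)
    if not match:
      return None
    return path._replace(export_version=int(match.group(1)))
  # Keep the five largest versions plus every version divisible by ten;
  # everything else is returned as a candidate for deletion.
  keep = union(largest_export_versions(5), mod_export_version(10))
  return negation(keep)(get_paths(base_dir, parser))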
|
tensorflow-master
|
tensorflow/contrib/session_bundle/gc.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Importer for an exported TensorFlow model.
This module provides a function to create a SessionBundle containing both the
Session and MetaGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util.deprecation import deprecated
@deprecated("2017-06-30",
"No longer supported. Switch to SavedModel immediately.")
def maybe_session_bundle_dir(export_dir):
"""Checks if the model path contains session bundle model.
Args:
export_dir: string path to model checkpoint, for example 'model/00000123'
Returns:
    True if the path contains session bundle model files, i.e.
    META_GRAPH_DEF_FILENAME.
"""
meta_graph_filename = os.path.join(export_dir,
constants.META_GRAPH_DEF_FILENAME)
return file_io.file_exists(meta_graph_filename)
@deprecated("2017-06-30",
"No longer supported. Switch to SavedModel immediately.")
def load_session_bundle_from_path(export_dir,
target="",
config=None,
meta_graph_def=None):
"""Load session bundle from the given path.
  The function reads input from the export_dir, adds the graph data to the
  default graph, and restores the parameters for the session created.
Args:
export_dir: the directory that contains files exported by exporter.
target: The execution engine to connect to. See target in
tf.compat.v1.Session()
config: A ConfigProto proto with configuration options. See config in
tf.compat.v1.Session()
meta_graph_def: optional object of type MetaGraphDef. If this object is
present, then it is used instead of parsing MetaGraphDef from export_dir.
Returns:
session: a tensorflow session created from the variable files.
meta_graph: a meta graph proto saved in the exporter directory.
Raises:
RuntimeError: if the required files are missing or contain unrecognizable
fields, i.e. the exported model is invalid.
"""
if not meta_graph_def:
meta_graph_filename = os.path.join(export_dir,
constants.META_GRAPH_DEF_FILENAME)
if not file_io.file_exists(meta_graph_filename):
raise RuntimeError("Expected meta graph file missing %s" %
meta_graph_filename)
# Reads meta graph file.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
meta_graph_def.ParseFromString(
file_io.read_file_to_string(meta_graph_filename, binary_mode=True))
variables_filename = ""
variables_filename_list = []
checkpoint_sharded = False
variables_index_filename = os.path.join(export_dir,
constants.VARIABLES_INDEX_FILENAME_V2)
checkpoint_v2 = file_io.file_exists(variables_index_filename)
# Find matching checkpoint files.
if checkpoint_v2:
# The checkpoint is in v2 format.
variables_filename_pattern = os.path.join(
export_dir, constants.VARIABLES_FILENAME_PATTERN_V2)
variables_filename_list = file_io.get_matching_files(
variables_filename_pattern)
checkpoint_sharded = True
else:
variables_filename = os.path.join(export_dir, constants.VARIABLES_FILENAME)
if file_io.file_exists(variables_filename):
variables_filename_list = [variables_filename]
else:
variables_filename = os.path.join(export_dir,
constants.VARIABLES_FILENAME_PATTERN)
variables_filename_list = file_io.get_matching_files(variables_filename)
checkpoint_sharded = True
# Prepare the files to restore a session.
if not variables_filename_list:
restore_files = ""
elif checkpoint_v2 or not checkpoint_sharded:
# For checkpoint v2 or v1 with non-sharded files, use "export" to restore
# the session.
restore_files = constants.VARIABLES_FILENAME
else:
restore_files = constants.VARIABLES_FILENAME_PATTERN
assets_dir = os.path.join(export_dir, constants.ASSETS_DIRECTORY)
collection_def = meta_graph_def.collection_def
graph_def = graph_pb2.GraphDef()
if constants.GRAPH_KEY in collection_def:
    # Use the serving graph_def in MetaGraphDef collection_def if it exists.
graph_def_any = collection_def[constants.GRAPH_KEY].any_list.value
if len(graph_def_any) != 1:
raise RuntimeError("Expected exactly one serving GraphDef in : %s" %
meta_graph_def)
else:
graph_def_any[0].Unpack(graph_def)
# Replace the graph def in meta graph proto.
meta_graph_def.graph_def.CopyFrom(graph_def)
ops.reset_default_graph()
sess = session.Session(target, graph=None, config=config)
# Import the graph.
saver = saver_lib.import_meta_graph(meta_graph_def)
# Restore the session.
if restore_files:
saver.restore(sess, os.path.join(export_dir, restore_files))
init_op_tensor = None
if constants.INIT_OP_KEY in collection_def:
init_ops = collection_def[constants.INIT_OP_KEY].node_list.value
if len(init_ops) != 1:
raise RuntimeError("Expected exactly one serving init op in : %s" %
meta_graph_def)
init_op_tensor = ops.get_collection(constants.INIT_OP_KEY)[0]
# Create asset input tensor list.
asset_tensor_dict = {}
if constants.ASSETS_KEY in collection_def:
assets_any = collection_def[constants.ASSETS_KEY].any_list.value
for asset in assets_any:
asset_pb = manifest_pb2.AssetFile()
asset.Unpack(asset_pb)
asset_tensor_dict[asset_pb.tensor_binding.tensor_name] = os.path.join(
assets_dir, asset_pb.filename)
if init_op_tensor:
# Run the init op.
sess.run(fetches=[init_op_tensor], feed_dict=asset_tensor_dict)
return sess, meta_graph_def
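# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of loading a session bundle and running its default
# regression signature. The export directory contents and the helper name
# `_example_run_regression` are hypothetical.
def _example_run_regression(export_dir, serialized_tf_examples):
  """Loads a bundle and runs its default regression signature."""
  sess, meta_graph = load_session_bundle_from_path(export_dir)
  signatures = manifest_pb2.Signatures()
  signatures_any = meta_graph.collection_def[
      constants.SIGNATURES_KEY].any_list.value
  signatures_any[0].Unpack(signatures)
  regression = signatures.default_signature.regression_signature
  with sess:
    # Feed serialized tf.Example protos and fetch the regression output.
    return sess.run(regression.output.tensor_name,
                    {regression.input.tensor_name: serialized_tf_examples})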
|
tensorflow-master
|
tensorflow/contrib/session_bundle/session_bundle.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export a TensorFlow model.
See: go/tf-exporter
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import six
from google.protobuf.any_pb2 import Any
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import gc
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
@deprecated("2017-06-30",
"No longer supported. Switch to SavedModel immediately.")
def gfile_copy_callback(files_to_copy, export_dir_path):
"""Callback to copy files using `gfile.copy` to an export directory.
This method is used as the default `assets_callback` in `Exporter.init` to
copy assets from the `assets_collection`. It can also be invoked directly to
copy additional supplementary files into the export directory (in which case
it is not a callback).
Args:
files_to_copy: A dictionary that maps original file paths to desired
basename in the export directory.
export_dir_path: Directory to copy the files to.
"""
logging.info("Write assets into: %s using gfile_copy.", export_dir_path)
gfile.MakeDirs(export_dir_path)
for source_filepath, basename in files_to_copy.items():
new_path = os.path.join(
compat.as_bytes(export_dir_path), compat.as_bytes(basename))
logging.info("Copying asset %s to path %s.", source_filepath, new_path)
if gfile.Exists(new_path):
# Guard against being restarted while copying assets, and the file
# existing and being in an unknown state.
# TODO(b/28676216): Do some file checks before deleting.
logging.info("Removing file %s.", new_path)
gfile.Remove(new_path)
gfile.Copy(source_filepath, new_path)
@deprecated("2017-06-30",
"No longer supported. Switch to SavedModel immediately.")
def regression_signature(input_tensor, output_tensor):
"""Creates a regression signature.
Args:
input_tensor: Tensor specifying the input to a graph.
output_tensor: Tensor specifying the output of a graph.
Returns:
A Signature message.
"""
signature = manifest_pb2.Signature()
signature.regression_signature.input.tensor_name = input_tensor.name
signature.regression_signature.output.tensor_name = output_tensor.name
return signature
@deprecated("2017-06-30",
"No longer supported. Switch to SavedModel immediately.")
def classification_signature(input_tensor,
classes_tensor=None,
scores_tensor=None):
"""Creates a classification signature.
Args:
input_tensor: Tensor specifying the input to a graph.
classes_tensor: Tensor specifying the output classes of a graph.
scores_tensor: Tensor specifying the scores of the output classes.
Returns:
A Signature message.
"""
signature = manifest_pb2.Signature()
signature.classification_signature.input.tensor_name = input_tensor.name
if classes_tensor is not None:
signature.classification_signature.classes.tensor_name = classes_tensor.name
if scores_tensor is not None:
signature.classification_signature.scores.tensor_name = scores_tensor.name
return signature
@deprecated("2017-06-30",
"No longer supported. Switch to SavedModel immediately.")
def generic_signature(name_tensor_map):
"""Creates a generic signature of name to Tensor name.
Args:
name_tensor_map: Map from logical name to Tensor.
Returns:
A Signature message.
"""
signature = manifest_pb2.Signature()
for name, tensor in six.iteritems(name_tensor_map):
signature.generic_signature.map[name].tensor_name = tensor.name
return signature
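# --- Illustrative sketch (not part of the original module) ---
# A minimal example of building the signature protos that Exporter.init()
# accepts. The tensor arguments, the "x"/"y" keys and the helper name
# `_example_signatures` are placeholders for whatever a real graph defines.
def _example_signatures(input_tensor, output_tensor):
  """Returns signatures suitable for Exporter.init()."""
  default = regression_signature(input_tensor, output_tensor)
  named = {
      "inputs": generic_signature({"x": input_tensor}),
      "outputs": generic_signature({"y": output_tensor}),
  }
  return default, named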
class Exporter(object):
"""Exporter helps package a TensorFlow model for serving.
Args:
saver: Saver object.
"""
def __init__(self, saver):
# Makes a copy of the saver-def and disables garbage-collection, since the
# exporter enforces garbage-collection independently. Specifically, since
# the exporter performs atomic copies of the saver output, it is required
# that garbage-collection via the underlying saver be disabled.
saver_def = saver.as_saver_def()
saver_def.ClearField("max_to_keep")
self._saver = tf_saver.Saver(saver_def=saver_def)
self._has_init = False
self._assets_to_copy = {}
@deprecated("2017-06-30",
"No longer supported. Switch to SavedModel immediately.")
def init(self,
graph_def=None,
init_op=None,
clear_devices=False,
default_graph_signature=None,
named_graph_signatures=None,
assets_collection=None,
assets_callback=gfile_copy_callback):
"""Initialization.
Args:
graph_def: A GraphDef message of the graph to be used in inference.
GraphDef of default graph is used when None.
init_op: Op to be used in initialization.
clear_devices: If device info of the graph should be cleared upon export.
default_graph_signature: Default signature of the graph.
named_graph_signatures: Map of named input/output signatures of the graph.
assets_collection: A collection of constant asset filepath tensors. If set
the assets will be exported into the asset directory.
      assets_callback: callback with two arguments, called during export with
        the list of files to copy and the asset path.
Raises:
RuntimeError: if init is called more than once.
TypeError: if init_op is not an Operation or None.
ValueError: if asset file path tensors are not non-empty constant string
scalar tensors.
"""
# Avoid Dangerous default value []
if named_graph_signatures is None:
named_graph_signatures = {}
assets = []
if assets_collection:
for asset_tensor in assets_collection:
asset_filepath = self._file_path_value(asset_tensor)
if not asset_filepath:
raise ValueError("invalid asset filepath tensor %s" % asset_tensor)
basename = os.path.basename(asset_filepath)
assets.append((basename, asset_tensor))
self._assets_to_copy[asset_filepath] = basename
if self._has_init:
raise RuntimeError("init should be called only once")
self._has_init = True
if graph_def or clear_devices:
copy = graph_pb2.GraphDef()
if graph_def:
copy.CopyFrom(graph_def)
else:
copy.CopyFrom(ops.get_default_graph().as_graph_def())
if clear_devices:
for node in copy.node:
node.device = ""
graph_any_buf = Any()
graph_any_buf.Pack(copy)
ops.add_to_collection(constants.GRAPH_KEY, graph_any_buf)
if init_op:
if not isinstance(init_op, ops.Operation):
raise TypeError("init_op needs to be an Operation: %s" % init_op)
ops.add_to_collection(constants.INIT_OP_KEY, init_op)
signatures_proto = manifest_pb2.Signatures()
if default_graph_signature:
signatures_proto.default_signature.CopyFrom(default_graph_signature)
for signature_name, signature in six.iteritems(named_graph_signatures):
signatures_proto.named_signatures[signature_name].CopyFrom(signature)
signatures_any_buf = Any()
signatures_any_buf.Pack(signatures_proto)
ops.add_to_collection(constants.SIGNATURES_KEY, signatures_any_buf)
for filename, tensor in assets:
asset = manifest_pb2.AssetFile()
asset.filename = filename
asset.tensor_binding.tensor_name = tensor.name
asset_any_buf = Any()
asset_any_buf.Pack(asset)
ops.add_to_collection(constants.ASSETS_KEY, asset_any_buf)
self._assets_callback = assets_callback
@deprecated("2017-06-30",
"No longer supported. Switch to SavedModel immediately.")
def export(self,
export_dir_base,
global_step_tensor,
sess=None,
exports_to_keep=None):
"""Exports the model.
Args:
export_dir_base: A string path to the base export dir.
      global_step_tensor: A Tensor or tensor name providing the
global step counter to append to the export directory path and set
in the manifest version.
sess: A Session to use to save the parameters.
exports_to_keep: a gc.Path filter function used to determine the set of
exports to keep. If set to None, all versions will be kept.
Returns:
The string path to the exported directory.
Raises:
RuntimeError: if init is not called.
RuntimeError: if the export would overwrite an existing directory.
"""
if not self._has_init:
raise RuntimeError("init must be called first")
# Export dir must not end with / or it will break exports to keep. Strip /.
if export_dir_base.endswith("/"):
export_dir_base = export_dir_base[:-1]
global_step = training_util.global_step(sess, global_step_tensor)
export_dir = os.path.join(
compat.as_bytes(export_dir_base),
compat.as_bytes(constants.VERSION_FORMAT_SPECIFIER % global_step))
# Prevent overwriting on existing exports which could lead to bad/corrupt
# storage and loading of models. This is an important check that must be
# done before any output files or directories are created.
if gfile.Exists(export_dir):
raise RuntimeError("Overwriting exports can cause corruption and are "
"not allowed. Duplicate export dir: %s" % export_dir)
# Output to a temporary directory which is atomically renamed to the final
# directory when complete.
tmp_export_dir = compat.as_text(export_dir) + "-tmp"
gfile.MakeDirs(tmp_export_dir)
self._saver.save(
sess,
os.path.join(
compat.as_text(tmp_export_dir),
compat.as_text(constants.EXPORT_BASE_NAME)),
meta_graph_suffix=constants.EXPORT_SUFFIX_NAME)
# Run the asset callback.
if self._assets_callback and self._assets_to_copy:
assets_dir = os.path.join(
compat.as_bytes(tmp_export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY))
gfile.MakeDirs(assets_dir)
self._assets_callback(self._assets_to_copy, assets_dir)
# TODO(b/27794910): Delete *checkpoint* file before rename.
gfile.Rename(tmp_export_dir, export_dir)
if exports_to_keep:
# create a simple parser that pulls the export_version from the directory.
def parser(path):
if os.name == "nt":
match = re.match(
r"^" + export_dir_base.replace("\\", "/") + r"/(\d{8})$",
path.path.replace("\\", "/"))
else:
match = re.match(r"^" + export_dir_base + r"/(\d{8})$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
paths_to_delete = gc.negation(exports_to_keep)
for p in paths_to_delete(gc.get_paths(export_dir_base, parser=parser)):
gfile.DeleteRecursively(p.path)
return export_dir
def _file_path_value(self, path_tensor):
"""Returns the filepath value stored in constant `path_tensor`."""
if not isinstance(path_tensor, ops.Tensor):
raise TypeError("tensor is not a Tensor")
if path_tensor.op.type != "Const":
raise TypeError("Only constants tensor are supported")
if path_tensor.dtype != dtypes.string:
raise TypeError("File paths should be string")
str_value = path_tensor.op.get_attr("value").string_val
if len(str_value) != 1:
raise TypeError("Only scalar tensors are supported")
return str_value[0]
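# --- Illustrative end-to-end sketch (not part of the original module) ---
# A minimal example wiring a Saver, signatures and Exporter together. The toy
# graph (y = w * x), its tensor names and the helper name `_example_export`
# are hypothetical; a real model supplies its own graph and signatures.
def _example_export(export_dir_base, global_step=1):
  """Builds a toy graph and exports one version under export_dir_base."""
  from tensorflow.python.client import session
  from tensorflow.python.ops import array_ops
  from tensorflow.python.ops import math_ops
  from tensorflow.python.ops import variables
  with ops.Graph().as_default():
    x = array_ops.placeholder(dtypes.float32, name="x")
    w = variables.Variable(0.5, name="w")
    y = math_ops.multiply(w, x, name="y")
    step = variables.Variable(global_step, name="step", trainable=False)
    saver = tf_saver.Saver()
    exporter = Exporter(saver)
    exporter.init(
        default_graph_signature=regression_signature(x, y),
        named_graph_signatures={
            "inputs": generic_signature({"x": x}),
            "outputs": generic_signature({"y": y}),
        })
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      return exporter.export(export_dir_base, step, sess)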
|
tensorflow-master
|
tensorflow/contrib/session_bundle/exporter.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for export/import."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
VERSION_FORMAT_SPECIFIER = "%08d"
ASSETS_DIRECTORY = "assets"
EXPORT_BASE_NAME = "export"
EXPORT_SUFFIX_NAME = "meta"
META_GRAPH_DEF_FILENAME = "export.meta"
VARIABLES_FILENAME = "export"
VARIABLES_FILENAME_PATTERN = "export-?????-of-?????"
VARIABLES_FILENAME_PATTERN_V2 = "export.data-?????-of-?????"
VARIABLES_INDEX_FILENAME_V2 = "export.index"
INIT_OP_KEY = "serving_init_op"
SIGNATURES_KEY = "serving_signatures"
ASSETS_KEY = "serving_assets"
GRAPH_KEY = "serving_graph"
REGRESSION_SIGNATURE = "regression_signature"
CLASSIFICATION_SIGNATURE = "classification_signature"
GENERIC_SIGNATURE = "generic_signature"
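# --- Illustrative sketch (not part of the original module) ---
# How these constants combine into on-disk paths for a single export version.
# The helper name and the example arguments are hypothetical; for instance
# ("/tmp/exports", 123) yields "/tmp/exports/00000123/export.meta".
def _example_meta_graph_path(export_dir_base, global_step):
  """Returns the meta graph path for an example export version."""
  import os  # Local import; this module otherwise has no imports.
  version_dir = os.path.join(export_dir_base,
                             VERSION_FORMAT_SPECIFIER % global_step)
  return os.path.join(version_dir, META_GRAPH_DEF_FILENAME)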
|
tensorflow-master
|
tensorflow/contrib/session_bundle/constants.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shim for systems that need to load both SessionBundle and SavedModel.
This is intended to be used during migration to SavedModel.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.session_bundle import constants as legacy_constants
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.contrib.session_bundle import session_bundle
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import meta_graph
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
def _add_input_to_signature_def(tensor_name, map_key, signature_def):
"""Add input tensor to signature_def.
Args:
tensor_name: string name of tensor to add to signature_def inputs
map_key: string key to key into signature_def inputs map
signature_def: object of type meta_graph_pb2.SignatureDef()
  Side effect: adds a TensorInfo with tensor_name to signature_def inputs map
    keyed with map_key.
"""
tensor_info = meta_graph_pb2.TensorInfo(name=tensor_name)
signature_def.inputs[map_key].CopyFrom(tensor_info)
def _add_output_to_signature_def(tensor_name, map_key, signature_def):
"""Add output tensor to signature_def.
Args:
tensor_name: string name of tensor to add to signature_def outputs
map_key: string key to key into signature_def outputs map
signature_def: object of type meta_graph_pb2.SignatureDef()
  Side effect: adds a TensorInfo with tensor_name to signature_def outputs map
    keyed with map_key.
"""
tensor_info = meta_graph_pb2.TensorInfo(name=tensor_name)
signature_def.outputs[map_key].CopyFrom(tensor_info)
def _convert_default_signature_to_signature_def(signatures):
"""Convert default signature to object of type SignatureDef.
Args:
signatures: object of type manifest_pb2.Signatures()
Returns:
object of type SignatureDef which contains a converted version of default
signature from input signatures object
Returns None if signature is of generic type because it cannot be converted
to SignatureDef.
"""
default_signature = signatures.default_signature
signature_def = meta_graph_pb2.SignatureDef()
if (default_signature.WhichOneof("type") ==
legacy_constants.REGRESSION_SIGNATURE):
regression_signature = default_signature.regression_signature
signature_def.method_name = signature_constants.REGRESS_METHOD_NAME
_add_input_to_signature_def(regression_signature.input.tensor_name,
signature_constants.REGRESS_INPUTS,
signature_def)
_add_output_to_signature_def(regression_signature.output.tensor_name,
signature_constants.REGRESS_OUTPUTS,
signature_def)
elif (default_signature.WhichOneof("type") ==
legacy_constants.CLASSIFICATION_SIGNATURE):
classification_signature = default_signature.classification_signature
signature_def.method_name = signature_constants.CLASSIFY_METHOD_NAME
_add_input_to_signature_def(classification_signature.input.tensor_name,
signature_constants.CLASSIFY_INPUTS,
signature_def)
_add_output_to_signature_def(classification_signature.classes.tensor_name,
signature_constants.CLASSIFY_OUTPUT_CLASSES,
signature_def)
_add_output_to_signature_def(classification_signature.scores.tensor_name,
signature_constants.CLASSIFY_OUTPUT_SCORES,
signature_def)
else:
logging.error(
"Only classification and regression default signatures "
"are supported for up-conversion. %s is not "
"supported", default_signature.WhichOneof("type"))
return None
return signature_def
def _convert_named_signatures_to_signature_def(signatures):
"""Convert named signatures to object of type SignatureDef.
Args:
signatures: object of type manifest_pb2.Signatures()
Returns:
object of type SignatureDef which contains a converted version of named
signatures from input signatures object
Raises:
RuntimeError: if input and output named signatures are not of type
GenericSignature
"""
signature_def = meta_graph_pb2.SignatureDef()
input_signature = signatures.named_signatures[
signature_constants.PREDICT_INPUTS]
output_signature = signatures.named_signatures[
signature_constants.PREDICT_OUTPUTS]
# TODO(pdudnik): what if there are other signatures? Mimic cr/140900781 once
# it is submitted.
if (input_signature.WhichOneof("type") != legacy_constants.GENERIC_SIGNATURE
or output_signature.WhichOneof("type") !=
legacy_constants.GENERIC_SIGNATURE):
raise RuntimeError("Named input and output signatures can only be "
"up-converted if they are generic signature. "
"Input signature type is %s, output signature type is "
"%s" % (input_signature.WhichOneof("type"),
output_signature.WhichOneof("type")))
signature_def.method_name = signature_constants.PREDICT_METHOD_NAME
for key, val in input_signature.generic_signature.map.items():
_add_input_to_signature_def(val.tensor_name, key, signature_def)
for key, val in output_signature.generic_signature.map.items():
_add_output_to_signature_def(val.tensor_name, key, signature_def)
return signature_def
def _convert_signatures_to_signature_defs(metagraph_def):
"""Produce default and named upconverted SignatureDef objects from Signatures.
Args:
metagraph_def: object of type meta_graph_pb2.MetaGraphDef containing legacy
format Session Bundle signatures
Returns:
default_signature_def: object of type SignatureDef which contains an
upconverted version of default signatures in metagraph_def
named_signature_def: object of type SignatureDef which contains an
upconverted version of named signatures in metagraph_def
"""
collection_def = metagraph_def.collection_def
signatures_proto = manifest_pb2.Signatures()
signatures = collection_def[legacy_constants.SIGNATURES_KEY].any_list.value[0]
signatures.Unpack(signatures_proto)
default_signature_def = None
named_signature_def = None
if signatures_proto.HasField("default_signature"):
default_signature_def = _convert_default_signature_to_signature_def(
signatures_proto)
if len(signatures_proto.named_signatures) > 1:
named_signature_def = _convert_named_signatures_to_signature_def(
signatures_proto)
return default_signature_def, named_signature_def
def _load_saved_model_from_session_bundle_path(export_dir, target, config):
"""Load legacy TF Exporter/SessionBundle checkpoint.
Args:
export_dir: the directory that contains files exported by exporter.
target: The execution engine to connect to. See target in
tf.compat.v1.Session()
config: A ConfigProto proto with configuration options. See config in
tf.compat.v1.Session()
Returns:
session: a tensorflow session created from the variable files.
metagraph_def: The `MetaGraphDef` protocol buffer loaded in the provided
session. This can be used to further extract signature-defs,
collection-defs, etc.
  This model is up-converted to SavedModel format. Specifically, the
  metagraph_def's SignatureDef field is populated with Signatures converted
  from the legacy signatures contained within CollectionDef.
Raises:
RuntimeError: If metagraph already contains signature_def and cannot be
up-converted.
"""
meta_graph_filename = os.path.join(export_dir,
legacy_constants.META_GRAPH_DEF_FILENAME)
metagraph_def = meta_graph.read_meta_graph_file(meta_graph_filename)
if metagraph_def.signature_def:
raise RuntimeError("Legacy graph contains signature def, unable to "
"up-convert.")
# Add SignatureDef to metagraph.
default_signature_def, named_signature_def = (
_convert_signatures_to_signature_defs(metagraph_def))
if default_signature_def:
metagraph_def.signature_def[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY].CopyFrom(
default_signature_def)
if named_signature_def:
signature_def_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
if default_signature_def:
signature_def_key += "_from_named"
metagraph_def.signature_def[signature_def_key].CopyFrom(named_signature_def)
# We cannot just output session we loaded with older metagraph_def and
# up-converted metagraph definition because Session has an internal object of
# type Graph which is populated from meta_graph_def. If we do not create
# session with our new meta_graph_def, then Graph will be out of sync with
# meta_graph_def.
sess, metagraph_def = session_bundle.load_session_bundle_from_path(
export_dir, target, config, meta_graph_def=metagraph_def)
return sess, metagraph_def
def load_session_bundle_or_saved_model_bundle_from_path(export_dir,
tags=None,
target="",
config=None):
"""Load session bundle from the given path.
  The function reads input from the export_dir, adds the graph data to the
  default graph, and restores the parameters for the session created.
Args:
export_dir: the directory that contains files exported by exporter.
tags: Set of string tags to identify the required MetaGraphDef when model is
saved as SavedModel. These should correspond to the tags used when saving
the variables using the SavedModel `save()` API.
target: The execution engine to connect to. See target in
tf.compat.v1.Session()
config: A ConfigProto proto with configuration options. See config in
tf.compat.v1.Session()
Returns:
session: a tensorflow session created from the variable files.
meta_graph: a meta graph proto saved in the exporter directory.
Raises:
RuntimeError: if the required files are missing or contain unrecognizable
fields, i.e. the exported model is invalid.
"""
metagraph_def = None
sess = None
if loader.maybe_saved_model_directory(export_dir):
sess = session.Session(target, graph=None, config=config)
metagraph_def = loader.load(sess, tags, export_dir)
elif session_bundle.maybe_session_bundle_dir(export_dir):
sess, metagraph_def = _load_saved_model_from_session_bundle_path(
export_dir, target, config)
else:
raise RuntimeError("SessionBundle or SavedModelBundle not found at "
"specified export location: %s" % export_dir)
return sess, metagraph_def
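# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of loading either bundle flavor through the shim and
# listing the signature defs that end up on the MetaGraphDef. The helper name
# `_example_list_signature_defs` and the "serve" default tag are assumptions.
def _example_list_signature_defs(export_dir, tags=("serve",)):
  """Loads a bundle via the shim and returns its signature_def keys."""
  sess, metagraph_def = load_session_bundle_or_saved_model_bundle_from_path(
      export_dir, tags=list(tags))
  with sess:
    return sorted(metagraph_def.signature_def.keys())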
|
tensorflow-master
|
tensorflow/contrib/session_bundle/bundle_shim.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bundle_shim.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.contrib.session_bundle import bundle_shim
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
import tensorflow.python.ops.parsing_ops # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.util import compat
SAVED_MODEL_PATH = ("cc/saved_model/testdata/half_plus_two/00000123")
SESSION_BUNDLE_PATH = "contrib/session_bundle/testdata/half_plus_two/00000123"
class BundleShimTest(test.TestCase):
def testBadPath(self):
base_path = test.test_src_dir_path("/no/such/a/dir")
ops.reset_default_graph()
with self.assertRaises(RuntimeError):
_, _ = bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(
base_path)
def testAddInputToSignatureDef(self):
signature_def = meta_graph_pb2.SignatureDef()
signature_def_compare = meta_graph_pb2.SignatureDef()
# Add input to signature-def corresponding to `foo_key`.
bundle_shim._add_input_to_signature_def("foo-name", "foo-key",
signature_def)
self.assertEqual(len(signature_def.inputs), 1)
self.assertEqual(len(signature_def.outputs), 0)
self.assertProtoEquals(
signature_def.inputs["foo-key"],
meta_graph_pb2.TensorInfo(name="foo-name"))
# Attempt to add another input to the signature-def with the same tensor
# name and key.
bundle_shim._add_input_to_signature_def("foo-name", "foo-key",
signature_def)
self.assertEqual(len(signature_def.inputs), 1)
self.assertEqual(len(signature_def.outputs), 0)
self.assertProtoEquals(
signature_def.inputs["foo-key"],
meta_graph_pb2.TensorInfo(name="foo-name"))
# Add another input to the signature-def corresponding to `bar-key`.
bundle_shim._add_input_to_signature_def("bar-name", "bar-key",
signature_def)
self.assertEqual(len(signature_def.inputs), 2)
self.assertEqual(len(signature_def.outputs), 0)
self.assertProtoEquals(
signature_def.inputs["bar-key"],
meta_graph_pb2.TensorInfo(name="bar-name"))
# Add an input to the signature-def corresponding to `foo-key` with an
# updated tensor name.
bundle_shim._add_input_to_signature_def("bar-name", "foo-key",
signature_def)
self.assertEqual(len(signature_def.inputs), 2)
self.assertEqual(len(signature_def.outputs), 0)
self.assertProtoEquals(
signature_def.inputs["foo-key"],
meta_graph_pb2.TensorInfo(name="bar-name"))
# Test that there are no other side-effects.
del signature_def.inputs["foo-key"]
del signature_def.inputs["bar-key"]
self.assertProtoEquals(signature_def, signature_def_compare)
def testAddOutputToSignatureDef(self):
signature_def = meta_graph_pb2.SignatureDef()
signature_def_compare = meta_graph_pb2.SignatureDef()
# Add output to signature-def corresponding to `foo_key`.
bundle_shim._add_output_to_signature_def("foo-name", "foo-key",
signature_def)
self.assertEqual(len(signature_def.outputs), 1)
self.assertEqual(len(signature_def.inputs), 0)
self.assertProtoEquals(
signature_def.outputs["foo-key"],
meta_graph_pb2.TensorInfo(name="foo-name"))
# Attempt to add another output to the signature-def with the same tensor
# name and key.
bundle_shim._add_output_to_signature_def("foo-name", "foo-key",
signature_def)
self.assertEqual(len(signature_def.outputs), 1)
self.assertEqual(len(signature_def.inputs), 0)
self.assertProtoEquals(
signature_def.outputs["foo-key"],
meta_graph_pb2.TensorInfo(name="foo-name"))
# Add another output to the signature-def corresponding to `bar-key`.
bundle_shim._add_output_to_signature_def("bar-name", "bar-key",
signature_def)
self.assertEqual(len(signature_def.outputs), 2)
self.assertEqual(len(signature_def.inputs), 0)
self.assertProtoEquals(
signature_def.outputs["bar-key"],
meta_graph_pb2.TensorInfo(name="bar-name"))
# Add an output to the signature-def corresponding to `foo-key` with an
# updated tensor name.
bundle_shim._add_output_to_signature_def("bar-name", "foo-key",
signature_def)
self.assertEqual(len(signature_def.outputs), 2)
self.assertEqual(len(signature_def.inputs), 0)
self.assertProtoEquals(
signature_def.outputs["foo-key"],
meta_graph_pb2.TensorInfo(name="bar-name"))
    # Test that there are no other side effects.
del signature_def.outputs["foo-key"]
del signature_def.outputs["bar-key"]
self.assertProtoEquals(signature_def, signature_def_compare)
def testConvertDefaultSignatureGenericToSignatureDef(self):
signatures_proto = manifest_pb2.Signatures()
generic_signature = manifest_pb2.GenericSignature()
signatures_proto.default_signature.generic_signature.CopyFrom(
generic_signature)
signature_def = bundle_shim._convert_default_signature_to_signature_def(
signatures_proto)
self.assertEquals(signature_def, None)
def testConvertDefaultSignatureRegressionToSignatureDef(self):
signatures_proto = manifest_pb2.Signatures()
regression_signature = manifest_pb2.RegressionSignature()
regression_signature.input.CopyFrom(
manifest_pb2.TensorBinding(
tensor_name=signature_constants.REGRESS_INPUTS))
regression_signature.output.CopyFrom(
manifest_pb2.TensorBinding(
tensor_name=signature_constants.REGRESS_OUTPUTS))
signatures_proto.default_signature.regression_signature.CopyFrom(
regression_signature)
signature_def = bundle_shim._convert_default_signature_to_signature_def(
signatures_proto)
# Validate regression signature correctly copied over.
self.assertEqual(signature_def.method_name,
signature_constants.REGRESS_METHOD_NAME)
self.assertEqual(len(signature_def.inputs), 1)
self.assertEqual(len(signature_def.outputs), 1)
self.assertProtoEquals(
signature_def.inputs[signature_constants.REGRESS_INPUTS],
meta_graph_pb2.TensorInfo(name=signature_constants.REGRESS_INPUTS))
self.assertProtoEquals(
signature_def.outputs[signature_constants.REGRESS_OUTPUTS],
meta_graph_pb2.TensorInfo(name=signature_constants.REGRESS_OUTPUTS))
def testConvertDefaultSignatureClassificationToSignatureDef(self):
signatures_proto = manifest_pb2.Signatures()
classification_signature = manifest_pb2.ClassificationSignature()
classification_signature.input.CopyFrom(
manifest_pb2.TensorBinding(
tensor_name=signature_constants.CLASSIFY_INPUTS))
classification_signature.classes.CopyFrom(
manifest_pb2.TensorBinding(
tensor_name=signature_constants.CLASSIFY_OUTPUT_CLASSES))
classification_signature.scores.CopyFrom(
manifest_pb2.TensorBinding(
tensor_name=signature_constants.CLASSIFY_OUTPUT_SCORES))
signatures_proto.default_signature.classification_signature.CopyFrom(
classification_signature)
signatures_proto.default_signature.classification_signature.CopyFrom(
classification_signature)
signature_def = bundle_shim._convert_default_signature_to_signature_def(
signatures_proto)
# Validate classification signature correctly copied over.
self.assertEqual(signature_def.method_name,
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(len(signature_def.inputs), 1)
self.assertEqual(len(signature_def.outputs), 2)
self.assertProtoEquals(
signature_def.inputs[signature_constants.CLASSIFY_INPUTS],
meta_graph_pb2.TensorInfo(name=signature_constants.CLASSIFY_INPUTS))
self.assertProtoEquals(
signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_SCORES],
meta_graph_pb2.TensorInfo(
name=signature_constants.CLASSIFY_OUTPUT_SCORES))
self.assertProtoEquals(
signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES],
meta_graph_pb2.TensorInfo(
name=signature_constants.CLASSIFY_OUTPUT_CLASSES))
def testConvertNamedSignatureNonGenericToSignatureDef(self):
signatures_proto = manifest_pb2.Signatures()
regression_signature = manifest_pb2.RegressionSignature()
signatures_proto.named_signatures[
signature_constants.PREDICT_INPUTS].regression_signature.CopyFrom(
regression_signature)
with self.assertRaises(RuntimeError):
_ = bundle_shim._convert_named_signatures_to_signature_def(
signatures_proto)
signatures_proto = manifest_pb2.Signatures()
classification_signature = manifest_pb2.ClassificationSignature()
signatures_proto.named_signatures[
signature_constants.PREDICT_INPUTS].classification_signature.CopyFrom(
classification_signature)
with self.assertRaises(RuntimeError):
_ = bundle_shim._convert_named_signatures_to_signature_def(
signatures_proto)
def testConvertNamedSignatureToSignatureDef(self):
signatures_proto = manifest_pb2.Signatures()
generic_signature = manifest_pb2.GenericSignature()
generic_signature.map["input_key"].CopyFrom(
manifest_pb2.TensorBinding(tensor_name="input"))
signatures_proto.named_signatures[
signature_constants.PREDICT_INPUTS].generic_signature.CopyFrom(
generic_signature)
generic_signature = manifest_pb2.GenericSignature()
generic_signature.map["output_key"].CopyFrom(
manifest_pb2.TensorBinding(tensor_name="output"))
signatures_proto.named_signatures[
signature_constants.PREDICT_OUTPUTS].generic_signature.CopyFrom(
generic_signature)
signature_def = bundle_shim._convert_named_signatures_to_signature_def(
signatures_proto)
self.assertEqual(signature_def.method_name,
signature_constants.PREDICT_METHOD_NAME)
self.assertEqual(len(signature_def.inputs), 1)
self.assertEqual(len(signature_def.outputs), 1)
self.assertProtoEquals(
signature_def.inputs["input_key"],
meta_graph_pb2.TensorInfo(name="input"))
self.assertProtoEquals(
signature_def.outputs["output_key"],
meta_graph_pb2.TensorInfo(name="output"))
def testConvertSignaturesToSignatureDefs(self):
base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
meta_graph_filename = os.path.join(base_path,
constants.META_GRAPH_DEF_FILENAME)
metagraph_def = meta_graph.read_meta_graph_file(meta_graph_filename)
default_signature_def, named_signature_def = (
bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
self.assertEqual(default_signature_def.method_name,
signature_constants.REGRESS_METHOD_NAME)
self.assertEqual(len(default_signature_def.inputs), 1)
self.assertEqual(len(default_signature_def.outputs), 1)
self.assertProtoEquals(
default_signature_def.inputs[signature_constants.REGRESS_INPUTS],
meta_graph_pb2.TensorInfo(name="tf_example:0"))
self.assertProtoEquals(
default_signature_def.outputs[signature_constants.REGRESS_OUTPUTS],
meta_graph_pb2.TensorInfo(name="Identity:0"))
self.assertEqual(named_signature_def.method_name,
signature_constants.PREDICT_METHOD_NAME)
self.assertEqual(len(named_signature_def.inputs), 1)
self.assertEqual(len(named_signature_def.outputs), 1)
self.assertProtoEquals(
named_signature_def.inputs["x"], meta_graph_pb2.TensorInfo(name="x:0"))
self.assertProtoEquals(
named_signature_def.outputs["y"], meta_graph_pb2.TensorInfo(name="y:0"))
# Now try default signature only
collection_def = metagraph_def.collection_def
signatures_proto = manifest_pb2.Signatures()
signatures = collection_def[constants.SIGNATURES_KEY].any_list.value[0]
signatures.Unpack(signatures_proto)
named_only_signatures_proto = manifest_pb2.Signatures()
named_only_signatures_proto.CopyFrom(signatures_proto)
default_only_signatures_proto = manifest_pb2.Signatures()
default_only_signatures_proto.CopyFrom(signatures_proto)
default_only_signatures_proto.named_signatures.clear()
default_only_signatures_proto.ClearField("named_signatures")
metagraph_def.collection_def[constants.SIGNATURES_KEY].any_list.value[
0].Pack(default_only_signatures_proto)
default_signature_def, named_signature_def = (
bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
self.assertEqual(default_signature_def.method_name,
signature_constants.REGRESS_METHOD_NAME)
self.assertEqual(named_signature_def, None)
named_only_signatures_proto.ClearField("default_signature")
metagraph_def.collection_def[constants.SIGNATURES_KEY].any_list.value[
0].Pack(named_only_signatures_proto)
default_signature_def, named_signature_def = (
bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
self.assertEqual(named_signature_def.method_name,
signature_constants.PREDICT_METHOD_NAME)
self.assertEqual(default_signature_def, None)
def testLegacyBasic(self):
base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
ops.reset_default_graph()
sess, meta_graph_def = (
bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(
base_path,
tags=[""],
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})))
self.assertTrue(sess)
asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
with sess.as_default():
path1, path2 = sess.run(["filename1:0", "filename2:0"])
self.assertEqual(
compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
self.assertEqual(
compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)
collection_def = meta_graph_def.collection_def
signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
self.assertEqual(len(signatures_any), 1)
def testSavedModelBasic(self):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
ops.reset_default_graph()
sess, meta_graph_def = (
bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(
base_path,
tags=[tag_constants.SERVING],
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})))
self.assertTrue(sess)
# Check basic signature def property.
signature_def = meta_graph_def.signature_def
self.assertEqual(signature_def["regress_x_to_y"].method_name,
signature_constants.REGRESS_METHOD_NAME)
with sess.as_default():
output1 = sess.run(["filename_tensor:0"])
self.assertEqual([compat.as_bytes("foo.txt")], output1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/session_bundle/bundle_shim_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for session_bundle.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import shutil
import numpy as np
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.contrib.session_bundle import session_bundle
from tensorflow.core.example.example_pb2 import Example
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.parsing_ops # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
SAVED_MODEL_PATH = (
"python/saved_model/example/saved_model_half_plus_two/00000123")
SESSION_BUNDLE_PATH = "contrib/session_bundle/testdata/half_plus_two/00000123"
def _make_serialized_example(x):
example = Example()
example.features.feature["x"].float_list.value.append(x)
return example.SerializeToString()
class SessionBundleLoadTest(test.TestCase):
def _checkRegressionSignature(self, signatures, sess):
default_signature = signatures.default_signature
input_name = default_signature.regression_signature.input.tensor_name
output_name = default_signature.regression_signature.output.tensor_name
tf_example = [_make_serialized_example(x) for x in [0, 1, 2, 3]]
y = sess.run([output_name], {input_name: tf_example})
# The operation is y = 0.5 * x + 2
self.assertEqual(y[0][0], 2)
self.assertEqual(y[0][1], 2.5)
self.assertEqual(y[0][2], 3)
self.assertEqual(y[0][3], 3.5)
def _checkNamedSignatures(self, signatures, sess):
named_signatures = signatures.named_signatures
input_name = (named_signatures["inputs"].generic_signature.map["x"]
.tensor_name)
output_name = (named_signatures["outputs"].generic_signature.map["y"]
.tensor_name)
y = sess.run([output_name], {input_name: np.array([[0], [1], [2], [3]])})
# The operation is y = 0.5 * x + 2
self.assertEqual(y[0][0], 2)
self.assertEqual(y[0][1], 2.5)
self.assertEqual(y[0][2], 3)
self.assertEqual(y[0][3], 3.5)
def testMaybeSessionBundleDir(self):
base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
self.assertTrue(session_bundle.maybe_session_bundle_dir(base_path))
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))
base_path = "complete_garbage"
self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))
def testBasic(self):
base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
ops.reset_default_graph()
sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
base_path,
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2}))
self.assertTrue(sess)
asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
with sess.as_default():
path1, path2 = sess.run(["filename1:0", "filename2:0"])
self.assertEqual(
compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
self.assertEqual(
compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)
collection_def = meta_graph_def.collection_def
signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
self._checkRegressionSignature(signatures, sess)
self._checkNamedSignatures(signatures, sess)
def testBadPath(self):
base_path = test.test_src_dir_path("/no/such/a/dir")
ops.reset_default_graph()
with self.assertRaises(RuntimeError) as cm:
_, _ = session_bundle.load_session_bundle_from_path(
base_path,
target="local",
config=config_pb2.ConfigProto(device_count={"CPU": 2}))
self.assertTrue("Expected meta graph file missing" in str(cm.exception))
def testVarCheckpointV2(self):
base_path = test.test_src_dir_path(
"contrib/session_bundle/testdata/half_plus_two_ckpt_v2/00000123")
ops.reset_default_graph()
sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
base_path,
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2}))
self.assertTrue(sess)
asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
with sess.as_default():
path1, path2 = sess.run(["filename1:0", "filename2:0"])
self.assertEqual(
compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
self.assertEqual(
compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)
collection_def = meta_graph_def.collection_def
signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
self._checkRegressionSignature(signatures, sess)
self._checkNamedSignatures(signatures, sess)
class SessionBundleLoadNoVarsTest(test.TestCase):
"""Test the case where there are no variables in the graph."""
def setUp(self):
self.base_path = os.path.join(test.get_temp_dir(), "no_vars")
if not os.path.exists(self.base_path):
os.mkdir(self.base_path)
# Create a simple graph with a variable, then convert variables to
# constants and export the graph.
with ops.Graph().as_default() as g:
x = array_ops.placeholder(dtypes.float32, name="x")
w = variables.Variable(3.0)
y = math_ops.subtract(w * x, 7.0, name="y") # pylint: disable=unused-variable
ops.add_to_collection("meta", "this is meta")
with self.session(graph=g) as session:
variables.global_variables_initializer().run()
new_graph_def = graph_util.convert_variables_to_constants(
session, g.as_graph_def(), ["y"])
filename = os.path.join(self.base_path, constants.META_GRAPH_DEF_FILENAME)
saver.export_meta_graph(
filename, graph_def=new_graph_def, collection_list=["meta"])
def tearDown(self):
shutil.rmtree(self.base_path)
def testGraphWithoutVarsLoadsCorrectly(self):
session, _ = session_bundle.load_session_bundle_from_path(self.base_path)
got = session.run(["y:0"], {"x:0": 5.0})[0]
self.assertEquals(got, 5.0 * 3.0 - 7.0)
self.assertEquals(ops.get_collection("meta"), [b"this is meta"])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/session_bundle/session_bundle_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for session_bundle.gc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
class GcTest(test_util.TensorFlowTestCase):
def testLargestExportVersions(self):
paths = [gc.Path("/foo", 8), gc.Path("/foo", 9), gc.Path("/foo", 10)]
newest = gc.largest_export_versions(2)
n = newest(paths)
self.assertEquals(n, [gc.Path("/foo", 9), gc.Path("/foo", 10)])
def testLargestExportVersionsDoesNotDeleteZeroFolder(self):
paths = [gc.Path("/foo", 0), gc.Path("/foo", 3)]
newest = gc.largest_export_versions(2)
n = newest(paths)
self.assertEquals(n, [gc.Path("/foo", 0), gc.Path("/foo", 3)])
def testModExportVersion(self):
paths = [
gc.Path("/foo", 4), gc.Path("/foo", 5), gc.Path("/foo", 6),
gc.Path("/foo", 9)
]
mod = gc.mod_export_version(2)
self.assertEquals(mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 6)])
mod = gc.mod_export_version(3)
self.assertEquals(mod(paths), [gc.Path("/foo", 6), gc.Path("/foo", 9)])
def testOneOfEveryNExportVersions(self):
paths = [
gc.Path("/foo", 0), gc.Path("/foo", 1), gc.Path("/foo", 3),
gc.Path("/foo", 5), gc.Path("/foo", 6), gc.Path("/foo", 7),
gc.Path("/foo", 8), gc.Path("/foo", 33)
]
one_of = gc.one_of_every_n_export_versions(3)
self.assertEquals(
one_of(paths), [
gc.Path("/foo", 3), gc.Path("/foo", 6), gc.Path("/foo", 8),
gc.Path("/foo", 33)
])
def testOneOfEveryNExportVersionsZero(self):
# Zero is a special case since it gets rolled into the first interval.
# Test that here.
paths = [gc.Path("/foo", 0), gc.Path("/foo", 4), gc.Path("/foo", 5)]
one_of = gc.one_of_every_n_export_versions(3)
self.assertEquals(one_of(paths), [gc.Path("/foo", 0), gc.Path("/foo", 5)])
def testUnion(self):
paths = []
for i in xrange(10):
paths.append(gc.Path("/foo", i))
f = gc.union(gc.largest_export_versions(3), gc.mod_export_version(3))
self.assertEquals(
f(paths), [
gc.Path("/foo", 0), gc.Path("/foo", 3), gc.Path("/foo", 6),
gc.Path("/foo", 7), gc.Path("/foo", 8), gc.Path("/foo", 9)
])
def testNegation(self):
paths = [
gc.Path("/foo", 4), gc.Path("/foo", 5), gc.Path("/foo", 6),
gc.Path("/foo", 9)
]
mod = gc.negation(gc.mod_export_version(2))
self.assertEquals(mod(paths), [gc.Path("/foo", 5), gc.Path("/foo", 9)])
mod = gc.negation(gc.mod_export_version(3))
self.assertEquals(mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 5)])
def testPathsWithParse(self):
base_dir = os.path.join(test.get_temp_dir(), "paths_parse")
self.assertFalse(gfile.Exists(base_dir))
for p in xrange(3):
gfile.MakeDirs(os.path.join(base_dir, "%d" % p))
# add a base_directory to ignore
gfile.MakeDirs(os.path.join(base_dir, "ignore"))
# create a simple parser that pulls the export_version from the directory.
def parser(path):
match = re.match(r"^" + base_dir + r"/(\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
self.assertEquals(
gc.get_paths(
base_dir, parser=parser), [
gc.Path(os.path.join(base_dir, "0"), 0),
gc.Path(os.path.join(base_dir, "1"), 1),
gc.Path(os.path.join(base_dir, "2"), 2)
])
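# A hedged, illustrative sketch (not part of the tests) of how the filters
# exercised above compose into an export garbage-collection policy.
# `base_dir` and `parser` are placeholders in the spirit of testPathsWithParse:
#
#   keep = gc.union(gc.largest_export_versions(5), gc.mod_export_version(10))
#   delete = gc.negation(keep)
#   for path in delete(gc.get_paths(base_dir, parser=parser)):
#     gfile.DeleteRecursively(path.path)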
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/session_bundle/gc_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a toy linear regression inference graph.
Exports a TensorFlow graph to /tmp/half_plus_two/ based on the Exporter
format.
This graph calculates,
y = a*x + b
where a and b are variables with a=0.5 and b=2.
Output from this program is typically used to exercise Session
loading and execution code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
FLAGS = None
def Export(export_dir, use_checkpoint_v2):
with tf.Session() as sess:
    # Make model parameters a and b variables instead of constants to
# exercise the variable reloading mechanisms.
a = tf.Variable(0.5, name="a")
b = tf.Variable(2.0, name="b")
# Create a placeholder for serialized tensorflow.Example messages to be fed.
serialized_tf_example = tf.placeholder(tf.string, name="tf_example")
# Parse the tensorflow.Example looking for a feature named "x" with a single
# floating point value.
feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32),}
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
# Use tf.identity() to assign name
x = tf.identity(tf_example["x"], name="x")
# Calculate, y = a*x + b
y = tf.add(tf.multiply(a, x), b, name="y")
# Setup a standard Saver for our variables.
save = tf.train.Saver(
{
"a": a,
"b": b
},
sharded=True,
write_version=tf.train.SaverDef.V2 if use_checkpoint_v2 else
tf.train.SaverDef.V1)
# asset_path contains the base directory of assets used in training (e.g.
# vocabulary files).
original_asset_path = tf.constant("/tmp/original/export/assets")
# Ops reading asset files should reference the asset_path tensor
# which stores the original asset path at training time and the
# overridden assets directory at restore time.
asset_path = tf.Variable(original_asset_path,
name="asset_path",
trainable=False,
collections=[])
assign_asset_path = asset_path.assign(original_asset_path)
# Use a fixed global step number.
global_step_tensor = tf.Variable(123, name="global_step")
# Create a RegressionSignature for our input and output.
regression_signature = exporter.regression_signature(
input_tensor=serialized_tf_example,
        # Use tf.identity here because we export two signatures.
        # Otherwise only the graph for one of the signatures will be loaded
        # (whichever is created first) during serving.
output_tensor=tf.identity(y))
named_graph_signature = {
"inputs": exporter.generic_signature({"x": x}),
"outputs": exporter.generic_signature({"y": y})
}
# Create two filename assets and corresponding tensors.
# TODO(b/26254158) Consider adding validation of file existence as well as
# hashes (e.g. sha1) for consistency.
original_filename1 = tf.constant("hello1.txt")
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename1)
filename1 = tf.Variable(original_filename1,
name="filename1",
trainable=False,
collections=[])
assign_filename1 = filename1.assign(original_filename1)
original_filename2 = tf.constant("hello2.txt")
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename2)
filename2 = tf.Variable(original_filename2,
name="filename2",
trainable=False,
collections=[])
assign_filename2 = filename2.assign(original_filename2)
# Init op contains a group of all variables that we assign.
init_op = tf.group(assign_asset_path, assign_filename1, assign_filename2)
# CopyAssets is used as a callback during export to copy files to the
# given export directory.
def CopyAssets(filepaths, export_path):
print("copying asset files to: %s" % export_path)
for filepath in filepaths:
print("copying asset file: %s" % filepath)
# Run an export.
tf.global_variables_initializer().run()
export = exporter.Exporter(save)
export.init(
sess.graph.as_graph_def(),
init_op=init_op,
default_graph_signature=regression_signature,
named_graph_signatures=named_graph_signature,
assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),
assets_callback=CopyAssets)
export.export(export_dir, global_step_tensor, sess)
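# Descriptive note (an assumption based on the session_bundle tests earlier in
# this corpus, not verified output of this script): after Export() runs, the
# export directory is expected to contain a step-stamped subdirectory
# (e.g. 00000123 for the fixed global step above) holding the exported meta
# graph, the sharded variable checkpoint, and an assets/ folder with
# hello1.txt and hello2.txt.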
def main(_):
Export(FLAGS.export_dir, FLAGS.use_checkpoint_v2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--export_dir",
type=str,
default="/tmp/half_plus_two",
help="Directory where to export inference model."
)
parser.add_argument(
"--use_checkpoint_v2",
type="bool",
nargs="?",
const=True,
default=False,
help="If true, write v2 checkpoint files.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/session_bundle/example/export_half_plus_two.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient boosted trees implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.boosted_trees.python import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for combined DNN + GBDT training model.
The combined model trains a DNN first, then trains boosted trees to boost the
logits of the DNN. The input layer of the DNN (including the embeddings learned
over sparse features) can optionally be provided to the boosted trees as
an additional input feature.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.boosted_trees.estimator_batch import model
from tensorflow.contrib.boosted_trees.estimator_batch import distillation_loss
from tensorflow.contrib.boosted_trees.estimator_batch import trainer_hooks
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.feature_column import feature_column_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_DNN_LEARNING_RATE = 0.001
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s_activation" % tag, value)
def _dnn_tree_combined_model_fn(
features,
labels,
mode,
head,
dnn_hidden_units,
dnn_feature_columns,
tree_learner_config,
num_trees,
tree_examples_per_layer,
config=None,
dnn_optimizer="Adagrad",
dnn_activation_fn=nn.relu,
dnn_dropout=None,
dnn_input_layer_partitioner=None,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=10000,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
dnn_to_tree_distillation_param=None,
use_core_versions=False,
output_type=model.ModelBuilderOutputType.MODEL_FN_OPS,
override_global_step_value=None):
"""DNN and GBDT combined model_fn.
Args:
features: `dict` of `Tensor` objects.
labels: Labels used to train on.
mode: Mode we are in. (TRAIN/EVAL/INFER)
head: A `Head` instance.
dnn_hidden_units: List of hidden units per layer.
dnn_feature_columns: An iterable containing all the feature columns
used by the model's DNN.
tree_learner_config: A config for the tree learner.
num_trees: Number of trees to grow model to after training DNN.
tree_examples_per_layer: Number of examples to accumulate before
growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in the
      training dataset if possible. It can also be a function that computes
the number of examples based on the depth of the layer that's
being built.
config: `RunConfig` of the estimator.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN. If `None`, will use the Adagrad
optimizer with default learning rate of 0.001.
dnn_activation_fn: Activation function applied to each layer of the DNN.
If `None`, will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability to drop out a given
unit in the DNN.
dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
Defaults to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
dnn_input_layer_to_tree: Whether to provide the DNN's input layer
as a feature to the tree.
dnn_steps_to_train: Number of steps to train dnn for before switching
to gbdt.
predict_with_tree_only: Whether to use only the tree model output as the
final prediction.
tree_feature_columns: An iterable containing all the feature columns
used by the model's boosted trees. If dnn_input_layer_to_tree is
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
float defines the weight of the distillation loss, and the loss_fn, for
computing distillation loss, takes dnn_logits, tree_logits and weight
tensor. If the entire tuple is None, no distillation will be applied. If
only the loss_fn is None, we will take the sigmoid/softmax cross entropy
      loss by default. When distillation is applied, `predict_with_tree_only`
will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
(new interface).
    override_global_step_value: If set, the global step is reset to this value
      after training is done. This is particularly useful for hyperparameter
      tuning, which can't recognize early stopping due to the number of trees.
      If None, the global step is not overridden.
Returns:
A `ModelFnOps` object.
Raises:
ValueError: if inputs are not valid.
"""
if not isinstance(features, dict):
raise ValueError("features should be a dictionary of `Tensor`s. "
"Given type: {}".format(type(features)))
if not dnn_feature_columns:
raise ValueError("dnn_feature_columns must be specified")
if dnn_to_tree_distillation_param:
if not predict_with_tree_only:
logging.warning("update predict_with_tree_only to True since distillation"
"is specified.")
predict_with_tree_only = True
# Build DNN Logits.
dnn_parent_scope = "dnn"
dnn_partitioner = dnn_input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=config.num_ps_replicas, min_slice_size=64 << 20))
if (output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC and
not use_core_versions):
raise ValueError("You must use core versions with Estimator Spec")
global_step = training_util.get_global_step()
with variable_scope.variable_scope(
dnn_parent_scope,
values=tuple(six.itervalues(features)),
partitioner=dnn_partitioner):
with variable_scope.variable_scope(
"input_from_feature_columns",
values=tuple(six.itervalues(features)),
partitioner=dnn_partitioner) as input_layer_scope:
if use_core_versions:
input_layer = feature_column_lib.input_layer(
features=features,
feature_columns=dnn_feature_columns,
weight_collections=[dnn_parent_scope])
else:
input_layer = layers.input_from_feature_columns(
columns_to_tensors=features,
feature_columns=dnn_feature_columns,
weight_collections=[dnn_parent_scope],
scope=input_layer_scope)
def dnn_logits_fn():
"""Builds the logits from the input layer."""
previous_layer = input_layer
for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
with variable_scope.variable_scope(
"hiddenlayer_%d" % layer_id,
values=(previous_layer,)) as hidden_layer_scope:
net = layers.fully_connected(
previous_layer,
num_hidden_units,
activation_fn=dnn_activation_fn,
variables_collections=[dnn_parent_scope],
scope=hidden_layer_scope)
if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
_add_hidden_layer_summary(net, hidden_layer_scope.name)
previous_layer = net
with variable_scope.variable_scope(
"logits", values=(previous_layer,)) as logits_scope:
dnn_logits = layers.fully_connected(
previous_layer,
head.logits_dimension,
activation_fn=None,
variables_collections=[dnn_parent_scope],
scope=logits_scope)
_add_hidden_layer_summary(dnn_logits, logits_scope.name)
return dnn_logits
if predict_with_tree_only and mode == model_fn.ModeKeys.INFER:
dnn_logits = array_ops.constant(0.0)
dnn_train_op_fn = control_flow_ops.no_op
elif predict_with_tree_only and mode == model_fn.ModeKeys.EVAL:
dnn_logits = control_flow_ops.cond(
global_step > dnn_steps_to_train,
lambda: array_ops.constant(0.0),
dnn_logits_fn)
dnn_train_op_fn = control_flow_ops.no_op
else:
dnn_logits = dnn_logits_fn()
def dnn_train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizers.optimize_loss(
loss=loss,
global_step=training_util.get_global_step(),
learning_rate=_DNN_LEARNING_RATE,
optimizer=_get_optimizer(dnn_optimizer),
name=dnn_parent_scope,
variables=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES, scope=dnn_parent_scope),
# Empty summaries to prevent optimizers from logging training_loss.
summaries=[])
# Build Tree Logits.
with ops.device(global_step.device):
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config="", # Initialize an empty ensemble.
name="ensemble_model")
tree_features = features.copy()
if dnn_input_layer_to_tree:
tree_features["dnn_input_layer"] = input_layer
tree_feature_columns.append(layers.real_valued_column("dnn_input_layer"))
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=config.is_chief,
num_ps_replicas=config.num_ps_replicas,
ensemble_handle=ensemble_handle,
center_bias=tree_center_bias,
examples_per_layer=tree_examples_per_layer,
learner_config=tree_learner_config,
feature_columns=tree_feature_columns,
logits_dimension=head.logits_dimension,
features=tree_features,
use_core_columns=use_core_versions)
with ops.name_scope("gbdt"):
predictions_dict = gbdt_model.predict(mode)
tree_logits = predictions_dict["predictions"]
def _tree_train_op_fn(loss):
"""Returns the op to optimize the loss."""
if dnn_to_tree_distillation_param:
loss_weight, loss_fn = dnn_to_tree_distillation_param
# pylint: disable=protected-access
if use_core_versions:
weight_tensor = head_lib._weight_tensor(features, head._weight_column)
else:
weight_tensor = head_lib._weight_tensor(
features, head.weight_column_name)
# pylint: enable=protected-access
dnn_logits_fixed = array_ops.stop_gradient(dnn_logits)
if loss_fn is None:
        # Create a loss_fn similar to the default head loss_fn that
        # multi_class_head used previously.
n_classes = 2 if head.logits_dimension == 1 else head.logits_dimension
loss_fn = distillation_loss.create_dnn_to_tree_cross_entropy_loss_fn(
n_classes)
dnn_to_tree_distillation_loss = loss_weight * loss_fn(
dnn_logits_fixed, tree_logits, weight_tensor)
summary.scalar("dnn_to_tree_distillation_loss",
dnn_to_tree_distillation_loss)
loss += dnn_to_tree_distillation_loss
update_op = gbdt_model.train(loss, predictions_dict, labels)
with ops.control_dependencies(
[update_op]), (ops.colocate_with(global_step)):
update_op = state_ops.assign_add(global_step, 1).op
return update_op
if predict_with_tree_only:
if mode == model_fn.ModeKeys.TRAIN or mode == model_fn.ModeKeys.INFER:
tree_train_logits = tree_logits
else:
tree_train_logits = control_flow_ops.cond(
global_step > dnn_steps_to_train,
lambda: tree_logits,
lambda: dnn_logits)
else:
tree_train_logits = dnn_logits + tree_logits
def _no_train_op_fn(loss):
"""Returns a no-op."""
del loss
return control_flow_ops.no_op()
if tree_center_bias:
num_trees += 1
finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()
if output_type == model.ModelBuilderOutputType.MODEL_FN_OPS:
model_fn_ops = head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_no_train_op_fn,
logits=tree_train_logits)
if mode != model_fn.ModeKeys.TRAIN:
return model_fn_ops
dnn_train_op = head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=dnn_train_op_fn,
logits=dnn_logits).train_op
tree_train_op = head.create_model_fn_ops(
features=tree_features,
mode=mode,
labels=labels,
train_op_fn=_tree_train_op_fn,
logits=tree_train_logits).train_op
# Add the hooks
model_fn_ops.training_hooks.extend([
trainer_hooks.SwitchTrainOp(dnn_train_op, dnn_steps_to_train,
tree_train_op),
trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
finalized_trees,
override_global_step_value)
])
return model_fn_ops
elif output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC:
fusion_spec = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_no_train_op_fn,
logits=tree_train_logits)
if mode != model_fn.ModeKeys.TRAIN:
return fusion_spec
dnn_spec = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=dnn_train_op_fn,
logits=dnn_logits)
tree_spec = head.create_estimator_spec(
features=tree_features,
mode=mode,
labels=labels,
train_op_fn=_tree_train_op_fn,
logits=tree_train_logits)
training_hooks = [
trainer_hooks.SwitchTrainOp(dnn_spec.train_op, dnn_steps_to_train,
tree_spec.train_op),
trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
finalized_trees,
override_global_step_value)
]
fusion_spec = fusion_spec._replace(training_hooks=training_hooks +
list(fusion_spec.training_hooks))
return fusion_spec
class DNNBoostedTreeCombinedClassifier(estimator.Estimator):
"""A classifier that uses a combined DNN/GBDT model."""
def __init__(self,
dnn_hidden_units,
dnn_feature_columns,
tree_learner_config,
num_trees,
tree_examples_per_layer,
n_classes=2,
weight_column_name=None,
model_dir=None,
config=None,
label_name=None,
label_keys=None,
feature_engineering_fn=None,
dnn_optimizer="Adagrad",
dnn_activation_fn=nn.relu,
dnn_dropout=None,
dnn_input_layer_partitioner=None,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=10000,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
dnn_to_tree_distillation_param=None,
use_core_versions=False,
override_global_step_value=None):
"""Initializes a DNNBoostedTreeCombinedClassifier instance.
Args:
dnn_hidden_units: List of hidden units per layer for DNN.
dnn_feature_columns: An iterable containing all the feature columns
used by the model's DNN.
tree_learner_config: A config for the tree learner.
num_trees: Number of trees to grow model to after training DNN.
tree_examples_per_layer: Number of examples to accumulate before
growing the tree a layer. This value has a big impact on model
        quality and should be set equal to the number of examples in the
        training dataset if possible. It can also be a function that computes
the number of examples based on the depth of the layer that's
being built.
n_classes: The number of label classes.
weight_column_name: The name of weight column.
model_dir: Directory for model exports.
config: `RunConfig` of the estimator.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN. If `None`, will use the Adagrad
optimizer with default learning rate.
dnn_activation_fn: Activation function applied to each layer of the DNN.
If `None`, will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability to drop out a given
unit in the DNN.
dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
Defaults to `min_max_variable_partitioner` with `min_slice_size`
64 << 20.
dnn_input_layer_to_tree: Whether to provide the DNN's input layer
as a feature to the tree.
dnn_steps_to_train: Number of steps to train dnn for before switching
to gbdt.
predict_with_tree_only: Whether to use only the tree model output as the
final prediction.
tree_feature_columns: An iterable containing all the feature columns
used by the model's boosted trees. If dnn_input_layer_to_tree is
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
float defines the weight of the distillation loss, and the loss_fn, for
computing distillation loss, takes dnn_logits, tree_logits and weight
tensor. If the entire tuple is None, no distillation will be applied. If
only the loss_fn is None, we will take the sigmoid/softmax cross entropy
        loss by default. When distillation is applied, `predict_with_tree_only`
will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      override_global_step_value: If set, the global step is reset to this
        value after training is done. This is particularly useful for
        hyperparameter tuning, which can't recognize early stopping due to the
        number of trees. If None, the global step is not overridden.
"""
head = head_lib.multi_class_head(
n_classes=n_classes,
label_name=label_name,
label_keys=label_keys,
weight_column_name=weight_column_name,
enable_centered_bias=False)
def _model_fn(features, labels, mode, config):
return _dnn_tree_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
dnn_hidden_units=dnn_hidden_units,
dnn_feature_columns=dnn_feature_columns,
tree_learner_config=tree_learner_config,
num_trees=num_trees,
tree_examples_per_layer=tree_examples_per_layer,
config=config,
dnn_optimizer=dnn_optimizer,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
dnn_input_layer_partitioner=dnn_input_layer_partitioner,
dnn_input_layer_to_tree=dnn_input_layer_to_tree,
dnn_steps_to_train=dnn_steps_to_train,
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
use_core_versions=use_core_versions,
override_global_step_value=override_global_step_value)
super(DNNBoostedTreeCombinedClassifier, self).__init__(
model_fn=_model_fn,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
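# A minimal usage sketch (hypothetical values; `learner_pb2` comes from
# tensorflow.contrib.boosted_trees.proto and is not imported in this module;
# `train_input_fn` is a placeholder input function):
#
#   tree_config = learner_pb2.LearnerConfig()
#   tree_config.num_classes = 2
#   tree_config.constraints.max_tree_depth = 4
#   est = DNNBoostedTreeCombinedClassifier(
#       dnn_hidden_units=[64, 32],
#       dnn_feature_columns=[layers.real_valued_column("x")],
#       tree_learner_config=tree_config,
#       num_trees=10,
#       tree_examples_per_layer=1000,
#       dnn_steps_to_train=5000)
#   est.fit(input_fn=train_input_fn, steps=10000)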
class DNNBoostedTreeCombinedRegressor(estimator.Estimator):
"""A regressor that uses a combined DNN/GBDT model."""
def __init__(self,
dnn_hidden_units,
dnn_feature_columns,
tree_learner_config,
num_trees,
tree_examples_per_layer,
weight_column_name=None,
model_dir=None,
config=None,
label_name=None,
label_dimension=1,
feature_engineering_fn=None,
dnn_optimizer="Adagrad",
dnn_activation_fn=nn.relu,
dnn_dropout=None,
dnn_input_layer_partitioner=None,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=10000,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
dnn_to_tree_distillation_param=None,
use_core_versions=False,
override_global_step_value=None):
"""Initializes a DNNBoostedTreeCombinedRegressor instance.
Args:
dnn_hidden_units: List of hidden units per layer for DNN.
dnn_feature_columns: An iterable containing all the feature columns
used by the model's DNN.
tree_learner_config: A config for the tree learner.
num_trees: Number of trees to grow model to after training DNN.
tree_examples_per_layer: Number of examples to accumulate before
growing the tree a layer. This value has a big impact on model
        quality and should be set equal to the number of examples in the
        training dataset if possible. It can also be a function that computes
the number of examples based on the depth of the layer that's
being built.
weight_column_name: The name of weight column.
model_dir: Directory for model exports.
config: `RunConfig` of the estimator.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN. If `None`, will use the Adagrad
optimizer with default learning rate.
dnn_activation_fn: Activation function applied to each layer of the DNN.
If `None`, will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability to drop out a given
unit in the DNN.
dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
Defaults to `min_max_variable_partitioner` with `min_slice_size`
64 << 20.
dnn_input_layer_to_tree: Whether to provide the DNN's input layer
as a feature to the tree.
dnn_steps_to_train: Number of steps to train dnn for before switching
to gbdt.
predict_with_tree_only: Whether to use only the tree model output as the
final prediction.
tree_feature_columns: An iterable containing all the feature columns
used by the model's boosted trees. If dnn_input_layer_to_tree is
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
float defines the weight of the distillation loss, and the loss_fn, for
computing distillation loss, takes dnn_logits, tree_logits and weight
tensor. If the entire tuple is None, no distillation will be applied. If
only the loss_fn is None, we will take the sigmoid/softmax cross entropy
        loss by default. When distillation is applied, `predict_with_tree_only`
will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      override_global_step_value: If set, the global step is reset to this
        value after training is done. This is particularly useful for
        hyperparameter tuning, which can't recognize early stopping due to the
        number of trees. If None, the global step is not overridden.
"""
head = head_lib.regression_head(
label_name=label_name,
label_dimension=label_dimension,
weight_column_name=weight_column_name,
enable_centered_bias=False)
# num_classes needed for GradientBoostedDecisionTreeModel
if label_dimension == 1:
tree_learner_config.num_classes = 2
else:
tree_learner_config.num_classes = label_dimension
def _model_fn(features, labels, mode, config):
return _dnn_tree_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
dnn_hidden_units=dnn_hidden_units,
dnn_feature_columns=dnn_feature_columns,
tree_learner_config=tree_learner_config,
num_trees=num_trees,
tree_examples_per_layer=tree_examples_per_layer,
config=config,
dnn_optimizer=dnn_optimizer,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
dnn_input_layer_partitioner=dnn_input_layer_partitioner,
dnn_input_layer_to_tree=dnn_input_layer_to_tree,
dnn_steps_to_train=dnn_steps_to_train,
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
use_core_versions=use_core_versions,
override_global_step_value=override_global_step_value)
super(DNNBoostedTreeCombinedRegressor, self).__init__(
model_fn=_model_fn,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
class DNNBoostedTreeCombinedEstimator(estimator.Estimator):
"""An estimator that uses a combined DNN/GBDT model.
Useful for training with user specified `Head`.
"""
def __init__(self,
dnn_hidden_units,
dnn_feature_columns,
tree_learner_config,
num_trees,
tree_examples_per_layer,
head,
model_dir=None,
config=None,
feature_engineering_fn=None,
dnn_optimizer="Adagrad",
dnn_activation_fn=nn.relu,
dnn_dropout=None,
dnn_input_layer_partitioner=None,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=10000,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
dnn_to_tree_distillation_param=None,
use_core_versions=False,
override_global_step_value=None):
"""Initializes a DNNBoostedTreeCombinedEstimator instance.
Args:
dnn_hidden_units: List of hidden units per layer for DNN.
dnn_feature_columns: An iterable containing all the feature columns
used by the model's DNN.
tree_learner_config: A config for the tree learner.
num_trees: Number of trees to grow model to after training DNN.
tree_examples_per_layer: Number of examples to accumulate before
growing the tree a layer. This value has a big impact on model
        quality and should be set equal to the number of examples in the
        training dataset if possible. It can also be a function that computes
the number of examples based on the depth of the layer that's
being built.
head: `Head` instance.
model_dir: Directory for model exports.
config: `RunConfig` of the estimator.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN. If `None`, will use the Adagrad
optimizer with default learning rate.
dnn_activation_fn: Activation function applied to each layer of the DNN.
If `None`, will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability to drop out a given
unit in the DNN.
dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
Defaults to `min_max_variable_partitioner` with `min_slice_size`
64 << 20.
dnn_input_layer_to_tree: Whether to provide the DNN's input layer
as a feature to the tree.
dnn_steps_to_train: Number of steps to train dnn for before switching
to gbdt.
predict_with_tree_only: Whether to use only the tree model output as the
final prediction.
tree_feature_columns: An iterable containing all the feature columns
used by the model's boosted trees. If dnn_input_layer_to_tree is
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
float defines the weight of the distillation loss, and the loss_fn, for
computing distillation loss, takes dnn_logits, tree_logits and weight
tensor. If the entire tuple is None, no distillation will be applied. If
only the loss_fn is None, we will take the sigmoid/softmax cross entropy
        loss by default. When distillation is applied, `predict_with_tree_only`
will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      override_global_step_value: If set, the global step is reset to this
        value after training is done. This is particularly useful for
        hyperparameter tuning, which can't recognize early stopping due to the
        number of trees. If None, the global step is not overridden.
"""
def _model_fn(features, labels, mode, config):
return _dnn_tree_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
dnn_hidden_units=dnn_hidden_units,
dnn_feature_columns=dnn_feature_columns,
tree_learner_config=tree_learner_config,
num_trees=num_trees,
tree_examples_per_layer=tree_examples_per_layer,
config=config,
dnn_optimizer=dnn_optimizer,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
dnn_input_layer_partitioner=dnn_input_layer_partitioner,
dnn_input_layer_to_tree=dnn_input_layer_to_tree,
dnn_steps_to_train=dnn_steps_to_train,
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
use_core_versions=use_core_versions,
override_global_step_value=override_global_step_value)
super(DNNBoostedTreeCombinedEstimator, self).__init__(
model_fn=_model_fn,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
class CoreDNNBoostedTreeCombinedEstimator(core_estimator.Estimator):
"""Initializes a core version of DNNBoostedTreeCombinedEstimator.
Args:
dnn_hidden_units: List of hidden units per layer for DNN.
dnn_feature_columns: An iterable containing all the feature columns
used by the model's DNN.
tree_learner_config: A config for the tree learner.
num_trees: Number of trees to grow model to after training DNN.
tree_examples_per_layer: Number of examples to accumulate before
growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in the
      training dataset if possible. It can also be a function that computes
the number of examples based on the depth of the layer that's
being built.
head: `Head` instance.
model_dir: Directory for model exports.
config: `RunConfig` of the estimator.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN. If `None`, will use the Adagrad
optimizer with default learning rate.
dnn_activation_fn: Activation function applied to each layer of the DNN.
If `None`, will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability to drop out a given
unit in the DNN.
dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
Defaults to `min_max_variable_partitioner` with `min_slice_size`
64 << 20.
dnn_input_layer_to_tree: Whether to provide the DNN's input layer
as a feature to the tree.
dnn_steps_to_train: Number of steps to train dnn for before switching
to gbdt.
predict_with_tree_only: Whether to use only the tree model output as the
final prediction.
tree_feature_columns: An iterable containing all the feature columns
used by the model's boosted trees. If dnn_input_layer_to_tree is
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
float defines the weight of the distillation loss, and the loss_fn, for
computing distillation loss, takes dnn_logits, tree_logits and weight
tensor. If the entire tuple is None, no distillation will be applied. If
only the loss_fn is None, we will take the sigmoid/softmax cross entropy
      loss by default. When distillation is applied, `predict_with_tree_only`
will be set to True.
"""
def __init__(self,
dnn_hidden_units,
dnn_feature_columns,
tree_learner_config,
num_trees,
tree_examples_per_layer,
head,
model_dir=None,
config=None,
dnn_optimizer="Adagrad",
dnn_activation_fn=nn.relu,
dnn_dropout=None,
dnn_input_layer_partitioner=None,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=10000,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
dnn_to_tree_distillation_param=None):
def _model_fn(features, labels, mode, config):
return _dnn_tree_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
dnn_hidden_units=dnn_hidden_units,
dnn_feature_columns=dnn_feature_columns,
tree_learner_config=tree_learner_config,
num_trees=num_trees,
tree_examples_per_layer=tree_examples_per_layer,
config=config,
dnn_optimizer=dnn_optimizer,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
dnn_input_layer_partitioner=dnn_input_layer_partitioner,
dnn_input_layer_to_tree=dnn_input_layer_to_tree,
dnn_steps_to_train=dnn_steps_to_train,
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
output_type=model.ModelBuilderOutputType.ESTIMATOR_SPEC,
use_core_versions=True,
override_global_step_value=None)
super(CoreDNNBoostedTreeCombinedEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utill functions for distillation loss.
The distillation loss_fn will be called with the following:
Args:
dnn_logits: Tensor of logits from the dnn, treated as the "target". This will
be the output of a call to tf.stop_gradient().
tree_logits: Tensor of logits from the tree, treated as the "predictions".
example_weights: Tensor of example weights, or a single scalar.
Returns:
A scalar indicating the reduced loss for that batch of examples.
Note: we call the loss_fn defined in contrib head, which computes two losses:
the first for training and the second for reporting. We only take the first
one here.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
def _logits_to_label_for_tree(logits, n_classes):
if n_classes == 2:
return math_ops.sigmoid(logits)
else:
return nn.softmax(logits)
def create_dnn_to_tree_squared_loss_fn(n_classes):
"""Returns a squared loss function for dnn to tree distillation."""
def _dnn_to_tree_squared_loss(dnn_logits, tree_logits, example_weights):
return head_lib._mean_squared_loss( # pylint: disable=protected-access
labels=_logits_to_label_for_tree(dnn_logits, n_classes),
logits=_logits_to_label_for_tree(tree_logits, n_classes),
weights=example_weights)[0]
return _dnn_to_tree_squared_loss
def create_dnn_to_tree_cross_entropy_loss_fn(n_classes):
"""Returns a cross entropy loss function for dnn to tree distillation."""
def _dnn_to_tree_cross_entropy_loss(dnn_logits, tree_logits, example_weights):
if n_classes == 2:
return head_lib._log_loss_with_two_classes( # pylint: disable=protected-access
labels=_logits_to_label_for_tree(dnn_logits, n_classes),
logits=tree_logits,
weights=example_weights)[0]
else:
return head_lib._softmax_cross_entropy_loss( # pylint: disable=protected-access
labels=_logits_to_label_for_tree(dnn_logits, n_classes),
logits=tree_logits,
weights=example_weights)[0]
return _dnn_to_tree_cross_entropy_loss
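# Usage sketch (assuming a binary head, n_classes=2). The returned callable
# matches the (dnn_logits, tree_logits, example_weights) signature described
# in the module docstring; `dnn_logits`, `tree_logits` and `weights` are
# placeholder tensors here, with dnn_logits already passed through
# tf.stop_gradient() by the caller:
#
#   loss_fn = create_dnn_to_tree_cross_entropy_loss_fn(n_classes=2)
#   distillation_loss = loss_fn(dnn_logits, tree_logits, weights)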
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/distillation_loss.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the conversion code and for feature importances export.
Tests that cover conversion from TFBT format to a tensorflow.contrib.
decision_tree generic_tree_model format and feature importances export.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.contrib.boosted_trees.estimator_batch import custom_export_strategy
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class ConvertModelTest(test_util.TensorFlowTestCase):
def _make_trees(self):
dtec_str = """
trees {
nodes {
leaf {
vector {
value: -1
}
}
}
}
trees {
nodes {
dense_float_binary_split {
feature_column: 0
threshold: 1740.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 500
}
}
nodes {
leaf {
vector {
value: 0.6
}
}
}
nodes {
sparse_float_binary_split_default_left {
split {
feature_column: 0
threshold: 1500.0
left_id: 3
right_id: 4
}
}
node_metadata {
gain: 500
}
}
nodes {
categorical_id_binary_split {
feature_column: 0
feature_id: 5
left_id: 5
right_id: 6
}
node_metadata {
gain: 500
}
}
nodes {
leaf {
vector {
value: 0.8
}
}
}
nodes {
leaf {
vector {
value: 0.5
}
}
}
nodes {
sparse_float_binary_split_default_right {
split {
feature_column: 1
dimension_id:3
threshold: -0.4
left_id: 7
right_id: 8
}
}
node_metadata {
gain: 3600
}
}
nodes {
leaf {
vector {
value: 0.36
}
}
}
nodes {
leaf {
vector {
value: 18
}
}
}
}
tree_weights: 1.0
tree_weights: 0.1
"""
dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(dtec_str, dtec)
feature_columns = [
"feature_b",
"feature_a",
"feature_a_m",
"feature_d",
]
return dtec, feature_columns
def testConvertModel(self):
dtec, feature_columns = self._make_trees()
# Assume 2 sparse float columns, one with 1 dimension, the second one with
# 5 dimensions.
# The feature columns in the order they were added.
out = custom_export_strategy.convert_to_universal_format(
dtec, feature_columns, 1, 2, 1)
# Features a and a_m are sparse float features, a_m is multidimensional.
expected_tree = """
features { key: "feature_a_0" }
features { key: "feature_a_m_3" }
features { key: "feature_b" }
features { key: "feature_d" }
model {
ensemble {
summation_combination_technique {
}
members {
submodel {
decision_tree {
nodes {
node_id {
}
leaf {
vector {
value {
float_value: -1.0
}
}
}
}
}
}
submodel_id {
}
}
members {
submodel {
decision_tree {
nodes {
node_id {
}
binary_node {
left_child_id {
value: 1
}
right_child_id {
value: 2
}
inequality_left_child_test {
feature_id {
id {
value: "feature_b"
}
}
threshold {
float_value: 1740.0
}
}
}
}
nodes {
node_id {
value: 1
}
leaf {
vector {
value {
float_value: 0.06
}
}
}
}
nodes {
node_id {
value: 2
}
binary_node {
left_child_id {
value: 3
}
right_child_id {
value: 4
}
inequality_left_child_test {
feature_id {
id {
value: "feature_a_0"
}
}
threshold {
float_value: 1500.0
}
}
}
}
nodes {
node_id {
value: 3
}
binary_node {
left_child_id {
value: 5
}
right_child_id {
value: 6
}
default_direction: RIGHT
custom_left_child_test {
[type.googleapis.com/tensorflow.decision_trees.MatchingValuesTest] {
feature_id {
id {
value: "feature_d"
}
}
value {
int64_value: 5
}
}
}
}
}
nodes {
node_id {
value: 4
}
leaf {
vector {
value {
float_value: 0.08
}
}
}
}
nodes {
node_id {
value: 5
}
leaf {
vector {
value {
float_value: 0.05
}
}
}
}
nodes {
node_id {
value: 6
}
binary_node {
left_child_id {
value: 7
}
right_child_id {
value: 8
}
default_direction: RIGHT
inequality_left_child_test {
feature_id {
id {
value: "feature_a_m_3"
}
}
threshold {
float_value: -0.4
}
}
}
}
nodes {
node_id {
value: 7
}
leaf {
vector {
value {
float_value: 0.036
}
}
}
}
nodes {
node_id {
value: 8
}
leaf {
vector {
value {
float_value: 1.8
}
}
}
}
}
}
submodel_id {
value: 1
}
}
}
}"""
self.assertProtoEquals(expected_tree, out)
def testFeatureImportance(self):
dtec, feature_columns = self._make_trees()
feature_importances = custom_export_strategy._get_feature_importances(
dtec, feature_columns, 1, 2, 1)
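    # Reading of the fixture above (not an authoritative statement of the
    # algorithm): the second tree has weight 0.1, so its split gains of 500,
    # 500, 500 and 3600 yield the importances 50, 50, 50 and 360 checked below.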
self.assertItemsEqual(
["feature_b", "feature_a_0", "feature_a_m_3", "feature_d"],
feature_importances.keys())
self.assertAlmostEqual(50.0, feature_importances["feature_b"], places=4)
self.assertAlmostEqual(50.0, feature_importances["feature_a_0"], places=4)
self.assertAlmostEqual(50.0, feature_importances["feature_d"], places=4)
self.assertAlmostEqual(
360.0, feature_importances["feature_a_m_3"], places=4)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GBDT estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib.boosted_trees.estimator_batch import estimator
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column as contrib_feature_column
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.training import checkpoint_utils
def _train_input_fn():
features = {"x": constant_op.constant([[2.], [1.], [1.]])}
label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
return features, label
def _multiclass_train_input_fn():
features = {
"x": constant_op.constant([[2.], [1.], [1.], [5.], [3.5], [4.6], [3.5]])
}
label = constant_op.constant([[1], [0], [0], [2], [2], [0], [1]],
dtype=dtypes.int32)
return features, label
def _ranking_train_input_fn():
features = {
"a.f1": constant_op.constant([[3.], [0.3], [1.]]),
"a.f2": constant_op.constant([[0.1], [3.], [1.]]),
"b.f1": constant_op.constant([[13.], [0.4], [5.]]),
"b.f2": constant_op.constant([[1.], [3.], [0.01]]),
}
label = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32)
return features, label
def _eval_input_fn():
features = {"x": constant_op.constant([[1.], [2.], [2.]])}
label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
return features, label
def _infer_ranking_train_input_fn():
features = {
"f1": constant_op.constant([[3.], [2], [1.]]),
"f2": constant_op.constant([[0.1], [3.], [1.]])
}
return features, None
_QUANTILE_REGRESSION_SIZE = 1000
def _quantile_regression_input_fns(two_dimension=False):
# The data generation is taken from
# http://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
def g(x):
"""The function to predict."""
return x * np.cos(x)
# Training data.
x = np.atleast_2d(np.random.uniform(0, 10.0,
size=_QUANTILE_REGRESSION_SIZE)).T
x = x.astype(np.float32)
# Labels.
if not two_dimension:
y = f(x).ravel()
else:
y = np.column_stack((f(x).ravel(), g(x).ravel()))
# Add random noise.
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y_original = y.astype(np.float32)
if not two_dimension:
y = y.reshape(_QUANTILE_REGRESSION_SIZE, 1)
train_input_fn = numpy_io.numpy_input_fn(
x=x,
y=y,
batch_size=_QUANTILE_REGRESSION_SIZE,
num_epochs=None,
shuffle=True)
# Test on the training data to make sure the predictions are calibrated.
test_input_fn = numpy_io.numpy_input_fn(
x=x,
y=y,
batch_size=_QUANTILE_REGRESSION_SIZE,
num_epochs=1,
shuffle=False)
return train_input_fn, test_input_fn, y_original
class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
def setUp(self):
self._export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(self._export_dir_base)
def _assert_checkpoint(self, model_dir, global_step):
reader = checkpoint_utils.load_checkpoint(model_dir)
self.assertEqual(global_step, reader.get_tensor(ops.GraphKeys.GLOBAL_STEP))
def testFitAndEvaluateDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
def testThatLeafIndexIsInPredictions(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")],
output_leaf_index=True)
classifier.fit(input_fn=_train_input_fn, steps=15)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("leaf_index" in prediction_dict)
self.assertTrue("logits" in prediction_dict)
def testFitAndEvaluateDontThrowExceptionWithCoreForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
# Use core head
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
model = estimator.GradientBoostedDecisionTreeEstimator(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
model.fit(input_fn=_train_input_fn, steps=15)
model.evaluate(input_fn=_eval_input_fn, steps=1)
model.export(self._export_dir_base)
def testFitAndEvaluateDontThrowExceptionWithCoreForClassifier(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
def testFitAndEvaluateDontThrowExceptionWithCoreForRegressor(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
regressor = estimator.GradientBoostedDecisionTreeRegressor(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
regressor.fit(input_fn=_train_input_fn, steps=15)
regressor.evaluate(input_fn=_eval_input_fn, steps=1)
regressor.export(self._export_dir_base)
  def testRankingDontThrowExceptionForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
model = estimator.GradientBoostedDecisionTreeRanker(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
use_core_libs=True,
feature_columns=[
core_feature_column.numeric_column("f1"),
core_feature_column.numeric_column("f2")
],
ranking_model_pair_keys=("a", "b"))
model.fit(input_fn=_ranking_train_input_fn, steps=1000)
model.evaluate(input_fn=_ranking_train_input_fn, steps=1)
model.predict(input_fn=_infer_ranking_train_input_fn)
def testDoesNotOverrideGlobalSteps(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 2
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")],
output_leaf_index=False)
classifier.fit(input_fn=_train_input_fn, steps=15)
    # Without a global step override, the checkpoint reflects the 5 steps
    # that were actually used.
self._assert_checkpoint(classifier.model_dir, global_step=5)
def testOverridesGlobalSteps(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 2
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")],
output_leaf_index=False,
override_global_step_value=10000000)
classifier.fit(input_fn=_train_input_fn, steps=15)
self._assert_checkpoint(classifier.model_dir, global_step=10000000)
def testFitAndEvaluateMultiClassTreePerClassDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=learner_config.num_classes,
num_trees=1,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("classes" in prediction_dict)
def testFitAndEvaluateMultiClassDiagonalDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=learner_config.num_classes,
num_trees=1,
examples_per_layer=7,
model_dir=model_dir,
config=config,
center_bias=False,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("classes" in prediction_dict)
def testFitAndEvaluateMultiClassFullDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=learner_config.num_classes,
num_trees=1,
examples_per_layer=7,
model_dir=model_dir,
config=config,
center_bias=False,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("classes" in prediction_dict)
  # One-dimensional quantile regression.
def testQuantileRegression(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 6
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.tree_complexity = (
1.0 / _QUANTILE_REGRESSION_SIZE)
train_input_fn, test_input_fn, y = _quantile_regression_input_fns()
    # 95th percentile.
model_upper = estimator.GradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.95],
learner_config=learner_config,
num_trees=12,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_upper.fit(input_fn=train_input_fn, steps=1000)
result_iter = model_upper.predict(input_fn=test_input_fn)
upper = []
for prediction_dict in result_iter:
upper.append(prediction_dict["scores"])
frac_below_upper = round(1. * np.count_nonzero(upper > y) / len(y), 3)
# +/- 3%
self.assertTrue(frac_below_upper >= 0.92)
self.assertTrue(frac_below_upper <= 0.98)
# Multi-dimensional quantile regression.
def testQuantileRegressionMultiDimLabel(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 6
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.tree_complexity = (
1.0 / _QUANTILE_REGRESSION_SIZE)
train_input_fn, test_input_fn, y = _quantile_regression_input_fns(
two_dimension=True)
    # 95th percentile.
model_upper = estimator.GradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.95],
learner_config=learner_config,
label_dimension=2,
num_trees=18,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_upper.fit(input_fn=train_input_fn, steps=1000)
result_iter = model_upper.predict(input_fn=test_input_fn)
upper = []
for prediction_dict in result_iter:
upper.append(prediction_dict["scores"])
count_below_upper = np.count_nonzero(upper > y, axis=0)
count_both_below_upper = np.count_nonzero(np.prod(upper > y, axis=1))
frac_below_upper_0 = round(1. * count_below_upper[0] / len(y), 3)
frac_below_upper_1 = round(1. * count_below_upper[1] / len(y), 3)
frac_both_below_upper = round(1. * count_both_below_upper / len(y), 3)
# +/- 3%
self.assertTrue(frac_below_upper_0 >= 0.92)
self.assertTrue(frac_below_upper_0 <= 0.98)
self.assertTrue(frac_below_upper_1 >= 0.92)
self.assertTrue(frac_below_upper_1 <= 0.98)
self.assertTrue(frac_both_below_upper >= 0.91)
self.assertTrue(frac_both_below_upper <= 0.99)
class CoreGradientBoostedDecisionTreeEstimators(test_util.TensorFlowTestCase):
def testTrainEvaluateInferDoesNotThrowError(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreGradientBoostedDecisionTreeEstimator(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
est.evaluate(input_fn=_eval_input_fn, steps=1)
est.predict(input_fn=_eval_input_fn)
  def testRankingDontThrowExceptionForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
est = estimator.CoreGradientBoostedDecisionTreeRanker(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[
core_feature_column.numeric_column("f1"),
core_feature_column.numeric_column("f2")
],
ranking_model_pair_keys=("a", "b"))
# Train for a few steps.
est.train(input_fn=_ranking_train_input_fn, steps=1000)
est.evaluate(input_fn=_ranking_train_input_fn, steps=1)
est.predict(input_fn=_infer_ranking_train_input_fn)
  def testFitAndEvaluateMultiClassTreePerClassDontThrowException(self):
n_classes = 3
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = n_classes
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
head_fn = estimator.core_multiclass_head(n_classes=n_classes)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
learner_config=learner_config,
head=head_fn,
num_trees=1,
center_bias=False,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
classifier.predict(input_fn=_eval_input_fn)
def testFitAndEvaluateMultiClassDiagonalDontThrowException(self):
n_classes = 3
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = n_classes
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
head_fn = estimator.core_multiclass_head(n_classes=n_classes)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
learner_config=learner_config,
head=head_fn,
num_trees=1,
center_bias=False,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
classifier.predict(input_fn=_eval_input_fn)
def testFitAndEvaluateMultiClassFullDontThrowException(self):
n_classes = 3
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = n_classes
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
head_fn = estimator.core_multiclass_head(n_classes=n_classes)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
learner_config=learner_config,
head=head_fn,
num_trees=1,
center_bias=False,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
classifier.predict(input_fn=_eval_input_fn)
def testWeightedCategoricalColumn(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
feature_columns = [
core_feature_column.weighted_categorical_column(
categorical_column=core_feature_column
.categorical_column_with_vocabulary_list(
key="word", vocabulary_list=["the", "cat", "dog"]),
weight_feature_key="weight")
]
labels = np.array([[1], [1], [0], [0.]], dtype=np.float32)
def _make_input_fn():
def _input_fn():
features_dict = {}
        # Sparse tensor representing:
        #   example 0: "the", "cat"
        #   example 1: "dog"
        #   example 2: -
        #   example 3: "the"
        # Word weights: "the" = 1, "cat" = 5, "dog" = 6.
features_dict["word"] = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [3, 0]],
values=constant_op.constant(["the", "cat", "dog", "the"],
dtype=dtypes.string),
dense_shape=[4, 3])
features_dict["weight"] = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [3, 0]],
values=[1., 5., 6., 1.],
dense_shape=[4, 3])
return features_dict, labels
return _input_fn
est = estimator.CoreGradientBoostedDecisionTreeEstimator(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=feature_columns)
input_fn = _make_input_fn()
est.train(input_fn=input_fn, steps=100)
est.evaluate(input_fn=input_fn, steps=1)
est.predict(input_fn=input_fn)
  # Quantile regression in the core estimator is the same as in the non-core
  # estimator, so we just check that it does not fail.
  def testQuantileRegressionDoesNotThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.tree_complexity = (
1.0 / _QUANTILE_REGRESSION_SIZE)
train_input_fn, test_input_fn, y = _quantile_regression_input_fns()
y = y.reshape(_QUANTILE_REGRESSION_SIZE, 1)
    # 95th percentile.
model_upper = estimator.CoreGradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.95],
learner_config=learner_config,
num_trees=1,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_upper.train(input_fn=train_input_fn, steps=1000)
result_iter = model_upper.predict(input_fn=test_input_fn)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient boosted trees implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.boosted_trees.estimator_batch import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GTFlow Model definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.contrib import learn
from tensorflow.contrib.boosted_trees.estimator_batch import estimator_utils
from tensorflow.contrib.boosted_trees.estimator_batch import trainer_hooks
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import training_util
class ModelBuilderOutputType(object):
MODEL_FN_OPS = 0
ESTIMATOR_SPEC = 1
def model_builder(features,
labels,
mode,
params,
config,
output_type=ModelBuilderOutputType.MODEL_FN_OPS):
"""Multi-machine batch gradient descent tree model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: Labels used to train on.
mode: Mode we are in. (TRAIN/EVAL/INFER)
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* learner_config: A config for the learner.
* feature_columns: An iterable containing all the feature columns used by
the model.
* examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
* weight_column_name: The name of weight column.
* center_bias: Whether a separate tree should be created for first fitting
the bias.
      * override_global_step_value: If set, the global step is reset to this
        value once training is done. This is particularly useful for
        hyperparameter tuning, which otherwise cannot recognize early stopping
        caused by reaching the requested number of trees. If None, the global
        step is not overridden.
config: `RunConfig` of the estimator.
output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
(new interface).
Returns:
    A `ModelFnOps` object, or an `EstimatorSpec` when `output_type` is
    ESTIMATOR_SPEC.
Raises:
ValueError: if inputs are not valid.
"""
head = params["head"]
learner_config = params["learner_config"]
examples_per_layer = params["examples_per_layer"]
feature_columns = params["feature_columns"]
weight_column_name = params["weight_column_name"]
num_trees = params["num_trees"]
use_core_libs = params["use_core_libs"]
logits_modifier_function = params["logits_modifier_function"]
output_leaf_index = params["output_leaf_index"]
override_global_step_value = params.get("override_global_step_value", None)
num_quantiles = params["num_quantiles"]
if features is None:
raise ValueError("At least one feature must be specified.")
if config is None:
raise ValueError("Missing estimator RunConfig.")
if config.session_config is not None:
session_config = config.session_config
session_config.allow_soft_placement = True
else:
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
config = config.replace(session_config=session_config)
center_bias = params["center_bias"]
if isinstance(features, ops.Tensor):
features = {features.name: features}
# Make a shallow copy of features to ensure downstream usage
# is unaffected by modifications in the model function.
training_features = copy.copy(features)
training_features.pop(weight_column_name, None)
global_step = training_util.get_global_step()
with ops.device(global_step.device):
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config="", # Initialize an empty ensemble.
name="ensemble_model")
# Create GBDT model.
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=config.is_chief,
num_ps_replicas=config.num_ps_replicas,
ensemble_handle=ensemble_handle,
center_bias=center_bias,
examples_per_layer=examples_per_layer,
learner_config=learner_config,
feature_columns=feature_columns,
logits_dimension=head.logits_dimension,
features=training_features,
use_core_columns=use_core_libs,
output_leaf_index=output_leaf_index,
num_quantiles=num_quantiles)
with ops.name_scope("gbdt", "gbdt_optimizer"):
predictions_dict = gbdt_model.predict(mode)
logits = predictions_dict["predictions"]
if logits_modifier_function:
logits = logits_modifier_function(logits, features, mode)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
update_op = gbdt_model.train(loss, predictions_dict, labels)
with ops.control_dependencies(
[update_op]), (ops.colocate_with(global_step)):
update_op = state_ops.assign_add(global_step, 1).op
return update_op
create_estimator_spec_op = getattr(head, "create_estimator_spec", None)
training_hooks = []
if num_trees:
if center_bias:
num_trees += 1
finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()
training_hooks.append(
trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
finalized_trees,
override_global_step_value))
if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
if use_core_libs and callable(create_estimator_spec_op):
model_fn_ops = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
model_fn_ops)
else:
model_fn_ops = head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
gbdt_batch.LEAF_INDEX]
model_fn_ops.training_hooks.extend(training_hooks)
return model_fn_ops
elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
assert callable(create_estimator_spec_op)
estimator_spec = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
estimator_spec.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
gbdt_batch.LEAF_INDEX]
estimator_spec = estimator_spec._replace(
training_hooks=training_hooks + list(estimator_spec.training_hooks))
return estimator_spec
return model_fn_ops
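# Illustrative sketch, not part of the original module: `model_builder` is
# normally invoked by the estimator classes in estimator.py, which supply a
# `params` dict containing the keys read above. A minimal, hypothetical
# example of such a dict (names like `head_fn` and `columns` are
# placeholders supplied by the caller):
#
#   params = {
#       "head": head_fn,
#       "learner_config": learner_config,
#       "feature_columns": columns,
#       "examples_per_layer": 1000,
#       "weight_column_name": None,
#       "num_trees": 10,
#       "use_core_libs": False,
#       "logits_modifier_function": None,
#       "output_leaf_index": False,
#       "override_global_step_value": None,
#       "num_quantiles": 100,
#       "center_bias": True,
#   }
#   model_fn_ops = model_builder(features, labels, mode, params, config)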
def ranking_model_builder(features,
labels,
mode,
params,
config,
output_type=ModelBuilderOutputType.MODEL_FN_OPS):
"""Multi-machine batch gradient descent tree model for ranking.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: Labels used to train on.
mode: Mode we are in. (TRAIN/EVAL/INFER)
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* learner_config: A config for the learner.
* feature_columns: An iterable containing all the feature columns used by
the model.
* examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
* weight_column_name: The name of weight column.
* center_bias: Whether a separate tree should be created for first fitting
the bias.
* ranking_model_pair_keys (Optional): Keys to distinguish between features
for left and right part of the training pairs for ranking. For example,
for an Example with features "a.f1" and "b.f1", the keys would be
("a", "b").
      * override_global_step_value: If set, the global step is reset to this
        value once training is done. This is particularly useful for
        hyperparameter tuning, which otherwise cannot recognize early stopping
        caused by reaching the requested number of trees. If None, the global
        step is not overridden.
config: `RunConfig` of the estimator.
output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
(new interface).
Returns:
    A `ModelFnOps` object, or an `EstimatorSpec` when `output_type` is
    ESTIMATOR_SPEC.
Raises:
ValueError: if inputs are not valid.
"""
head = params["head"]
learner_config = params["learner_config"]
examples_per_layer = params["examples_per_layer"]
feature_columns = params["feature_columns"]
weight_column_name = params["weight_column_name"]
num_trees = params["num_trees"]
use_core_libs = params["use_core_libs"]
logits_modifier_function = params["logits_modifier_function"]
output_leaf_index = params["output_leaf_index"]
ranking_model_pair_keys = params["ranking_model_pair_keys"]
override_global_step_value = params.get("override_global_step_value", None)
num_quantiles = params["num_quantiles"]
if features is None:
raise ValueError("At least one feature must be specified.")
if config is None:
raise ValueError("Missing estimator RunConfig.")
center_bias = params["center_bias"]
if isinstance(features, ops.Tensor):
features = {features.name: features}
# Make a shallow copy of features to ensure downstream usage
# is unaffected by modifications in the model function.
training_features = copy.copy(features)
training_features.pop(weight_column_name, None)
global_step = training_util.get_global_step()
with ops.device(global_step.device):
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config="", # Initialize an empty ensemble.
name="ensemble_model")
# Extract the features.
if mode == learn.ModeKeys.TRAIN or mode == learn.ModeKeys.EVAL:
# For ranking pairwise training, we extract two sets of features.
if len(ranking_model_pair_keys) != 2:
raise ValueError("You must provide keys for ranking.")
left_pair_key = ranking_model_pair_keys[0]
right_pair_key = ranking_model_pair_keys[1]
if left_pair_key is None or right_pair_key is None:
raise ValueError("Both pair keys should be provided for ranking.")
features_1 = {}
features_2 = {}
for name in training_features:
feature = training_features[name]
new_name = name[2:]
if name.startswith(left_pair_key + "."):
features_1[new_name] = feature
else:
assert name.startswith(right_pair_key + ".")
features_2[new_name] = feature
main_features = features_1
supplementary_features = features_2
else:
# For non-ranking or inference ranking, we have only 1 set of features.
main_features = training_features
# Create GBDT model.
gbdt_model_main = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=config.is_chief,
num_ps_replicas=config.num_ps_replicas,
ensemble_handle=ensemble_handle,
center_bias=center_bias,
examples_per_layer=examples_per_layer,
learner_config=learner_config,
feature_columns=feature_columns,
logits_dimension=head.logits_dimension,
features=main_features,
use_core_columns=use_core_libs,
output_leaf_index=output_leaf_index,
num_quantiles=num_quantiles)
with ops.name_scope("gbdt", "gbdt_optimizer"):
# Logits for inference.
if mode == learn.ModeKeys.INFER:
predictions_dict = gbdt_model_main.predict(mode)
logits = predictions_dict[gbdt_batch.PREDICTIONS]
if logits_modifier_function:
logits = logits_modifier_function(logits, features, mode)
else:
gbdt_model_supplementary = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=config.is_chief,
num_ps_replicas=config.num_ps_replicas,
ensemble_handle=ensemble_handle,
center_bias=center_bias,
examples_per_layer=examples_per_layer,
learner_config=learner_config,
feature_columns=feature_columns,
logits_dimension=head.logits_dimension,
features=supplementary_features,
use_core_columns=use_core_libs,
output_leaf_index=output_leaf_index)
# Logits for train and eval.
if not supplementary_features:
raise ValueError("Features for ranking must be specified.")
predictions_dict_1 = gbdt_model_main.predict(mode)
predictions_1 = predictions_dict_1[gbdt_batch.PREDICTIONS]
predictions_dict_2 = gbdt_model_supplementary.predict(mode)
predictions_2 = predictions_dict_2[gbdt_batch.PREDICTIONS]
logits = predictions_1 - predictions_2
if logits_modifier_function:
logits = logits_modifier_function(logits, features, mode)
predictions_dict = predictions_dict_1
predictions_dict[gbdt_batch.PREDICTIONS] = logits
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
update_op = gbdt_model_main.train(loss, predictions_dict, labels)
with ops.control_dependencies(
[update_op]), (ops.colocate_with(global_step)):
update_op = state_ops.assign_add(global_step, 1).op
return update_op
create_estimator_spec_op = getattr(head, "create_estimator_spec", None)
training_hooks = []
if num_trees:
if center_bias:
num_trees += 1
finalized_trees, attempted_trees = (
gbdt_model_main.get_number_of_trees_tensor())
training_hooks.append(
trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
finalized_trees,
override_global_step_value))
if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
if use_core_libs and callable(create_estimator_spec_op):
model_fn_ops = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
model_fn_ops)
else:
model_fn_ops = head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
gbdt_batch.LEAF_INDEX]
model_fn_ops.training_hooks.extend(training_hooks)
return model_fn_ops
elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
assert callable(create_estimator_spec_op)
estimator_spec = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
estimator_spec = estimator_spec._replace(
training_hooks=training_hooks + list(estimator_spec.training_hooks))
return estimator_spec
return model_fn_ops
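# Illustrative note, not part of the original module: with
# ranking_model_pair_keys=("a", "b"), the training features are expected to
# arrive in pairs that share a suffix, e.g.
#
#   {"a.f1": ..., "a.f2": ..., "b.f1": ..., "b.f2": ...}
#
# During TRAIN/EVAL the loop above splits these into main features
# ({"f1": ..., "f2": ...} from the "a." side) and supplementary features
# (from the "b." side); the ranking logits are then the difference between
# the two models' predictions. Note that `name[2:]` assumes a
# single-character pair key followed by a dot, matching the "a"/"b"
# convention used in the tests.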
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for converting between core and contrib feature columns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as contrib_model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output
_CORE_MODE_TO_CONTRIB_MODE_ = {
model_fn_lib.ModeKeys.TRAIN: contrib_model_fn_lib.ModeKeys.TRAIN,
model_fn_lib.ModeKeys.EVAL: contrib_model_fn_lib.ModeKeys.EVAL,
model_fn_lib.ModeKeys.PREDICT: contrib_model_fn_lib.ModeKeys.INFER
}
def _core_mode_to_contrib_mode(mode):
return _CORE_MODE_TO_CONTRIB_MODE_[mode]
def _export_outputs_to_output_alternatives(export_outputs):
"""Converts EstimatorSpec.export_outputs to output_alternatives.
Args:
export_outputs: export_outputs created by create_estimator_spec.
Returns:
converted output_alternatives.
"""
output = {}
if export_outputs is not None:
for key, value in export_outputs.items():
if isinstance(value, export_output.ClassificationOutput):
exported_predictions = {
prediction_key.PredictionKey.SCORES: value.scores,
prediction_key.PredictionKey.CLASSES: value.classes
}
output[key] = (constants.ProblemType.CLASSIFICATION,
exported_predictions)
return output
return None
def estimator_spec_to_model_fn_ops(estimator_spec, export_alternatives=False):
if export_alternatives:
alternatives = _export_outputs_to_output_alternatives(
estimator_spec.export_outputs)
else:
alternatives = []
return model_fn.ModelFnOps(
mode=_core_mode_to_contrib_mode(estimator_spec.mode),
predictions=estimator_spec.predictions,
loss=estimator_spec.loss,
train_op=estimator_spec.train_op,
eval_metric_ops=estimator_spec.eval_metric_ops,
output_alternatives=alternatives)
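# Illustrative sketch, not part of the original module: this helper lets the
# output of a core head be returned from a contrib learn model_fn, e.g.
#
#   spec = head.create_estimator_spec(
#       features=features, mode=mode, labels=labels,
#       train_op_fn=_train_op_fn, logits=logits)
#   model_fn_ops = estimator_spec_to_model_fn_ops(spec)
#
# which is how model.py bridges the two interfaces when use_core_libs is set.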
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/estimator_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for combined DNN + GBDT estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.contrib.boosted_trees.estimator_batch import dnn_tree_combined_estimator as estimator
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.estimator import exporter
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.export import export
from tensorflow.python.ops import parsing_ops
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import googletest
from tensorflow.python.training import checkpoint_utils
def _train_input_fn():
features = {
"x": constant_op.constant([[2.], [1.], [1.]])
}
label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
return features, label
def _eval_input_fn():
features = {
"x": constant_op.constant([[1.], [2.], [2.]])
}
label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
return features, label
class DNNBoostedTreeCombinedTest(test_util.TensorFlowTestCase):
def testClassifierContract(self):
estimator_test_utils.assert_estimator_contract(
self, estimator.DNNBoostedTreeCombinedClassifier)
def testRegressorContract(self):
estimator_test_utils.assert_estimator_contract(
self, estimator.DNNBoostedTreeCombinedRegressor)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, estimator.DNNBoostedTreeCombinedEstimator)
def testNoDNNFeatureColumns(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
with self.assertRaisesRegexp(
ValueError,
"dnn_feature_columns must be specified"):
classifier = estimator.DNNBoostedTreeCombinedClassifier(
dnn_hidden_units=[1],
dnn_feature_columns=[],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
n_classes=2)
classifier.fit(input_fn=_train_input_fn, steps=5)
def testFitAndEvaluateDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.DNNBoostedTreeCombinedClassifier(
dnn_hidden_units=[1],
dnn_feature_columns=[feature_column.real_valued_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
n_classes=2,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=False,
tree_feature_columns=[feature_column.real_valued_column("x")])
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
def testFitAndEvaluateWithDistillation(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.DNNBoostedTreeCombinedClassifier(
dnn_hidden_units=[1],
dnn_feature_columns=[feature_column.real_valued_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
n_classes=2,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=False,
tree_feature_columns=[feature_column.real_valued_column("x")],
dnn_to_tree_distillation_param=(1, None))
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
class CoreDNNBoostedTreeCombinedTest(test_util.TensorFlowTestCase):
def _assert_checkpoint(self, model_dir, global_step):
reader = checkpoint_utils.load_checkpoint(model_dir)
self.assertEqual(global_step, reader.get_tensor(ops.GraphKeys.GLOBAL_STEP))
def testTrainEvaluateInferDoesNotThrowErrorWithNoDnnInput(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreDNNBoostedTreeCombinedEstimator(
head=head_fn,
dnn_hidden_units=[1],
dnn_feature_columns=[core_feature_column.numeric_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=False,
tree_feature_columns=[core_feature_column.numeric_column("x")])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
    # 10 steps for the DNN, 3 for one tree of depth 3, plus 1 after the tree
    # is finalized.
self._assert_checkpoint(est.model_dir, global_step=14)
res = est.evaluate(input_fn=_eval_input_fn, steps=1)
self.assertLess(0.5, res["auc"])
est.predict(input_fn=_eval_input_fn)
def testTrainEvaluateInferDoesNotThrowErrorWithDnnInput(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreDNNBoostedTreeCombinedEstimator(
head=head_fn,
dnn_hidden_units=[1],
dnn_feature_columns=[core_feature_column.numeric_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=True,
tree_feature_columns=[])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
res = est.evaluate(input_fn=_eval_input_fn, steps=1)
self.assertLess(0.5, res["auc"])
est.predict(input_fn=_eval_input_fn)
def testTrainEvaluateWithDnnForInputAndTreeForPredict(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreDNNBoostedTreeCombinedEstimator(
head=head_fn,
dnn_hidden_units=[1],
dnn_feature_columns=[core_feature_column.numeric_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=True,
predict_with_tree_only=True,
dnn_to_tree_distillation_param=(0.5, None),
tree_feature_columns=[])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
res = est.evaluate(input_fn=_eval_input_fn, steps=1)
self.assertLess(0.5, res["auc"])
est.predict(input_fn=_eval_input_fn)
serving_input_fn = (
export.build_parsing_serving_input_receiver_fn(
feature_spec={"x": parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32)}))
base_exporter = exporter.FinalExporter(
name="Servo",
serving_input_receiver_fn=serving_input_fn,
assets_extra=None)
export_path = os.path.join(model_dir, "export")
base_exporter.export(
est,
export_path=export_path,
checkpoint_path=None,
eval_result={},
is_the_final_export=True)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hooks for use with GTFlow Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.summary_io import SummaryWriterCache
class FeatureImportanceSummarySaver(session_run_hook.SessionRunHook):
"""Hook to save feature importance summaries."""
def __init__(self, model_dir, every_n_steps=1):
"""Create a FeatureImportanceSummarySaver Hook.
This hook creates scalar summaries representing feature importance
for each feature column during training.
Args:
model_dir: model base output directory.
every_n_steps: frequency, in number of steps, for logging summaries.
Raises:
ValueError: If one of the arguments is invalid.
"""
if model_dir is None:
raise ValueError("model dir must be specified.")
self._model_dir = model_dir
self._every_n_steps = every_n_steps
self._last_triggered_step = None
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use FeatureImportanceSummarySaver.")
graph = ops.get_default_graph()
self._feature_names_tensor = graph.get_tensor_by_name(
"gbdt/feature_names:0")
self._feature_usage_counts_tensor = graph.get_tensor_by_name(
"gbdt/feature_usage_counts:0")
self._feature_gains_tensor = graph.get_tensor_by_name(
"gbdt/feature_gains:0")
def before_run(self, run_context):
del run_context # Unused by feature importance summary saver hook.
requests = {
"global_step": self._global_step_tensor,
"feature_names": self._feature_names_tensor,
"feature_usage_counts": self._feature_usage_counts_tensor,
"feature_gains": self._feature_gains_tensor
}
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
del run_context # Unused by feature importance summary saver hook.
# Read result tensors.
global_step = run_values.results["global_step"]
feature_names = run_values.results["feature_names"]
feature_usage_counts = run_values.results["feature_usage_counts"]
feature_gains = run_values.results["feature_gains"]
# Ensure summaries are logged at desired frequency
if (self._last_triggered_step is not None and
global_step < self._last_triggered_step + self._every_n_steps):
return
# Validate tensors.
if (len(feature_names) != len(feature_usage_counts) or
len(feature_names) != len(feature_gains)):
raise RuntimeError(
"Feature names and importance measures have inconsistent lengths.")
# Compute total usage.
total_usage_count = 0.0
for usage_count in feature_usage_counts:
total_usage_count += usage_count
usage_count_norm = 1.0 / total_usage_count if total_usage_count else 1.0
# Compute total gain.
total_gain = 0.0
for gain in feature_gains:
total_gain += gain
gain_norm = 1.0 / total_gain if total_gain else 1.0
# Output summary for each feature.
self._last_triggered_step = global_step
for (name, usage_count, gain) in zip(feature_names, feature_usage_counts,
feature_gains):
output_dir = os.path.join(self._model_dir, name.decode("utf-8"))
summary_writer = SummaryWriterCache.get(output_dir)
usage_count_summary = Summary(value=[
Summary.Value(
tag="feature_importance/usage_counts", simple_value=usage_count)
])
usage_fraction_summary = Summary(value=[
Summary.Value(
tag="feature_importance/usage_fraction",
simple_value=usage_count * usage_count_norm)
])
summary_writer.add_summary(usage_count_summary, global_step)
summary_writer.add_summary(usage_fraction_summary, global_step)
gains_summary = Summary(value=[
Summary.Value(tag="feature_importance/gains", simple_value=gain)
])
gains_fraction_summary = Summary(value=[
Summary.Value(
tag="feature_importance/gains_fraction",
simple_value=gain * gain_norm)
])
summary_writer.add_summary(gains_summary, global_step)
summary_writer.add_summary(gains_fraction_summary, global_step)
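# Illustrative note, not part of the original module: the fractions written
# above are plain normalizations. For example, with usage counts
# {"age": 3, "income": 1} the hook records usage_fraction 0.75 for "age" and
# 0.25 for "income"; feature gains are normalized the same way.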
class FeedFnHook(session_run_hook.SessionRunHook):
"""Runs feed_fn and sets the feed_dict accordingly."""
def __init__(self, feed_fn):
self.feed_fn = feed_fn
def before_run(self, run_context):
del run_context # unused by FeedFnHook.
return session_run_hook.SessionRunArgs(fetches=None, feed_dict=self.feed_fn)
class StopAfterNTrees(session_run_hook.SessionRunHook):
"""Stop training after building N full trees."""
def __init__(self, n, num_attempted_trees_tensor, num_finalized_trees_tensor,
override_global_step_value=None):
self._num_trees = n
# num_attempted_trees_tensor and num_finalized_trees_tensor are both
# tensors.
self._num_attempted_trees_tensor = num_attempted_trees_tensor
self._num_finalized_trees_tensor = num_finalized_trees_tensor
self._override_global_step_value = override_global_step_value
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created.")
if self._override_global_step_value is not None:
self._override_global_step_op = state_ops.assign(
self._global_step_tensor, self._override_global_step_value)
def before_run(self, run_context):
    del run_context  # Unused by StopAfterNTrees.
return session_run_hook.SessionRunArgs({
"num_attempted_trees": self._num_attempted_trees_tensor,
"num_finalized_trees": self._num_finalized_trees_tensor,
})
def after_run(self, run_context, run_values):
num_attempted_trees = run_values.results["num_attempted_trees"]
num_finalized_trees = run_values.results["num_finalized_trees"]
assert num_attempted_trees is not None
assert num_finalized_trees is not None
# Stop when the required number of finalized trees is reached, or when we
# try enough times to build a tree but keep failing.
if (num_finalized_trees >= self._num_trees or
num_attempted_trees > 2 * self._num_trees):
logging.info("Requesting stop since we have reached %d trees.",
num_finalized_trees)
if self._override_global_step_value is not None:
logging.info("Overriding global steps value.")
run_context.session.run(self._override_global_step_op)
run_context.request_stop()
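# Illustrative note, not part of the original module: with n=10 the hook
# requests a stop once num_finalized_trees reaches 10, or once
# num_attempted_trees exceeds 20 (tree building keeps getting aborted), and,
# if override_global_step_value is set, pins the global step to that value
# so hyper-parameter tuning sees a fixed step count.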
class SwitchTrainOp(session_run_hook.SessionRunHook):
"""Hook that switches the train op after specified number of steps.
Hook that replaces the train op depending on the number of steps of training
that have taken place. The first_train_op is used till train_steps steps
are reached. Thereafter the second_train_op is used.
"""
def __init__(self, first_train_op, train_steps, second_train_op):
"""Initializes a `SwitchTrainOp`."""
self._first_train_op = first_train_op
self._second_train_op = second_train_op
self._train_steps = train_steps
def _get_train_op_for_global_step(self, current_step):
"""Gets train_op for current global step."""
if current_step < self._train_steps:
return self._first_train_op
return self._second_train_op
def begin(self):
self._global_step_tensor = training_util.get_global_step()
self._current_train_op = control_flow_ops.no_op()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SwitchTrainOp.")
def before_run(self, run_context): # pylint: disable=unused-argument
return session_run_hook.SessionRunArgs(
{"global_step": self._global_step_tensor,
"train_op": self._current_train_op})
def after_run(self, run_context, run_values):
self._current_train_op = self._get_train_op_for_global_step(
run_values.results["global_step"])
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/trainer_hooks.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy to export custom proto formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.decision_trees.proto import generic_tree_model_extensions_pb2
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import loader as saved_model_loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.util import compat
_SPARSE_FLOAT_FEATURE_NAME_TEMPLATE = "%s_%d"
def make_custom_export_strategy(name,
convert_fn,
feature_columns,
export_input_fn,
use_core_columns=False,
feature_engineering_fn=None,
default_output_alternative_key=None):
"""Makes custom exporter of GTFlow tree format.
Args:
name: A string, for the name of the export strategy.
convert_fn: A function that converts the tree proto to desired format and
saves it to the desired location. Can be None to skip conversion.
feature_columns: A list of feature columns.
export_input_fn: A function that takes no arguments and returns an
`InputFnOps`.
use_core_columns: A boolean, whether core feature columns were used.
feature_engineering_fn: Feature eng function to be called on the input.
default_output_alternative_key: the name of the head to serve when an
incoming serving request does not explicitly request a specific head.
Not needed for single-headed models.
Returns:
An `ExportStrategy`.
"""
base_strategy = saved_model_export_utils.make_export_strategy(
serving_input_fn=export_input_fn,
strip_default_attrs=True,
default_output_alternative_key=default_output_alternative_key)
input_fn = export_input_fn()
features = input_fn.features
if feature_engineering_fn is not None:
features, _ = feature_engineering_fn(features, labels=None)
(sorted_feature_names, dense_floats, sparse_float_indices, _, _,
sparse_int_indices, _, _) = gbdt_batch.extract_features(
features, feature_columns, use_core_columns)
def export_fn(estimator, export_dir, checkpoint_path=None, eval_result=None):
"""A wrapper to export to SavedModel, and convert it to other formats."""
result_dir = base_strategy.export(estimator, export_dir,
checkpoint_path,
eval_result)
with ops.Graph().as_default() as graph:
with tf_session.Session(graph=graph) as sess:
saved_model_loader.load(
sess, [tag_constants.SERVING], result_dir)
# Note: This is GTFlow internal API and might change.
ensemble_model = graph.get_operation_by_name(
"ensemble_model/TreeEnsembleSerialize")
_, dfec_str = sess.run(ensemble_model.outputs)
dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
dtec.ParseFromString(dfec_str)
# Export the result in the same folder as the saved model.
if convert_fn:
convert_fn(dtec, sorted_feature_names,
len(dense_floats),
len(sparse_float_indices),
len(sparse_int_indices), result_dir, eval_result)
feature_importances = _get_feature_importances(
dtec, sorted_feature_names,
len(dense_floats),
len(sparse_float_indices), len(sparse_int_indices))
sorted_by_importance = sorted(
feature_importances.items(), key=lambda x: -x[1])
assets_dir = os.path.join(
compat.as_bytes(result_dir), compat.as_bytes("assets.extra"))
gfile.MakeDirs(assets_dir)
with gfile.GFile(os.path.join(
compat.as_bytes(assets_dir),
compat.as_bytes("feature_importances")), "w") as f:
f.write("\n".join("%s, %f" % (k, v) for k, v in sorted_by_importance))
return result_dir
return export_strategy.ExportStrategy(
name, export_fn, strip_default_attrs=True)
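# Illustrative sketch, not part of the original module: a `convert_fn` passed
# to make_custom_export_strategy is called with the deserialized ensemble and
# feature metadata, so a minimal converter could look like this (the name is
# hypothetical):
#
#   def my_convert_fn(dtec, sorted_feature_names, num_dense, num_sparse_float,
#                     num_sparse_int, export_dir, eval_result):
#     # For example, call convert_to_universal_format(...) and write the
#     # resulting proto under `export_dir`.
#     pass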
def convert_to_universal_format(dtec, sorted_feature_names,
num_dense, num_sparse_float,
num_sparse_int,
feature_name_to_proto=None):
"""Convert GTFlow trees to universal format."""
del num_sparse_int # unused.
model_and_features = generic_tree_model_pb2.ModelAndFeatures()
# TODO(jonasz): Feature descriptions should contain information about how each
# feature is processed before it's fed to the model (e.g. bucketing
# information). As of now, this serves as a list of features the model uses.
for feature_name in sorted_feature_names:
if not feature_name_to_proto:
model_and_features.features[feature_name].SetInParent()
else:
model_and_features.features[feature_name].CopyFrom(
feature_name_to_proto[feature_name])
model = model_and_features.model
model.ensemble.summation_combination_technique.SetInParent()
for tree_idx in range(len(dtec.trees)):
gtflow_tree = dtec.trees[tree_idx]
tree_weight = dtec.tree_weights[tree_idx]
member = model.ensemble.members.add()
member.submodel_id.value = tree_idx
tree = member.submodel.decision_tree
for node_idx in range(len(gtflow_tree.nodes)):
gtflow_node = gtflow_tree.nodes[node_idx]
node = tree.nodes.add()
node_type = gtflow_node.WhichOneof("node")
node.node_id.value = node_idx
if node_type == "leaf":
leaf = gtflow_node.leaf
if leaf.HasField("vector"):
for weight in leaf.vector.value:
new_value = node.leaf.vector.value.add()
new_value.float_value = weight * tree_weight
else:
for index, weight in zip(
leaf.sparse_vector.index, leaf.sparse_vector.value):
new_value = node.leaf.sparse_vector.sparse_value[index]
new_value.float_value = weight * tree_weight
else:
node = node.binary_node
# Binary nodes here.
if node_type == "dense_float_binary_split":
split = gtflow_node.dense_float_binary_split
feature_id = split.feature_column
inequality_test = node.inequality_left_child_test
inequality_test.feature_id.id.value = sorted_feature_names[feature_id]
inequality_test.type = (
generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
inequality_test.threshold.float_value = split.threshold
elif node_type == "sparse_float_binary_split_default_left":
split = gtflow_node.sparse_float_binary_split_default_left.split
node.default_direction = (generic_tree_model_pb2.BinaryNode.LEFT)
feature_id = split.feature_column + num_dense
inequality_test = node.inequality_left_child_test
inequality_test.feature_id.id.value = (
_SPARSE_FLOAT_FEATURE_NAME_TEMPLATE %
(sorted_feature_names[feature_id], split.dimension_id))
model_and_features.features.pop(sorted_feature_names[feature_id])
(model_and_features.features[inequality_test.feature_id.id.value]
.SetInParent())
inequality_test.type = (
generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
inequality_test.threshold.float_value = split.threshold
elif node_type == "sparse_float_binary_split_default_right":
split = gtflow_node.sparse_float_binary_split_default_right.split
node.default_direction = (
generic_tree_model_pb2.BinaryNode.RIGHT)
        # TODO(nponomareva): adjust this id assignment when we allow multi-
        # column sparse tensors.
feature_id = split.feature_column + num_dense
inequality_test = node.inequality_left_child_test
inequality_test.feature_id.id.value = (
_SPARSE_FLOAT_FEATURE_NAME_TEMPLATE %
(sorted_feature_names[feature_id], split.dimension_id))
model_and_features.features.pop(sorted_feature_names[feature_id])
(model_and_features.features[inequality_test.feature_id.id.value]
.SetInParent())
inequality_test.type = (
generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
inequality_test.threshold.float_value = split.threshold
elif node_type == "categorical_id_binary_split":
split = gtflow_node.categorical_id_binary_split
node.default_direction = generic_tree_model_pb2.BinaryNode.RIGHT
feature_id = split.feature_column + num_dense + num_sparse_float
categorical_test = (
generic_tree_model_extensions_pb2.MatchingValuesTest())
categorical_test.feature_id.id.value = sorted_feature_names[
feature_id]
matching_id = categorical_test.value.add()
matching_id.int64_value = split.feature_id
node.custom_left_child_test.Pack(categorical_test)
elif (node_type == "oblivious_dense_float_binary_split" or
node_type == "oblivious_categorical_id_binary_split"):
raise ValueError("Universal tree format doesn't support oblivious "
"trees")
else:
raise ValueError("Unexpected node type %s" % node_type)
node.left_child_id.value = split.left_id
node.right_child_id.value = split.right_id
return model_and_features
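# Illustrative sketch (assumption, not part of the original module): one
# plausible way to feed a serialized GTFlow ensemble into
# convert_to_universal_format. `serialized_ensemble` and `feature_names` are
# hypothetical caller-supplied values, and the sketch assumes every feature is
# a dense float column.
def _example_convert_serialized_ensemble(serialized_ensemble, feature_names):
  """Parses a serialized DecisionTreeEnsembleConfig and converts it (sketch)."""
  dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
  dtec.ParseFromString(serialized_ensemble)
  return convert_to_universal_format(
      dtec,
      sorted_feature_names=feature_names,
      num_dense=len(feature_names),
      num_sparse_float=0,
      num_sparse_int=0)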
def _get_feature_importances(dtec, feature_names, num_dense_floats,
num_sparse_float, num_sparse_int):
"""Export the feature importance per feature column."""
del num_sparse_int # Unused.
sums = collections.defaultdict(lambda: 0)
for tree_idx in range(len(dtec.trees)):
tree = dtec.trees[tree_idx]
for tree_node in tree.nodes:
node_type = tree_node.WhichOneof("node")
if node_type == "dense_float_binary_split":
split = tree_node.dense_float_binary_split
split_column = feature_names[split.feature_column]
elif node_type == "sparse_float_binary_split_default_left":
split = tree_node.sparse_float_binary_split_default_left.split
split_column = _SPARSE_FLOAT_FEATURE_NAME_TEMPLATE % (
feature_names[split.feature_column + num_dense_floats],
split.dimension_id)
elif node_type == "sparse_float_binary_split_default_right":
split = tree_node.sparse_float_binary_split_default_right.split
split_column = _SPARSE_FLOAT_FEATURE_NAME_TEMPLATE % (
feature_names[split.feature_column + num_dense_floats],
split.dimension_id)
elif node_type == "categorical_id_binary_split":
split = tree_node.categorical_id_binary_split
split_column = feature_names[split.feature_column + num_dense_floats +
num_sparse_float]
elif node_type == "oblivious_dense_float_binary_split":
split = tree_node.oblivious_dense_float_binary_split
split_column = feature_names[split.feature_column]
elif node_type == "oblivious_categorical_id_binary_split":
split = tree_node.oblivious_categorical_id_binary_split
split_column = feature_names[split.feature_column + num_dense_floats +
num_sparse_float]
elif node_type == "categorical_id_set_membership_binary_split":
split = tree_node.categorical_id_set_membership_binary_split
split_column = feature_names[split.feature_column + num_dense_floats +
num_sparse_float]
elif node_type == "leaf":
assert tree_node.node_metadata.gain == 0
continue
else:
raise ValueError("Unexpected split type %s" % node_type)
# Apply shrinkage factor. It is important since it is not always uniform
# across different trees.
sums[split_column] += (
tree_node.node_metadata.gain * dtec.tree_weights[tree_idx])
return dict(sums)
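# Illustrative sketch (assumed helper, not in the original file): ranking the
# gain-based importances computed by _get_feature_importances, mirroring the
# sort used when the "feature_importances" asset is written in export_fn.
def _example_top_k_importances(dtec, feature_names, num_dense_floats,
                               num_sparse_float, num_sparse_int, k=10):
  """Returns the k most important (feature, gain) pairs, highest gain first."""
  importances = _get_feature_importances(
      dtec, feature_names, num_dense_floats, num_sparse_float, num_sparse_int)
  return sorted(importances.items(), key=lambda kv: -kv[1])[:k]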
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GTFlow Estimator definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.boosted_trees.estimator_batch import model
from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.estimator.canned import head as core_head_lib
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses as core_losses
from tensorflow.contrib.boosted_trees.estimator_batch import custom_loss_head
from tensorflow.python.ops import array_ops
# ================== Old estimator interface ==================================
# The estimators below were designed for old feature columns and old estimator
# interface. They can be used with new feature columns and losses by setting
# use_core_libs = True.
class GradientBoostedDecisionTreeClassifier(estimator.Estimator):
"""An estimator using gradient boosted decision trees."""
def __init__(self,
learner_config,
examples_per_layer,
n_classes=2,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
label_keys=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=True,
use_core_libs=False,
output_leaf_index=False,
override_global_step_value=None,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeClassifier estimator instance.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
n_classes: Number of classes in the classification.
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
use_core_libs: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      output_leaf_index: Whether to output leaf indices along with predictions
        during inference. The leaf node indices are available in the
        predictions dict under the key 'leaf_index' as a rank-2 Tensor of
        shape [batch_size, num_trees], one leaf index per tree. For example:
          result_iter = classifier.predict(...)
          for result_dict in result_iter:
            leaf_indices = result_dict["leaf_index"]
      override_global_step_value: If set, the global step is reset to this
        value once training is done. Use it to move the global step past the
        number of steps used to train the current ensemble: a common pattern
        is to configure a very large number of training steps, train the
        desired number of trees, and then use this parameter to make it look
        as if that many training steps ran. If None, the global step is not
        overridden.
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: If learner_config is not valid.
"""
if n_classes > 2:
# For multi-class classification, use our loss implementation that
# supports second order derivative.
def loss_fn(labels, logits, weights=None):
result = losses.per_example_maxent_loss(
labels=labels,
logits=logits,
weights=weights,
num_classes=n_classes)
return math_ops.reduce_mean(result[0])
else:
loss_fn = None
head = head_lib.multi_class_head(
n_classes=n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=False,
loss_fn=loss_fn,
label_keys=label_keys)
if learner_config.num_classes == 0:
learner_config.num_classes = n_classes
elif learner_config.num_classes != n_classes:
raise ValueError("n_classes (%d) doesn't match learner_config (%d)." %
(learner_config.num_classes, n_classes))
super(GradientBoostedDecisionTreeClassifier, self).__init__(
model_fn=model.model_builder,
params={
'head': head,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'center_bias': center_bias,
'logits_modifier_function': logits_modifier_function,
'use_core_libs': use_core_libs,
'output_leaf_index': output_leaf_index,
'override_global_step_value': override_global_step_value,
'num_quantiles': num_quantiles,
},
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
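# Minimal usage sketch (not part of the original file): constructing the
# classifier with a default LearnerConfig proto; the constructor fills in
# num_classes. The lazy proto import and `my_feature_columns` are assumptions
# about the caller's setup.
def _example_build_classifier(my_feature_columns):
  """Builds a two-class GBDT classifier (illustrative sketch only)."""
  from tensorflow.contrib.boosted_trees.proto import learner_pb2
  learner_config = learner_pb2.LearnerConfig()
  return GradientBoostedDecisionTreeClassifier(
      learner_config=learner_config,
      examples_per_layer=1000,
      n_classes=2,
      num_trees=10,
      feature_columns=my_feature_columns)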
class GradientBoostedDecisionTreeRegressor(estimator.Estimator):
"""An estimator using gradient boosted decision trees."""
def __init__(self,
learner_config,
examples_per_layer,
label_dimension=1,
num_trees=None,
feature_columns=None,
label_name=None,
weight_column_name=None,
model_dir=None,
config=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=True,
use_core_libs=False,
output_leaf_index=False,
override_global_step_value=None,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeRegressor estimator instance.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
      label_name: String, name of the key in the label dict. Can be None if the
        label is a tensor (single-headed models).
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
use_core_libs: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      output_leaf_index: Whether to output leaf indices along with predictions
        during inference. The leaf node indices are available in the
        predictions dict under the key 'leaf_index', one leaf index per tree.
        For example:
          result_iter = classifier.predict(...)
          for example_prediction_result in result_iter:
            leaf_indices = example_prediction_result["leaf_index"]
      override_global_step_value: If set, the global step is reset to this
        value once training is done. Use it to move the global step past the
        number of steps used to train the current ensemble: a common pattern
        is to configure a very large number of training steps, train the
        desired number of trees, and then use this parameter to make it look
        as if that many training steps ran. If None, the global step is not
        overridden.
num_quantiles: Number of quantiles to build for numeric feature values.
"""
head = head_lib.regression_head(
label_name=label_name,
label_dimension=label_dimension,
weight_column_name=weight_column_name,
enable_centered_bias=False)
if label_dimension == 1:
learner_config.num_classes = 2
else:
learner_config.num_classes = label_dimension
super(GradientBoostedDecisionTreeRegressor, self).__init__(
model_fn=model.model_builder,
params={
'head': head,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'logits_modifier_function': logits_modifier_function,
'center_bias': center_bias,
'use_core_libs': use_core_libs,
'output_leaf_index': False,
'override_global_step_value': override_global_step_value,
'num_quantiles': num_quantiles,
},
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
class GradientBoostedDecisionTreeEstimator(estimator.Estimator):
"""An estimator using gradient boosted decision trees.
Useful for training with user specified `Head`.
"""
def __init__(self,
learner_config,
examples_per_layer,
head,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=True,
use_core_libs=False,
output_leaf_index=False,
override_global_step_value=None,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeEstimator estimator instance.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
head: `Head` instance.
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
use_core_libs: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      output_leaf_index: Whether to output leaf indices along with predictions
        during inference. The leaf node indices are available in the
        predictions dict under the key 'leaf_index', one leaf index per tree.
        For example:
          result_iter = classifier.predict(...)
          for example_prediction_result in result_iter:
            leaf_indices = example_prediction_result["leaf_index"]
      override_global_step_value: If set, the global step is reset to this
        value once training is done. Use it to move the global step past the
        number of steps used to train the current ensemble: a common pattern
        is to configure a very large number of training steps, train the
        desired number of trees, and then use this parameter to make it look
        as if that many training steps ran. If None, the global step is not
        overridden.
num_quantiles: Number of quantiles to build for numeric feature values.
"""
super(GradientBoostedDecisionTreeEstimator, self).__init__(
model_fn=model.model_builder,
params={
'head': head,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'logits_modifier_function': logits_modifier_function,
'center_bias': center_bias,
'use_core_libs': use_core_libs,
'output_leaf_index': False,
'override_global_step_value': override_global_step_value,
'num_quantiles': num_quantiles,
},
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
class GradientBoostedDecisionTreeRanker(estimator.Estimator):
"""A ranking estimator using gradient boosted decision trees."""
def __init__(self,
learner_config,
examples_per_layer,
head,
ranking_model_pair_keys,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
label_keys=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=False,
use_core_libs=False,
output_leaf_index=False,
override_global_step_value=None,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeRanker instance.
    This is an estimator that is trained on pairwise data and can be used for
    inference on non-paired data. It is essentially LambdaMART.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
head: `Head` instance.
ranking_model_pair_keys: Keys to distinguish between features for left and
right part of the training pairs for ranking. For example, for an
Example with features "a.f1" and "b.f1", the keys would be ("a", "b").
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
use_core_libs: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      output_leaf_index: Whether to output leaf indices along with predictions
        during inference. The leaf node indices are available in the
        predictions dict under the key 'leaf_index' as a rank-2 Tensor of
        shape [batch_size, num_trees], one leaf index per tree. For example:
          result_iter = classifier.predict(...)
          for result_dict in result_iter:
            leaf_indices = result_dict["leaf_index"]
      override_global_step_value: If set, the global step is reset to this
        value once training is done. Use it to move the global step past the
        number of steps used to train the current ensemble: a common pattern
        is to configure a very large number of training steps, train the
        desired number of trees, and then use this parameter to make it look
        as if that many training steps ran. If None, the global step is not
        overridden.
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: If learner_config is not valid.
"""
super(GradientBoostedDecisionTreeRanker, self).__init__(
model_fn=model.ranking_model_builder,
params={
'head': head,
'n_classes': 2,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'center_bias': center_bias,
'logits_modifier_function': logits_modifier_function,
'use_core_libs': use_core_libs,
'output_leaf_index': output_leaf_index,
'ranking_model_pair_keys': ranking_model_pair_keys,
'override_global_step_value': override_global_step_value,
'num_quantiles': num_quantiles,
},
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
# When using this estimator, make sure to regularize the hessian (at least l2,
# min_node_weight)!
# TODO(nponomareva): extend to take multiple quantiles in one go.
class GradientBoostedDecisionTreeQuantileRegressor(estimator.Estimator):
"""An estimator that does quantile regression and returns quantile estimates."""
def __init__(self,
learner_config,
examples_per_layer,
quantiles,
label_dimension=1,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=True,
use_core_libs=False,
output_leaf_index=False,
override_global_step_value=None,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeQuantileRegressor instance.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
quantiles: a list of quantiles for the loss, each between 0 and 1.
label_dimension: Dimension of regression label. This is the size of the
last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`). When label_dimension>1, it is
recommended to use multiclass strategy diagonal hessian or full hessian.
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
use_core_libs: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      output_leaf_index: Whether to output leaf indices along with predictions
        during inference. The leaf node indices are available in the
        predictions dict under the key 'leaf_index', one leaf index per tree.
        For example:
          result_iter = classifier.predict(...)
          for example_prediction_result in result_iter:
            leaf_indices = example_prediction_result["leaf_index"]
      override_global_step_value: If set, the global step is reset to this
        value once training is done. Use it to move the global step past the
        number of steps used to train the current ensemble: a common pattern
        is to configure a very large number of training steps, train the
        desired number of trees, and then use this parameter to make it look
        as if that many training steps ran. If None, the global step is not
        overridden.
num_quantiles: Number of quantiles to build for numeric feature values.
"""
if len(quantiles) > 1:
raise ValueError('For now, just one quantile per estimator is supported')
def _quantile_regression_head(quantile):
# Use quantile regression.
head = custom_loss_head.CustomLossHead(
loss_fn=functools.partial(
losses.per_example_quantile_regression_loss, quantile=quantile),
link_fn=array_ops.identity,
logit_dimension=label_dimension)
return head
learner_config.num_classes = max(2, label_dimension)
super(GradientBoostedDecisionTreeQuantileRegressor, self).__init__(
model_fn=model.model_builder,
params={
'head': _quantile_regression_head(quantiles[0]),
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'logits_modifier_function': logits_modifier_function,
'center_bias': center_bias,
'use_core_libs': use_core_libs,
'output_leaf_index': False,
'override_global_step_value': override_global_step_value,
'num_quantiles': num_quantiles,
},
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
# ================== New Estimator interface ==================================
# The estimators below use the new core Estimator interface and must be used
# with new feature columns and heads.
# For multiclass classification, use the following head, since it uses a loss
# that is twice differentiable.
def core_multiclass_head(
n_classes,
weight_column=None,
loss_reduction=core_losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):
"""Core head for multiclass problems."""
def loss_fn(labels, logits):
result = losses.per_example_maxent_loss(
# Don't pass the weights: head already multiplies by them.
labels=labels, logits=logits, weights=None, num_classes=n_classes)
return result[0]
# pylint:disable=protected-access
head_fn = core_head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=n_classes,
loss_fn=loss_fn,
loss_reduction=loss_reduction,
weight_column=weight_column)
# pylint:enable=protected-access
return head_fn
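# Illustrative sketch (assumed usage, not from the original file): wiring the
# multiclass head above into the core estimator defined later in this module.
# `learner_config` and `feature_columns` are caller-supplied placeholders.
def _example_build_core_multiclass_estimator(learner_config, feature_columns):
  """Builds a 3-class core GBDT estimator around core_multiclass_head."""
  head = core_multiclass_head(n_classes=3)
  return CoreGradientBoostedDecisionTreeEstimator(
      learner_config=learner_config,
      examples_per_layer=1000,
      head=head,
      feature_columns=feature_columns)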
# For quantile regression, use this head with
# CoreGradientBoostedDecisionTreeEstimator, or use
# CoreGradientBoostedDecisionTreeQuantileRegressor directly.
def core_quantile_regression_head(
quantiles,
label_dimension=1,
weight_column=None,
loss_reduction=core_losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):
"""Core head for quantile regression problems."""
def loss_fn(labels, logits):
result = losses.per_example_quantile_regression_loss(
labels=labels,
predictions=logits,
# Don't pass the weights: head already multiplies by them.
weights=None,
quantile=quantiles)
return result[0]
# pylint:disable=protected-access
head_fn = core_head_lib._regression_head(
label_dimension=label_dimension,
loss_fn=loss_fn,
loss_reduction=loss_reduction,
weight_column=weight_column)
# pylint:enable=protected-access
return head_fn
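# Illustrative sketch (assumption, not original code): the head above takes a
# single quantile, matching how CoreGradientBoostedDecisionTreeQuantileRegressor
# below calls it with quantiles[0]. A median-regression head, for example:
def _example_median_regression_head():
  """Returns a quantile-regression head for the 0.5 quantile (sketch only)."""
  return core_quantile_regression_head(quantiles=0.5)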
class CoreGradientBoostedDecisionTreeEstimator(core_estimator.Estimator):
"""An estimator using gradient boosted decision trees.
Useful for training with user specified `Head`.
"""
def __init__(self,
learner_config,
examples_per_layer,
head,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
label_keys=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=True,
output_leaf_index=False,
num_quantiles=100):
"""Initializes a core version of GradientBoostedDecisionTreeEstimator.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
head: `Head` instance.
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
      output_leaf_index: Whether to output leaf indices along with predictions
        during inference. The leaf node indices are available in the
        predictions dict under the key 'leaf_index', one leaf index per tree.
        For example:
          result_iter = classifier.predict(...)
          for example_prediction_result in result_iter:
            leaf_indices = example_prediction_result["leaf_index"]
num_quantiles: Number of quantiles to build for numeric feature values.
"""
def _model_fn(features, labels, mode, config):
return model.model_builder(
features=features,
labels=labels,
mode=mode,
config=config,
params={
'head': head,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'center_bias': center_bias,
'logits_modifier_function': logits_modifier_function,
'use_core_libs': True,
'output_leaf_index': output_leaf_index,
'override_global_step_value': None,
'num_quantiles': num_quantiles,
},
output_type=model.ModelBuilderOutputType.ESTIMATOR_SPEC)
super(CoreGradientBoostedDecisionTreeEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
class CoreGradientBoostedDecisionTreeRanker(core_estimator.Estimator):
"""A ranking estimator using gradient boosted decision trees."""
def __init__(self,
learner_config,
examples_per_layer,
head,
ranking_model_pair_keys,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
label_keys=None,
logits_modifier_function=None,
center_bias=False,
output_leaf_index=False,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeRanker instance.
    This is an estimator that is trained on pairwise data and can be used for
    inference on non-paired data. It is essentially LambdaMART.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
head: `Head` instance.
ranking_model_pair_keys: Keys to distinguish between features for left and
right part of the training pairs for ranking. For example, for an
Example with features "a.f1" and "b.f1", the keys would be ("a", "b").
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
      output_leaf_index: Whether to output leaf indices along with predictions
        during inference. The leaf node indices are available in the
        predictions dict under the key 'leaf_index' as a rank-2 Tensor of
        shape [batch_size, num_trees], one leaf index per tree. For example:
          result_iter = classifier.predict(...)
          for result_dict in result_iter:
            leaf_indices = result_dict["leaf_index"]
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: If learner_config is not valid.
"""
def _model_fn(features, labels, mode, config):
return model.ranking_model_builder(
features=features,
labels=labels,
mode=mode,
config=config,
params={
'head': head,
'n_classes': 2,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'center_bias': center_bias,
'logits_modifier_function': logits_modifier_function,
'use_core_libs': True,
'output_leaf_index': output_leaf_index,
'ranking_model_pair_keys': ranking_model_pair_keys,
'override_global_step_value': None,
'num_quantiles': num_quantiles,
},
output_type=model.ModelBuilderOutputType.ESTIMATOR_SPEC)
super(CoreGradientBoostedDecisionTreeRanker, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
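# Sketch only (assumed usage): for paired training data whose features are
# prefixed "a." and "b." as described in the docstring above, the pair keys
# are ("a", "b"). `learner_config`, `head`, and `feature_columns` are
# caller-supplied placeholders.
def _example_build_core_ranker(learner_config, head, feature_columns):
  """Builds a pairwise GBDT ranker (illustrative, LambdaMART-style)."""
  return CoreGradientBoostedDecisionTreeRanker(
      learner_config=learner_config,
      examples_per_layer=1000,
      head=head,
      ranking_model_pair_keys=("a", "b"),
      feature_columns=feature_columns)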
# When using this estimator, make sure to regularize the hessian (at least l2,
# min_node_weight)!
# TODO(nponomareva): extend to take multiple quantiles in one go.
class CoreGradientBoostedDecisionTreeQuantileRegressor(
core_estimator.Estimator):
"""An estimator that does quantile regression and returns quantile estimates."""
def __init__(self,
learner_config,
examples_per_layer,
quantiles,
label_dimension=1,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
label_keys=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=True,
output_leaf_index=False,
num_quantiles=100):
"""Initializes a core version of GradientBoostedDecisionTreeEstimator.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
quantiles: a list of quantiles for the loss, each between 0 and 1.
label_dimension: Dimension of regression label. This is the size of the
last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`). When label_dimension>1, it is
recommended to use multiclass strategy diagonal hessian or full hessian.
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
      output_leaf_index: Whether to output leaf indices along with predictions
        during inference. The leaf node indices are available in the
        predictions dict under the key 'leaf_index', one leaf index per tree.
        For example:
          result_iter = classifier.predict(...)
          for example_prediction_result in result_iter:
            leaf_indices = example_prediction_result["leaf_index"]
num_quantiles: Number of quantiles to build for numeric feature values.
"""
if len(quantiles) > 1:
raise ValueError('For now, just one quantile per estimator is supported')
def _model_fn(features, labels, mode, config):
return model.model_builder(
features=features,
labels=labels,
mode=mode,
config=config,
params={
              'head': core_quantile_regression_head(
                  quantiles[0],
                  label_dimension=label_dimension,
                  weight_column=weight_column_name),
              'feature_columns': feature_columns,
              'learner_config': learner_config,
              'num_trees': num_trees,
              'weight_column_name': weight_column_name,
              'examples_per_layer': examples_per_layer,
              'center_bias': center_bias,
              'logits_modifier_function': logits_modifier_function,
              'use_core_libs': True,
              'output_leaf_index': output_leaf_index,
              'override_global_step_value': None,
              'num_quantiles': num_quantiles,
},
output_type=model.ModelBuilderOutputType.ESTIMATOR_SPEC)
super(CoreGradientBoostedDecisionTreeQuantileRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of `head.Head` with custom loss and link function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
class CustomLossHead(head_lib._RegressionHead): # pylint: disable=protected-access
"""A Head object with custom loss function and link function."""
def __init__(self,
loss_fn,
link_fn,
logit_dimension,
head_name=None,
weight_column_name=None,
metrics_fn=None):
"""`Head` for specifying arbitrary loss function.
Args:
loss_fn: Loss function.
link_fn: Function that converts logits to prediction.
logit_dimension: Number of dimensions for the logits.
head_name: name of the head. Predictions, summary, metrics keys are
suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
metrics_fn: a function that takes predictions dict, labels and weights and
returns a dictionary of metrics to be calculated.
"""
def loss_wrapper(labels, logits, weight_tensor):
if weight_tensor is None:
weight_tensor = array_ops.ones(
shape=[array_ops.shape(labels)[0], 1], dtype=dtypes.float32)
weighted_loss, _ = loss_fn(labels, weight_tensor, logits)
average_loss = math_ops.reduce_mean(weighted_loss)
return average_loss, average_loss / math_ops.reduce_mean(weight_tensor)
super(CustomLossHead, self).__init__(
loss_fn=loss_wrapper,
link_fn=link_fn,
head_name=head_name,
weight_column_name=weight_column_name,
enable_centered_bias=False,
label_dimension=logit_dimension)
self._metrics_fn = metrics_fn
def _metrics(self, eval_loss, predictions, labels, weights):
if self._metrics_fn is not None:
return self._metrics_fn(predictions, labels, weights)
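# Minimal sketch (assumption, mirroring the quantile-regression head built in
# estimator.py): constructing a CustomLossHead around a per-example loss. The
# lazy imports keep the sketch self-contained and are not module imports of the
# original file.
def _example_quantile_loss_head(quantile=0.5, label_dimension=1):
  """Returns a CustomLossHead wrapping a quantile-regression loss (sketch)."""
  import functools
  from tensorflow.contrib.boosted_trees.python.utils import losses
  return CustomLossHead(
      loss_fn=functools.partial(
          losses.per_example_quantile_regression_loss, quantile=quantile),
      link_fn=array_ops.identity,
      logit_dimension=label_dimension)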
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/custom_loss_head.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trainer hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.contrib.boosted_trees.estimator_batch import trainer_hooks
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
class FeatureImportanceSummarySaverTest(test_util.TensorFlowTestCase):
def test_invalid_input(self):
with self.assertRaises(ValueError):
trainer_hooks.FeatureImportanceSummarySaver(model_dir=None)
def test_invalid_graph(self):
# Create inputs.
model_dir = tempfile.mkdtemp()
hook = trainer_hooks.FeatureImportanceSummarySaver(model_dir)
with ops.Graph().as_default():
# Begin won't be able to find the required tensors in the graph.
_ = variables.get_or_create_global_step()
with self.assertRaises(KeyError):
hook.begin()
def test_run(self):
# Create inputs.
model_dir = tempfile.mkdtemp()
hook = trainer_hooks.FeatureImportanceSummarySaver(model_dir)
with ops.Graph().as_default(), tf_session.Session() as sess:
global_step = variables.get_or_create_global_step()
with ops.name_scope("gbdt"):
constant_op.constant(["featA", "featB"], name="feature_names")
constant_op.constant([0, 2], name="feature_usage_counts")
constant_op.constant([0, 0.8], name="feature_gains")
# Begin finds tensors in the graph.
hook.begin()
sess.run(tf_variables.global_variables_initializer())
# Run hook in a monitored session.
train_op = state_ops.assign_add(global_step, 1)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(train_op)
hook.end(sess)
# Ensure output summary dirs are created.
self.assertTrue(os.path.exists(os.path.join(model_dir, "featA")))
self.assertTrue(os.path.exists(os.path.join(model_dir, "featB")))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/estimator_batch/trainer_hooks_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient boosted trees implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.boosted_trees.python.ops import *
# pylint: enable=unused-import,wildcard-import
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class StatsAccumulatorScalarTest(test_util.TensorFlowTestCase):
"""Tests for scalar gradients and hessians accumulator."""
def testSimpleAcculumator(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator.initializer]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
op2 = accumulator.add(0, [1], [[2, 0]], [0.1], [0.2])
with ops.control_dependencies([op1, op2]):
num_updates, partition, bucket_ids, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, bucket_ids, grads, hessians = sess.run(
[num_updates, partition, bucket_ids, grads, hessians])
result = _AccumulatorResultToDict(partition, bucket_ids, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 2)
# Key is partition, bucket, dimension
self.assertAllClose(result[(1, 2, 0)], [0.2, 0.4])
self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])
def testMultidimensionalAcculumator(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator.initializer]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2, 1],
feature_ids=[[2, 2], [3, 0], [2, 2]],
gradients=[0.1, 0.3, 0.8],
hessians=[0.2, 0.4, -9])
op2 = accumulator.add(0, [2, 1], [[3, 1], [2, 2]], [0.1, 1], [0.2, -1])
with ops.control_dependencies([op1, op2]):
num_updates, partition, bucket_ids, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, bucket_ids, grads, hessians = sess.run(
[num_updates, partition, bucket_ids, grads, hessians])
result = _AccumulatorResultToDict(partition, bucket_ids, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 3)
# Key is partition, bucket, dimension.
self.assertAllClose(result[(1, 2, 2)], [1.9, -9.8])
self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])
self.assertAllClose(result[(2, 3, 1)], [0.1, 0.2])
def testDropStaleUpdate(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator.initializer]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
op2 = accumulator.add(
stamp_token=-1,
partition_ids=[1],
feature_ids=[[2, 0]],
gradients=[0.1],
hessians=[0.2])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 1)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 0)], [0.1, 0.2])
self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])
def testSerialize(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator.initializer]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
with ops.control_dependencies([op1]):
(stamp_token, num_updates, partition_1, feature_1, grads_1,
hessians_1) = accumulator.saveable.serialize()
# Make sure that the accumulator hasn't changed during serialization.
with ops.control_dependencies([stamp_token]):
num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (
accumulator.flush(stamp_token=0, next_stamp_token=1))
(stamp_token, num_updates, partition_1, feature_1, grads_1, hessians_1,
num_updates_2, partition_2, feature_2, grads_2, hessians_2) = sess.run(
[
stamp_token, num_updates, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2
])
result_1 = _AccumulatorResultToDict(partition_1, feature_1, grads_1,
hessians_1)
result_2 = _AccumulatorResultToDict(partition_2, feature_2, grads_2,
hessians_2)
self.assertEqual(num_updates, 1)
self.assertEqual(num_updates_2, 1)
self.assertEqual(len(result_1), 2)
self.assertAllClose(result_1[(1, 2, 0)], [0.1, 0.2])
self.assertAllClose(result_1[(2, 3, 0)], [0.3, 0.4])
self.assertAllEqual(result_1, result_2)
self.assertEqual(0, stamp_token)
def testDeserialize(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator.initializer]):
# These will be deleted due to deserialize call.
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 1]],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
with ops.control_dependencies([op1]):
deserialize = (
accumulator.saveable.deserialize(
stamp_token=2,
num_updates=3,
partition_ids=[3, 4],
feature_ids=[[5, 0], [6, 2]],
gradients=[0.4, 0.5],
hessians=[0.6, 0.7]))
with ops.control_dependencies([deserialize]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=2, next_stamp_token=3)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads,
hessians)
self.assertEqual(num_updates, 3)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(3, 5, 0)], [0.4, 0.6])
self.assertAllClose(result[(4, 6, 2)], [0.5, 0.7])
def testMakeSummary(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
partition, feature, grads, hessians = accumulator._make_summary(
partition_ids=[1, 2, 1],
feature_ids=[[2, 0], [3, 1], [2, 0]],
gradients=[0.1, 0.3, 0.1],
hessians=[0.2, 0.4, 0.2])
partition, feature, grads, hessians = sess.run(
[partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 0)], [0.2, 0.4])
self.assertAllClose(result[(2, 3, 1)], [0.3, 0.4])
class StatsAccumulatorTensorTest(test_util.TensorFlowTestCase):
"""Tests for tensor gradients and hessians accumulator."""
def testSimpleAcculumator(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator.initializer]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,
0.08]]])
op2 = accumulator.add(
stamp_token=0,
partition_ids=[1],
feature_ids=[[2, 0]],
gradients=[[0.10, 0.11]],
hessians=[[[0.011, 0.022], [0.033, 0.044]]])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 0)][0], [0.20, 0.21])
self.assertAllClose(result[(1, 2, 0)][1],
[[0.021, 0.042], [0.063, 0.084]])
self.assertAllClose(result[(2, 3, 0)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3, 0)][1], [[0.05, 0.06], [0.07, 0.08]])
def testMultidimensionalAcculumator(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator.initializer]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 4], [3, 1]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,
0.08]]])
op2 = accumulator.add(
stamp_token=0,
partition_ids=[1],
feature_ids=[[2, 4]],
gradients=[[0.10, 0.11]],
hessians=[[[0.011, 0.022], [0.033, 0.044]]])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 4)][0], [0.20, 0.21])
self.assertAllClose(result[(1, 2, 4)][1],
[[0.021, 0.042], [0.063, 0.084]])
self.assertAllClose(result[(2, 3, 1)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3, 1)][1], [[0.05, 0.06], [0.07, 0.08]])
def testDropStaleUpdate(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator.initializer]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 5], [3, 0]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,
0.08]]])
op2 = accumulator.add(
stamp_token=-1,
partition_ids=[1],
feature_ids=[[2, 5]],
gradients=[[0.10, 0.11]],
hessians=[[[0.011, 0.022], [0.033, 0.044]]])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 1)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 5)][0], [0.1, 0.1])
self.assertAllClose(result[(1, 2, 5)][1], [[0.01, 0.02], [0.03, 0.04]])
self.assertAllClose(result[(2, 3, 0)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3, 0)][1], [[0.05, 0.06], [0.07, 0.08]])
def testSerialize(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator.initializer]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,
0.08]]])
with ops.control_dependencies([op1]):
(stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1) = accumulator.saveable.serialize()
# Make sure that the accumulator hasn't changed during serialization.
with ops.control_dependencies([stamp_token]):
num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (
accumulator.flush(stamp_token=0, next_stamp_token=1))
(stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2) = sess.run([
stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2
])
result_1 = _AccumulatorResultToDict(partition_1, feature_1, grads_1,
hessians_1)
result_2 = _AccumulatorResultToDict(partition_2, feature_2, grads_2,
hessians_2)
self.assertEqual(num_updates_1, 1)
self.assertEqual(num_updates_2, 1)
self.assertEqual(len(result_1), 2)
self.assertAllClose(result_1[(1, 2, 0)][0], [0.1, 0.1])
self.assertAllClose(result_1[(1, 2, 0)][1], [[0.01, 0.02], [0.03, 0.04]])
self.assertAllClose(result_1[(2, 3, 0)][0], [0.2, 0.2])
self.assertAllClose(result_1[(2, 3, 0)][1], [[0.05, 0.06], [0.07, 0.08]])
self.assertAllEqual(result_1[1, 2, 0][0], result_2[1, 2, 0][0])
self.assertAllEqual(result_1[1, 2, 0][1], result_2[1, 2, 0][1])
self.assertAllEqual(result_1[2, 3, 0][0], result_2[2, 3, 0][0])
self.assertAllEqual(result_1[2, 3, 0][1], result_2[2, 3, 0][1])
def testDeserialize(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator.initializer]):
        # These values will be discarded by the deserialize call below.
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,
0.08]]])
with ops.control_dependencies([op1]):
deserialize = accumulator.saveable.deserialize(
stamp_token=2,
num_updates=3,
partition_ids=[3, 4],
feature_ids=[[4, 0], [5, 0]],
# Two values for gradients,
gradients=[[0.3, 0.3], [0.5, 0.5]],
# A 2x2 matrix for each hessian.
hessians=[[[0.03, 0.04], [0.05, 0.06]], [[0.07, 0.08], [0.09,
0.10]]])
with ops.control_dependencies([deserialize]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=2, next_stamp_token=3)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads,
hessians)
self.assertEqual(num_updates, 3)
self.assertEqual(len(result), 2)
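      # The deserialize call replaced the accumulator's contents, so only the
      # deserialized rows (keyed by partitions 3 and 4) remain and num_updates
      # reflects the deserialized value of 3.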
self.assertAllClose(result[(3, 4, 0)][0], [0.3, 0.3])
self.assertAllClose(result[(3, 4, 0)][1], [[0.03, 0.04], [0.05, 0.06]])
self.assertAllClose(result[(4, 5, 0)][0], [0.5, 0.5])
self.assertAllClose(result[(4, 5, 0)][1], [[0.07, 0.08], [0.09, 0.10]])
def testMakeSummary(self):
with self.cached_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
partition, feature, grads, hessians = accumulator._make_summary(
partition_ids=[1, 2, 1],
feature_ids=[[2, 0], [3, 2], [2, 0]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2], [0.10, 0.11]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07, 0.08]],
[[0.011, 0.022], [0.033, 0.044]]])
partition, feature, grads, hessians = sess.run(
[partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 0)][0], [0.20, 0.21])
self.assertAllClose(result[(1, 2, 0)][1],
[[0.021, 0.042], [0.063, 0.084]])
self.assertAllClose(result[(2, 3, 2)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3, 2)][1], [[0.05, 0.06], [0.07, 0.08]])
def _AccumulatorResultToDict(partition, feature, grads, hessians):
"""Converts the inputs to a dictionary since the ordering changes."""
return {(partition[i], feature[i, 0], feature[i, 1]): (grads[i], hessians[i])
for i in range(len(partition))}
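# A minimal usage sketch (not executed by the tests): with numpy inputs such as
#   partition = np.array([1])
#   feature = np.array([[2, 4]])
#   grads = np.array([[0.2, 0.21]])
#   hessians = np.array([[[0.02, 0.04], [0.06, 0.08]]])
# the helper above returns
#   {(1, 2, 4): (array([0.2, 0.21]), array([[0.02, 0.04], [0.06, 0.08]]))}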
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/kernel_tests/stats_accumulator_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow model ops.
The tests cover:
- Loading a model from protobufs.
- Running predictions using an existing model.
- Serializing the model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import prediction_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import saver
def _append_to_leaf(leaf, c_id, w):
"""Helper method for building tree leaves.
Appends weight contributions for the given class index to a leaf node.
Args:
leaf: leaf node to append to.
c_id: class Id for the weight update.
w: weight contribution value.
"""
leaf.sparse_vector.index.append(c_id)
leaf.sparse_vector.value.append(w)
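# For example, calling _append_to_leaf(leaf, 0, -0.4) on an empty leaf produces
# the text proto:
#   sparse_vector {
#     index: 0
#     value: -0.4
#   }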
def _set_float_split(split, feat_col, thresh, l_id, r_id):
"""Helper method for building tree float splits.
Sets split feature column, threshold and children.
Args:
split: split node to update.
feat_col: feature column for the split.
thresh: threshold to split on forming rule x <= thresh.
l_id: left child Id.
r_id: right child Id.
"""
split.feature_column = feat_col
split.threshold = thresh
split.left_id = l_id
split.right_id = r_id
class ModelOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
"""Sets up test for model_ops.
    Creates a batch of two examples with one dense float, two sparse float, and
    one sparse int feature.
The data looks like the following:
| Instance | Dense0 | SparseF0 | SparseF1 | SparseI0 |
| 0 | 7 | -3 | | |
| 1 | -2 | | 4 | 9,1 |
"""
super(ModelOpsTest, self).setUp()
self._dense_float_tensor = np.array([[7.0], [-2.0]])
self._sparse_float_indices1 = np.array([[0, 0]])
self._sparse_float_values1 = np.array([-3.0])
self._sparse_float_shape1 = np.array([2, 1])
self._sparse_float_indices2 = np.array([[1, 0]])
self._sparse_float_values2 = np.array([4.0])
self._sparse_float_shape2 = np.array([2, 1])
self._sparse_int_indices1 = np.array([[1, 0], [1, 1]])
self._sparse_int_values1 = np.array([9, 1])
self._sparse_int_shape1 = np.array([2, 2])
self._seed = 123
def testCreate(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree = tree_ensemble_config.trees.add()
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(1.0)
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=3,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="create_tree")
resources.initialize_resources(resources.shared_resources()).run()
result, _ = prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
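      # The ensemble holds a single one-leaf tree (weight 1.0) that assigns
      # -0.4 to class 0, so both examples score [-0.4].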
self.assertAllClose(result.eval(), [[-0.4], [-0.4]])
stamp_token = model_ops.tree_ensemble_stamp_token(tree_ensemble_handle)
self.assertEqual(stamp_token.eval(), 3)
def testSerialization(self):
with ops.Graph().as_default() as graph:
with self.session(graph):
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree only for second class.
tree1 = tree_ensemble_config.trees.add()
_append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)
tree_ensemble_config.tree_weights.append(1.0)
# Depth 2 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_weights.append(1.0)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_right.split, 1, 4.0,
1, 2)
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3,
4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 1, 1.2)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=7,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="saver_tree")
stamp_token, serialized_config = model_ops.tree_ensemble_serialize(
tree_ensemble_handle)
resources.initialize_resources(resources.shared_resources()).run()
self.assertEqual(stamp_token.eval(), 7)
serialized_config = serialized_config.eval()
with ops.Graph().as_default() as graph:
with self.session(graph):
tree_ensemble_handle2 = model_ops.tree_ensemble_variable(
stamp_token=9,
tree_ensemble_config=serialized_config,
name="saver_tree2")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
result, _ = prediction_ops.gradient_trees_prediction(
tree_ensemble_handle2,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
# Re-serialize tree.
stamp_token2, serialized_config2 = model_ops.tree_ensemble_serialize(
tree_ensemble_handle2)
# The first example will get bias class 1 -0.2 from first tree and
# leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2],
# the second example will get the same bias class 1 -0.2 and leaf 3
# payload of class 1 1.2 hence [0.0, 1.0].
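        # I.e. [0 + 0.5, -0.2 + 0] = [0.5, -0.2] and [0 + 0, -0.2 + 1.2] =
        # [0.0, 1.0].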
self.assertEqual(stamp_token2.eval(), 9)
        # Class 2 never gets a score in any leaf, so it effectively scores 0.
self.assertEqual(serialized_config2.eval(), serialized_config)
self.assertAllClose(result.eval(), [[0.5, -0.2], [0, 1.0]])
def testRestore(self):
# Calling self.cached_session() without a graph specified results in
# TensorFlowTestCase caching the session and returning the same one
    # every time. In this test, we need to create two different sessions,
    # which is why we create a separate graph for each and pass it to
    # self.session() to ensure no caching occurs under the hood.
save_path = os.path.join(self.get_temp_dir(), "restore-test")
with ops.Graph().as_default() as graph:
with self.session(graph) as sess:
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
# Add the first tree and save.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
tree_ensemble_config.tree_weights.append(1.0)
_append_to_leaf(tree.nodes.add().leaf, 0, -0.1)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=3,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="restore_tree")
resources.initialize_resources(resources.shared_resources()).run()
variables.global_variables_initializer().run()
my_saver = saver.Saver()
# Add the second tree and replace the ensemble of the handle.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_weights.append(1.0)
_append_to_leaf(tree2.nodes.add().leaf, 0, -1.0)
# Predict to confirm.
with ops.control_dependencies([
model_ops.tree_ensemble_deserialize(
tree_ensemble_handle,
stamp_token=3,
tree_ensemble_config=tree_ensemble_config.SerializeToString())
]):
result, _ = prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
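        # Tree 1 contributes -0.1 and tree 2 contributes -1.0, each with tree
        # weight 1.0, giving -1.1 for both examples.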
self.assertAllClose([[-1.1], [-1.1]], result.eval())
# Save before adding other trees.
val = my_saver.save(sess, save_path)
self.assertEqual(save_path, val)
# Add more trees after saving.
tree3 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_weights.append(1.0)
_append_to_leaf(tree3.nodes.add().leaf, 0, -10.0)
# Predict to confirm.
with ops.control_dependencies([
model_ops.tree_ensemble_deserialize(
tree_ensemble_handle,
stamp_token=3,
tree_ensemble_config=tree_ensemble_config.SerializeToString())
]):
result, _ = prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
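        # With the third tree (-10.0) deserialized as well, each example now
        # scores -0.1 - 1.0 - 10.0 = -11.1.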
self.assertAllClose(result.eval(), [[-11.1], [-11.1]])
    # Start a second session. In that session the parameter nodes have not
    # been initialized and will be loaded from the checkpoint.
with ops.Graph().as_default() as graph:
with self.session(graph) as sess:
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="restore_tree")
my_saver = saver.Saver()
my_saver.restore(sess, save_path)
result, _ = prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
# Make sure we only have the first and second tree.
# The third tree was added after the save.
self.assertAllClose(result.eval(), [[-1.1], [-1.1]])
def testUsedHandlers(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_config.growing_metadata.used_handler_ids.append(1)
tree_ensemble_config.growing_metadata.used_handler_ids.append(5)
stamp_token = 3
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=stamp_token,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="create_tree")
resources.initialize_resources(resources.shared_resources()).run()
result = model_ops.tree_ensemble_used_handlers(
tree_ensemble_handle, stamp_token, num_all_handlers=6)
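      # Handler ids 1 and 5 were recorded as used, so those positions are set
      # in the 6-wide mask and the used-handler count is 2.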
self.assertAllEqual([0, 1, 0, 0, 0, 1], result.used_handlers_mask.eval())
self.assertEqual(2, result.num_used_handlers.eval())
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/kernel_tests/model_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow training Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def _gen_learner_config(num_classes,
l1_reg,
l2_reg,
tree_complexity,
max_depth,
min_node_weight,
pruning_mode,
growing_mode,
dropout_probability=None,
dropout_learning_rate=None,
dropout_prob_of_skipping=None):
"""Create a serialized learner config with the desired settings."""
config = learner_pb2.LearnerConfig()
config.num_classes = num_classes
config.regularization.l1 = l1_reg
config.regularization.l2 = l2_reg
config.regularization.tree_complexity = tree_complexity
config.constraints.max_tree_depth = max_depth
config.constraints.min_node_weight = min_node_weight
config.pruning_mode = pruning_mode
config.growing_mode = growing_mode
if dropout_probability is not None:
config.learning_rate_tuner.dropout.dropout_probability = dropout_probability
if dropout_learning_rate is not None:
config.learning_rate_tuner.dropout.learning_rate = dropout_learning_rate
if dropout_prob_of_skipping is not None:
config.learning_rate_tuner.dropout.dropout_prob_of_skipping = (
dropout_prob_of_skipping)
return config
def _gen_dense_split_info(fc, threshold, left_weight, right_weight):
split_str = """
split_node {
dense_float_binary_split {
feature_column: %d
threshold: %f
}
}
left_child {
sparse_vector {
index: 0
value: %f
}
}
right_child {
sparse_vector {
index: 0
value: %f
}
}""" % (fc, threshold, left_weight, right_weight)
split = split_info_pb2.SplitInfo()
text_format.Merge(split_str, split)
return split.SerializeToString()
def _gen_dense_oblivious_split_info(fc, threshold, leave_weights,
children_parent_id):
split_str = """
split_node {
oblivious_dense_float_binary_split {
feature_column: %d
threshold: %f
}
}""" % (fc, threshold)
for weight in leave_weights:
split_str += """
children {
vector {
value: %f
}
}""" % (
weight)
for x in children_parent_id:
split_str += """
children_parent_id: %d""" % (x)
split = split_info_pb2.ObliviousSplitInfo()
text_format.Merge(split_str, split)
return split.SerializeToString()
def _gen_categorical_split_info(fc, feat_id, left_weight, right_weight):
split_str = """
split_node {
categorical_id_binary_split {
feature_column: %d
feature_id: %d
}
}
left_child {
sparse_vector {
index: 0
value: %f
}
}
right_child {
sparse_vector {
index: 0
value: %f
}
}""" % (fc, feat_id, left_weight, right_weight)
split = split_info_pb2.SplitInfo()
text_format.Merge(split_str, split)
return split.SerializeToString()
def _get_bias_update(grads, hess):
return array_ops.where(hess > 0, -grads / hess, array_ops.zeros_like(grads))
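# For instance (a sketch, not evaluated here): running
#   _get_bias_update(constant_op.constant([0.4, -0.3]),
#                    constant_op.constant([2.0, 1.0]))
# yields [-0.2, 0.3]; entries with a non-positive hessian get a 0 update.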
class CenterTreeEnsembleBiasOpTest(test_util.TensorFlowTestCase):
"""Tests for centering tree ensemble bias."""
def testCenterBias(self):
"""Tests bias centering for multiple iterations."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=3,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=4,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE,
# Dropout does not change anything here.
dropout_probability=0.5).SerializeToString()
# Center bias for the initial step.
grads = constant_op.constant([0.4, -0.3])
hess = constant_op.constant([2.0, 1.0])
continue_centering1 = training_ops.center_tree_ensemble_bias(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
delta_updates=_get_bias_update(grads, hess),
learner_config=learner_config)
continue_centering = session.run(continue_centering1)
self.assertEqual(continue_centering, True)
# Validate ensemble state.
# dim 0 update: -0.4/2.0 = -0.2
# dim 1 update: +0.3/1.0 = +0.3
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
leaf {
vector {
value: -0.2
value: 0.3
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals(expected_result, tree_ensemble_config)
# Center bias for another step.
# dim 0 update: -0.06/0.5 = -0.12
# dim 1 update: -0.01/0.5 = -0.02
grads = constant_op.constant([0.06, 0.01])
hess = constant_op.constant([0.5, 0.5])
continue_centering2 = training_ops.center_tree_ensemble_bias(
tree_ensemble_handle,
stamp_token=1,
next_stamp_token=2,
delta_updates=_get_bias_update(grads, hess),
learner_config=learner_config)
continue_centering = session.run(continue_centering2)
self.assertEqual(continue_centering, True)
# Validate ensemble state.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=2))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
leaf {
vector {
value: -0.32
value: 0.28
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
"""
self.assertEqual(new_stamp, 2)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals(expected_result, tree_ensemble_config)
# Center bias for another step, but this time updates are negligible.
grads = constant_op.constant([0.0000001, -0.00003])
hess = constant_op.constant([0.5, 0.0])
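      # The second dimension has a zero hessian, so _get_bias_update leaves it
      # at 0; the remaining delta is tiny, so the op reports centering as done.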
continue_centering3 = training_ops.center_tree_ensemble_bias(
tree_ensemble_handle,
stamp_token=2,
next_stamp_token=3,
delta_updates=_get_bias_update(grads, hess),
learner_config=learner_config)
continue_centering = session.run(continue_centering3)
self.assertEqual(continue_centering, False)
# Validate ensemble stamp.
new_stamp, _ = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=3))
self.assertEqual(new_stamp, 3)
self.assertEqual(stats.num_trees, 1)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
class GrowTreeEnsembleOpTest(test_util.TensorFlowTestCase):
"""Tests for growing tree ensemble from split candidates."""
def testGrowEmptyEnsemble(self):
"""Test growing an empty ensemble."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE,
          # Dropout does not change anything here; the tree is not finalized.
dropout_probability=0.5)
# Prepare handler inputs.
# Note that handlers 1 & 3 have the same gain but different splits.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([7.62], dtype=np.float32)
handler1_split = [_gen_dense_split_info(0, 0.52, -4.375, 7.143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([0.63], dtype=np.float32)
handler2_split = [_gen_dense_split_info(0, 0.23, -0.6, 0.24)]
handler3_partitions = np.array([0], dtype=np.int32)
handler3_gains = np.array([7.62], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(0, 7, -4.375, 7.143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect the simpler split from handler 1 to be chosen.
# The grown tree should be finalized as max tree depth is 1.
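      # The chosen split's threshold (0.52) and leaf values (-4.375, 7.143)
      # come straight from handler 1's SplitInfo, and the new tree is added
      # with weight equal to the 0.1 learning rate.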
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
dense_float_binary_split {
threshold: 0.52
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.62
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -4.375
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 7.143
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 1)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowEmptyEnsembleObliviousCase(self):
"""Test growing an empty ensemble in the oblivious case."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
# Prepare handler inputs.
# Note that handlers 1 & 3 have the same gain but different splits.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([7.62], dtype=np.float32)
handler1_split = [
_gen_dense_oblivious_split_info(0, 0.52, [-4.375, 7.143], [0])
]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([0.63], dtype=np.float32)
handler2_split = [
_gen_dense_oblivious_split_info(0, 0.23, [-0.6, 0.24], [0])
]
handler3_partitions = np.array([0], dtype=np.int32)
handler3_gains = np.array([7.62], dtype=np.float32)
handler3_split = [
_gen_dense_oblivious_split_info(0, 7, [-4.375, 7.143], [0])
]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
session.run(grow_op)
      # Expect the split with the bigger handler id (handler 3) to be chosen.
# The grown tree should be finalized as max tree depth is 1.
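      # Handler 3's oblivious split (threshold 7) is used, its leaf values
      # become dense vector leaves, and the tree weight again equals the 0.1
      # learning rate.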
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
oblivious_dense_float_binary_split {
feature_column: 0
threshold: 7
}
node_metadata {
gain: 7.62
original_oblivious_leaves {
}
}
}
nodes {
leaf {
vector {
value: -4.375
}
}
}
nodes {
leaf {
vector {
value: 7.143
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 1)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowExistingEnsembleTreeNotFinalized(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.cached_session() as session:
# Create existing ensemble with one root split
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge("""
trees {
nodes {
categorical_id_binary_split {
feature_id: 4
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.61999988556
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 7.14300012589
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -4.375
}
}
}
}
tree_weights: 0.10000000149
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=3,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE,
# Dropout does not change anything here - tree is not finalized.
dropout_probability=0.5)
# Prepare handler inputs.
# Handler 1 only has a candidate for partition 1, handler 2 has candidates
# for both partitions and handler 3 only has a candidate for partition 2.
handler1_partitions = np.array([1], dtype=np.int32)
handler1_gains = np.array([1.4], dtype=np.float32)
handler1_split = [_gen_dense_split_info(0, 0.21, -6.0, 1.65)]
handler2_partitions = np.array([1, 2], dtype=np.int32)
handler2_gains = np.array([0.63, 2.7], dtype=np.float32)
handler2_split = [
_gen_dense_split_info(0, 0.23, -0.6, 0.24),
_gen_categorical_split_info(1, 7, -1.5, 2.3)
]
handler3_partitions = np.array([2], dtype=np.int32)
handler3_gains = np.array([1.7], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(0, 3, -0.75, 1.93)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect the split for partition 1 to be chosen from handler 1 and
# the split for partition 2 to be chosen from handler 2.
# The grown tree should not be finalized as max tree depth is 3 and
# it's only grown 2 layers.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
categorical_id_binary_split {
feature_id: 4
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.61999988556
}
}
nodes {
dense_float_binary_split {
threshold: 0.21
left_id: 3
right_id: 4
}
node_metadata {
gain: 1.4
}
}
nodes {
categorical_id_binary_split {
feature_column: 1
feature_id: 7
left_id: 5
right_id: 6
}
node_metadata {
gain: 2.7
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -6.0
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 1.65
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -1.5
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 2.3
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 2)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 2)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 2)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowExistingEnsembleTreeFinalized(self):
"""Test growing an existing ensemble with the last tree finalized."""
with self.cached_session() as session:
# Create existing ensemble with one root split
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge("""
trees {
nodes {
categorical_id_binary_split {
feature_column: 3
feature_id: 7
left_id: 1
right_id: 2
}
node_metadata {
gain: 1.3
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 2.3
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -0.9
}
}
}
}
tree_weights: 0.10000000149
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
# Prepare handler inputs.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([7.62], dtype=np.float32)
handler1_split = [_gen_dense_split_info(5, 0.52, -4.375, 7.143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([0.63], dtype=np.float32)
handler2_split = [_gen_dense_split_info(2, 0.23, -0.6, 0.24)]
handler3_partitions = np.array([0], dtype=np.int32)
handler3_gains = np.array([7.62], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(8, 7, -4.375, 7.143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.2,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect a new tree to be added with the split from handler 1.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
categorical_id_binary_split {
feature_column: 3
feature_id: 7
left_id: 1
right_id: 2
}
node_metadata {
gain: 1.3
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 2.3
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -0.9
}
}
}
}
trees {
nodes {
dense_float_binary_split {
feature_column: 5
threshold: 0.52
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.62
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -4.375
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 7.143
}
}
}
}
tree_weights: 0.1
tree_weights: 0.2
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 2
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 2)
self.assertEqual(stats.num_layers, 2)
self.assertEqual(stats.active_tree, 2)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 2)
self.assertEqual(stats.attempted_layers, 2)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowEnsemblePrePrune(self):
"""Test growing an ensemble with pre-pruning."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
# Prepare handler inputs.
# All handlers have negative gain.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([-0.62], dtype=np.float32)
handler1_split = [_gen_dense_split_info(0, 0.52, 0.01, 0.0143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([-1.3], dtype=np.float32)
handler2_split = [_gen_categorical_split_info(0, 7, 0.013, 0.0143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[handler1_partitions, handler2_partitions],
gains=[handler1_gains, handler2_gains],
splits=[handler1_split, handler2_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect the ensemble to be empty.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 0)
self.assertEqual(stats.active_tree, 0)
self.assertEqual(stats.active_layer, 0)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals("""
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
def testGrowEnsemblePostPruneNone(self):
"""Test growing an empty ensemble."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.POST_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
# Prepare handler inputs.
# Note that handlers 1 & 3 have the same gain but different splits.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([7.62], dtype=np.float32)
handler1_split = [_gen_dense_split_info(0, 0.52, -4.375, 7.143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([0.63], dtype=np.float32)
handler2_split = [_gen_dense_split_info(0, 0.23, -0.6, 0.24)]
handler3_partitions = np.array([0], dtype=np.int32)
handler3_gains = np.array([7.62], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(0, 7, -4.375, 7.143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect the simpler split from handler 1 to be chosen.
# The grown tree should be finalized as max tree depth is 1.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
dense_float_binary_split {
threshold: 0.52
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.62
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -4.375
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 7.143
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 1)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowEnsemblePostPruneAll(self):
"""Test growing an ensemble with post-pruning."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=2,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.POST_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
# Prepare handler inputs.
# All handlers have negative gain.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([-1.3], dtype=np.float32)
handler1_split = [_gen_categorical_split_info(0, 7, 0.013, 0.0143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([-0.62], dtype=np.float32)
handler2_split = [_gen_dense_split_info(0, 0.33, 0.01, 0.0143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[handler1_partitions, handler2_partitions],
gains=[handler1_gains, handler2_gains],
splits=[handler1_split, handler2_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect the split from handler 2 to be chosen despite the negative gain.
      # The grown tree should not be finalized as max tree depth is 2, so no
      # pruning occurs yet.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
tree_ensemble_config.ParseFromString(serialized)
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
expected_result = """
trees {
nodes {
dense_float_binary_split {
threshold: 0.33
left_id: 1
right_id: 2
}
node_metadata {
gain: -0.62
original_leaf {
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 0.01
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 0.0143
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals(expected_result, tree_ensemble_config)
# Prepare handler inputs.
# All handlers have negative gain.
handler1_partitions = np.array([1, 2], dtype=np.int32)
handler1_gains = np.array([-0.2, -0.5], dtype=np.float32)
handler1_split = [
_gen_categorical_split_info(3, 7, 0.07, 0.083),
_gen_categorical_split_info(3, 5, 0.041, 0.064)
]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=1,
next_stamp_token=2,
learning_rate=0.1,
partition_ids=[handler1_partitions],
gains=[handler1_gains],
splits=[handler1_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect the ensemble to be empty as post-pruning will prune
# the entire finalized tree.
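      # The tree reached max depth 2 and was finalized, and since every split
      # in it has negative gain, post-pruning removes the whole tree.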
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=2))
tree_ensemble_config.ParseFromString(serialized)
self.assertEqual(new_stamp, 2)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 0)
self.assertEqual(stats.active_tree, 0)
self.assertEqual(stats.active_layer, 0)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 2)
self.assertProtoEquals("""
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
def testGrowEnsemblePostPrunePartial(self):
"""Test growing an ensemble with post-pruning."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=2,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.POST_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
# Prepare handler inputs.
# Second handler has positive gain.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([-1.3], dtype=np.float32)
handler1_split = [_gen_categorical_split_info(0, 7, 0.013, 0.0143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([-0.2], dtype=np.float32)
handler2_split = [_gen_dense_split_info(0, 0.33, 0.01, 0.0143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[handler1_partitions, handler2_partitions],
gains=[handler1_gains, handler2_gains],
splits=[handler1_split, handler2_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect the split from handler 2 to be chosen despite the negative gain.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
dense_float_binary_split {
threshold: 0.33
left_id: 1
right_id: 2
}
node_metadata {
gain: -0.2
original_leaf {
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 0.01
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 0.0143
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals(expected_result, tree_ensemble_config)
# Prepare handler inputs for second layer.
# Note that partition 1 gain is negative and partition 2 gain is positive.
handler1_partitions = np.array([1, 2], dtype=np.int32)
handler1_gains = np.array([-0.2, 0.5], dtype=np.float32)
handler1_split = [
_gen_categorical_split_info(3, 7, 0.07, 0.083),
_gen_categorical_split_info(3, 5, 0.041, 0.064)
]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=1,
next_stamp_token=2,
learning_rate=0.1,
partition_ids=[handler1_partitions],
gains=[handler1_gains],
splits=[handler1_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect the negative gain split of partition 1 to be pruned and the
# positive gain split of partition 2 to be retained.
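      # Pruning collapses the partition 1 branch back to its original leaf
      # (value 0.01), while partition 2 keeps the categorical split with
      # children 0.041 and 0.064.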
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=2))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
dense_float_binary_split {
threshold: 0.33
left_id: 1
right_id: 2
}
node_metadata {
gain: -0.2
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 0.01
}
}
}
nodes {
categorical_id_binary_split {
feature_column: 3
feature_id: 5
left_id: 3
right_id: 4
}
node_metadata {
gain: 0.5
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 0.041
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 0.064
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 2
is_finalized: true
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
"""
self.assertEqual(new_stamp, 2)
self.assertEqual(stats.num_trees, 1)
self.assertEqual(stats.num_layers, 2)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 2)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 2)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowEnsembleTreeLayerByLayer(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.cached_session() as session:
# Create existing ensemble with one root split
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge("""
trees {
nodes {
categorical_id_binary_split {
feature_id: 4
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.62
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 7.143
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -4.375
}
}
}
}
tree_weights: 0.10000000149
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=3,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.LAYER_BY_LAYER,
# Dropout will have no effect, since the tree will not be fully grown.
dropout_probability=1.0)
# Prepare handler inputs.
# Handler 1 only has a candidate for partition 1, handler 2 has candidates
# for both partitions and handler 3 only has a candidate for partition 2.
handler1_partitions = np.array([1], dtype=np.int32)
handler1_gains = np.array([1.4], dtype=np.float32)
handler1_split = [_gen_dense_split_info(0, 0.21, -6.0, 1.65)]
handler2_partitions = np.array([1, 2], dtype=np.int32)
handler2_gains = np.array([0.63, 2.7], dtype=np.float32)
handler2_split = [
_gen_dense_split_info(0, 0.23, -0.6, 0.24),
_gen_categorical_split_info(1, 7, -1.5, 2.3)
]
handler3_partitions = np.array([2], dtype=np.int32)
handler3_gains = np.array([1.7], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(0, 3, -0.75, 1.93)]
# Grow tree ensemble layer by layer.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect the split for partition 1 to be chosen from handler 1 and
# the split for partition 2 to be chosen from handler 2.
# The grown tree should not be finalized as max tree depth is 3 and
# it's only grown 2 layers.
# The partition 1 split weights get added to original leaf weight 7.143.
# The partition 2 split weights get added to original leaf weight -4.375.
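      # Concretely: 7.143 - 6.0 = 1.143 and 7.143 + 1.65 = 8.793 for
      # partition 1, and -4.375 - 1.5 = -5.875 and -4.375 + 2.3 = -2.075 for
      # partition 2.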
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
categorical_id_binary_split {
feature_id: 4
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.62
}
}
nodes {
dense_float_binary_split {
threshold: 0.21
left_id: 3
right_id: 4
}
node_metadata {
gain: 1.4
}
}
nodes {
categorical_id_binary_split {
feature_column: 1
feature_id: 7
left_id: 5
right_id: 6
}
node_metadata {
gain: 2.7
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 1.143
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 8.793
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -5.875
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -2.075
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 2)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 2)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 2)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowEnsembleTreeLayerByLayerObliviousCase(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.cached_session() as session:
# Create existing ensemble with one root split
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
trees {
nodes {
oblivious_dense_float_binary_split {
feature_column: 4
threshold: 7
}
node_metadata {
gain: 7.62
original_oblivious_leaves {
}
}
}
nodes {
leaf {
vector {
value: 7.143
}
}
}
nodes {
leaf {
vector {
value: -4.375
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=3,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.LAYER_BY_LAYER)
# Prepare handler inputs.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([1.4], dtype=np.float32)
handler1_split = [
_gen_dense_oblivious_split_info(0, 0.21, [-6.0, 1.65, 1.0, -0.5],
[1, 2])
]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([2.7], dtype=np.float32)
handler2_split = [
_gen_dense_oblivious_split_info(0, 0.23, [-0.6, 0.24, 0.3, 0.4],
[1, 2])
]
handler3_partitions = np.array([0], dtype=np.int32)
handler3_gains = np.array([1.7], dtype=np.float32)
handler3_split = [
_gen_dense_oblivious_split_info(0, 3, [-0.75, 1.93, 0.2, -0.1],
[1, 2])
]
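# Note: as spelled out in the empty-nodes tests below, the trailing [1, 2]
# argument of _gen_dense_oblivious_split_info lists the ids of the existing
# leaves that the per-leaf deltas apply to -- here both leaves of the
# current depth-1 tree.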
# Grow tree ensemble layer by layer.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
session.run(grow_op)
# Expect the layer-wide oblivious split to be chosen from handler 2,
# which has the highest gain (2.7).
# The grown tree should not be finalized, as max tree depth is 3 and
# it has only grown 2 layers.
# Handler 2's per-leaf deltas get added to the original leaf weights
# 7.143 and -4.375.
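# Concretely, handler 2's deltas [-0.6, 0.24, 0.3, 0.4] yield:
#   7.143 + (-0.6) = 6.543 and 7.143 + 0.24 = 7.383
#   -4.375 + 0.3 = -4.075 and -4.375 + 0.4 = -3.975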
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
oblivious_dense_float_binary_split {
feature_column: 4
threshold: 7
}
node_metadata {
gain: 7.62
original_oblivious_leaves {
}
}
}
nodes {
oblivious_dense_float_binary_split {
feature_column: 0
threshold: 0.23
}
node_metadata {
gain: 2.7
original_oblivious_leaves {
vector {
value: 7.143
}
}
original_oblivious_leaves {
vector {
value: -4.375
}
}
}
}
nodes {
leaf {
vector {
value: 6.543
}
}
}
nodes {
leaf {
vector {
value: 7.383
}
}
}
nodes {
leaf {
vector {
value: -4.075
}
}
}
nodes {
leaf {
vector {
value: -3.975
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 2)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 2)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 2)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowEnsembleWithEmptyNodesMiddleCase(self):
"""Test case: The middle existing leaves don't have examples."""
with self.cached_session() as session:
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
trees {
nodes {
oblivious_dense_float_binary_split {
feature_column: 4
threshold: 7
}
node_metadata {
gain: 7.62
original_oblivious_leaves {
}
}
}
nodes {
oblivious_dense_float_binary_split {
feature_column: 1
threshold: 0.23
}
node_metadata {
gain: 2.7
original_oblivious_leaves {
vector {
value: 7.143
}
}
original_oblivious_leaves {
vector {
value: -4.375
}
}
}
}
nodes {
leaf {
vector {
value: 6.543
}
}
}
nodes {
leaf {
vector {
value: 7.5
}
}
}
nodes {
leaf {
vector {
value: -4.075
}
}
}
nodes {
leaf {
vector {
value: -3.975
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=6,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.LAYER_BY_LAYER)
# Prepare handler inputs.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([1.8], dtype=np.float32)
handler1_split = [
_gen_dense_oblivious_split_info(0, 0.9, [1.0, 2.0, 3.0, 4.0], [2, 5])
]
# The tree currently has depth 2, so the ids for the four leaves are in
# the range [2, 6). In this test case we are assuming that our examples
# only fall in leaves 2 and 5.
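# As a result, leaves 2 and 5 receive their per-leaf deltas, while the
# empty leaves 3 and 4 keep their original values (duplicated into both
# children):
#   leaf 2: 6.543 -> 6.543 + 1.0 = 7.543 and 6.543 + 2.0 = 8.543
#   leaf 3: 7.5 -> 7.5 and 7.5 (no examples)
#   leaf 4: -4.075 -> -4.075 and -4.075 (no examples)
#   leaf 5: -3.975 -> -3.975 + 3.0 = -0.975 and -3.975 + 4.0 = 0.025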
# Grow tree ensemble layer by layer.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[handler1_partitions],
gains=[handler1_gains],
splits=[handler1_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
session.run(grow_op)
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
oblivious_dense_float_binary_split {
feature_column: 4
threshold: 7
}
node_metadata {
gain: 7.62
original_oblivious_leaves {
}
}
}
nodes {
oblivious_dense_float_binary_split {
feature_column: 1
threshold: 0.23
}
node_metadata {
gain: 2.7
original_oblivious_leaves {
vector {
value: 7.143
}
}
original_oblivious_leaves {
vector {
value: -4.375
}
}
}
}
nodes {
oblivious_dense_float_binary_split {
feature_column: 0
threshold: 0.9
}
node_metadata {
gain: 1.8
original_oblivious_leaves {
vector {
value: 6.543
}
}
original_oblivious_leaves {
vector {
value: 7.5
}
}
original_oblivious_leaves {
vector {
value: -4.075
}
}
original_oblivious_leaves {
vector {
value: -3.975
}
}
}
}
nodes {
leaf {
vector {
value: 7.543
}
}
}
nodes {
leaf {
vector {
value: 8.543
}
}
}
nodes {
leaf {
vector {
value: 7.5
}
}
}
nodes {
leaf {
vector {
value: 7.5
}
}
}
nodes {
leaf {
vector {
value: -4.075
}
}
}
nodes {
leaf {
vector {
value: -4.075
}
}
}
nodes {
leaf {
vector {
value: -0.975
}
}
}
nodes {
leaf {
vector {
value: 0.025
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 3
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 3)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 3)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 3)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowEnsembleWithEmptyNodesBorderCase(self):
"""Test case: The first and last existing leaves don't have examples."""
with self.cached_session() as session:
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
trees {
nodes {
oblivious_dense_float_binary_split {
feature_column: 4
threshold: 7
}
node_metadata {
gain: 7.62
original_oblivious_leaves {
}
}
}
nodes {
oblivious_dense_float_binary_split {
feature_column: 1
threshold: 0.23
}
node_metadata {
gain: 2.7
original_oblivious_leaves {
vector {
value: 7.143
}
}
original_oblivious_leaves {
vector {
value: -4.375
}
}
}
}
nodes {
leaf {
vector {
value: 6.543
}
}
}
nodes {
leaf {
vector {
value: 7.5
}
}
}
nodes {
leaf {
vector {
value: -4.075
}
}
}
nodes {
leaf {
vector {
value: -3.975
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=6,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.LAYER_BY_LAYER)
# Prepare handler inputs.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([1.8], dtype=np.float32)
handler1_split = [
_gen_dense_oblivious_split_info(0, 0.9, [1.0, 2.0, 3.0, 4.0], [3, 4])
]
# The tree currently has depth 2, so the ids for the four leaves are in
# the range [2, 6). In this test case we are assuming that our examples
# only fall in leaves 3 and 4.
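# As a result, only leaves 3 and 4 receive the per-leaf deltas:
#   leaf 2: 6.543 -> 6.543 and 6.543 (no examples)
#   leaf 3: 7.5 -> 7.5 + 1.0 = 8.5 and 7.5 + 2.0 = 9.5
#   leaf 4: -4.075 -> -4.075 + 3.0 = -1.075 and -4.075 + 4.0 = -0.075
#   leaf 5: -3.975 -> -3.975 and -3.975 (no examples)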
# Grow tree ensemble layer by layer.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[handler1_partitions],
gains=[handler1_gains],
splits=[handler1_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
session.run(grow_op)
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
oblivious_dense_float_binary_split {
feature_column: 4
threshold: 7
}
node_metadata {
gain: 7.62
original_oblivious_leaves {
}
}
}
nodes {
oblivious_dense_float_binary_split {
feature_column: 1
threshold: 0.23
}
node_metadata {
gain: 2.7
original_oblivious_leaves {
vector {
value: 7.143
}
}
original_oblivious_leaves {
vector {
value: -4.375
}
}
}
}
nodes {
oblivious_dense_float_binary_split {
feature_column: 0
threshold: 0.9
}
node_metadata {
gain: 1.8
original_oblivious_leaves {
vector {
value: 6.543
}
}
original_oblivious_leaves {
vector {
value: 7.5
}
}
original_oblivious_leaves {
vector {
value: -4.075
}
}
original_oblivious_leaves {
vector {
value: -3.975
}
}
}
}
nodes {
leaf {
vector {
value: 6.543
}
}
}
nodes {
leaf {
vector {
value: 6.543
}
}
}
nodes {
leaf {
vector {
value: 8.5
}
}
}
nodes {
leaf {
vector {
value: 9.5
}
}
}
nodes {
leaf {
vector {
value: -1.075
}
}
}
nodes {
leaf {
vector {
value: -0.075
}
}
}
nodes {
leaf {
vector {
value: -3.975
}
}
}
nodes {
leaf {
vector {
value: -3.975
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 3
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 3)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 3)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 3)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowExistingEnsembleTreeFinalizedWithDropout(self):
"""Test growing an existing ensemble with the last tree finalized."""
with self.cached_session() as session:
# Create existing ensemble with one root split and one bias tree.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge("""
trees {
nodes {
leaf {
vector {
value: -0.32
value: 0.28
}
}
}
}
trees {
nodes {
categorical_id_binary_split {
feature_column: 3
feature_id: 7
left_id: 1
right_id: 2
}
node_metadata {
gain: 1.3
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 2.3
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -0.9
}
}
}
}
tree_weights: 0.7
tree_weights: 1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_tree_weight_updates: 5
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 2
}
""", tree_ensemble_config)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE,
dropout_probability=1.0)
# Prepare handler inputs.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([7.62], dtype=np.float32)
handler1_split = [_gen_dense_split_info(5, 0.52, -4.375, 7.143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([0.63], dtype=np.float32)
handler2_split = [_gen_dense_split_info(2, 0.23, -0.6, 0.24)]
handler3_partitions = np.array([0], dtype=np.int32)
handler3_gains = np.array([7.62], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(8, 7, -4.375, 7.143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
# Expect a new tree to be added with the split from handler 1.
_, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
tree_ensemble_config.ParseFromString(serialized)
self.assertEqual(3, len(tree_ensemble_config.trees))
# Both non-bias trees get a weight of 0.5; the bias tree is untouched.
self.assertAllClose([0.7, 0.5, 0.5], tree_ensemble_config.tree_weights)
self.assertEqual(
1, tree_ensemble_config.tree_metadata[0].num_tree_weight_updates)
self.assertEqual(
6, tree_ensemble_config.tree_metadata[1].num_tree_weight_updates)
self.assertEqual(
2, tree_ensemble_config.tree_metadata[2].num_tree_weight_updates)
def testGrowExistingEnsembleTreeWithFeatureSelectionUsedHandlers(self):
"""Test growing a tree with feature selection."""
with self.cached_session() as session:
# Create existing ensemble with one root split and one bias tree.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge("""
trees {
nodes {
leaf {
vector {
value: -0.32
value: 0.28
}
}
}
}
trees {
nodes {
categorical_id_binary_split {
feature_column: 3
feature_id: 7
left_id: 1
right_id: 2
}
node_metadata {
gain: 1.3
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 2.3
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -0.9
}
}
}
}
tree_weights: 0.7
tree_weights: 1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_tree_weight_updates: 5
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 2
used_handler_ids: 2
}
""", tree_ensemble_config)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
learner_config.constraints.max_number_of_unique_feature_columns = 3
# Prepare handler inputs.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([7.62], dtype=np.float32)
handler1_split = [_gen_dense_split_info(5, 0.52, -4.375, 7.143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([0.63], dtype=np.float32)
handler2_split = [_gen_dense_split_info(2, 0.23, -0.6, 0.24)]
handler3_partitions = np.array([0], dtype=np.int32)
handler3_gains = np.array([7.62], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(8, 7, -4.375, 7.143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config.SerializeToString(),
dropout_seed=123,
center_bias=True,
max_tree_depth=learner_config.constraints.max_tree_depth,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
session.run(grow_op)
_, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
tree_ensemble_config.ParseFromString(serialized)
self.assertEqual(3, len(tree_ensemble_config.trees))
# Handler 2 was already used; handler 0 is added by this tree.
self.assertAllEqual(
[0, 2], tree_ensemble_config.growing_metadata.used_handler_ids)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow split handler Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.contrib.boosted_trees.python.ops import split_handler_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class SplitHandlerOpsTest(test_util.TensorFlowTestCase):
def testMakeDenseSplit(self):
"""Tests split handler op."""
with self.cached_session() as sess:
# The data looks like the following after dividing by number of steps (2).
# Gradients | Partition | Dense Quantile |
# (1.2, 0.2) | 0 | 0 |
# (-0.3, 0.19) | 0 | 1 |
# (4.0, 0.13) | 1 | 1 |
partition_ids = array_ops.constant([0, 0, 1], dtype=dtypes.int32)
bucket_ids = array_ops.constant(
[[0, 0], [1, 0], [1, 0]], dtype=dtypes.int64)
gradients = array_ops.constant([2.4, -0.6, 8.0])
hessians = array_ops.constant([0.4, 0.38, 0.26])
bucket_boundaries = [0.3, 0.52]
partitions, gains, splits = (
split_handler_ops.build_dense_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
class_id=-1,
feature_column_group_id=0,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE))
partitions, gains, splits = sess.run([partitions, gains, splits])
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
# (-0.3 + 0.1) / (0.19 + 1)
expected_right_weight = 0.1680672
# expected_right_weight * -(-0.3 + 0.1)
expected_right_gain = 0.033613445378151252
# (-0.3 + 1.2 - 0.1) ** 2 / (0.19 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
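# These values follow the standard second-order leaf estimates (assuming
# the kernel uses the same closed form as the comments above): with summed
# gradient g, summed hessian h, l1 = 0.1 and l2 = 1,
#   weight = -(g - sign(g) * l1) / (h + l2)
#   gain = (|g| - l1)^2 / (h + l2)
# and the reported split gain is left_gain + right_gain - bias_gain.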
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
expected_right_weight = 0
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# There's only one active bucket here so zero gain is expected.
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testMakeMulticlassDenseSplit(self):
"""Tests split handler op."""
with self.cached_session() as sess:
partition_ids = array_ops.constant([0, 0, 1], dtype=dtypes.int32)
bucket_ids = array_ops.constant(
[[0, 0], [1, 0], [1, 0]], dtype=dtypes.int64)
gradients = array_ops.constant([[2.4, 3.0], [-0.6, 0.1], [8.0, 1.0]])
hessians = array_ops.constant([[[0.4, 1], [1, 1]], [[0.38, 1], [1, 1]],
[[0.26, 1], [1, 1]]])
bucket_boundaries = [0.3, 0.52]
partitions, gains, splits = (
split_handler_ops.build_dense_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
class_id=-1,
feature_column_group_id=0,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE))
partitions, gains, splits = sess.run([partitions, gains, splits])
self.assertAllEqual([0, 1], partitions)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testMakeDenseSplitEmptyInputs(self):
"""Tests empty inputs op."""
with self.cached_session() as sess:
partition_ids = array_ops.constant([], dtype=dtypes.int32)
bucket_ids = array_ops.constant([[]], dtype=dtypes.int64)
gradients = array_ops.constant([])
hessians = array_ops.constant([])
bucket_boundaries = [0.3, 0.52]
partitions, gains, splits = (
split_handler_ops.build_dense_inequality_splits(
num_minibatches=0,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
class_id=-1,
feature_column_group_id=0,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE))
partitions, gains, splits = sess.run([partitions, gains, splits])
# .assertEmpty doesn't exist on ubuntu-contrib
self.assertEqual(0, len(partitions))
self.assertEqual(0, len(gains))
self.assertEqual(0, len(splits))
def testMakeSparseSplit(self):
"""Tests split handler op."""
with self.cached_session() as sess:
# The data looks like the following after dividing by number of steps (2).
# Gradients | Partition | bucket ID |
# (0.9, 0.39) | 0 | -1 |
# (1.2, 0.2) | 0 | 0 |
# (0.2, 0.12) | 0 | 1 |
# (4.0, 0.13) | 1 | -1 |
# (4.0, 0.13) | 1 | 1 |
partition_ids = array_ops.constant([0, 0, 0, 1, 1], dtype=dtypes.int32)
# We have only 1 dimension in our sparse feature column.
bucket_ids = array_ops.constant([-1, 0, 1, -1, 1], dtype=dtypes.int64)
dimension_ids = array_ops.constant([0, 0, 0, 0, 0], dtype=dtypes.int64)
bucket_ids = array_ops.stack([bucket_ids, dimension_ids], axis=1)
gradients = array_ops.constant([1.8, 2.4, 0.4, 8.0, 8.0])
hessians = array_ops.constant([0.78, 0.4, 0.24, 0.26, 0.26])
bucket_boundaries = array_ops.constant([0.3, 0.52])
partitions, gains, splits = (
split_handler_ops.build_sparse_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
partitions, gains, splits = (sess.run([partitions, gains, splits]))
self.assertAllEqual([0, 1], partitions)
self.assertEqual(2, len(splits))
# Check the split on partition 0.
# -(0.2 + 1.2) / (0.12 + 0.2 + 2)
expected_left_weight = -0.603448275862069
# (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2)
expected_left_gain = 0.8448275862068965
# 0.5 / (0.07 + 2)
expected_right_weight = 0.24154589371980678
# 0.5 ** 2 / (0.07 + 2)
expected_right_gain = 0.12077294685990339
# (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2)
expected_bias_gain = 0.3389121338912133
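# The right-hand ("default right") statistics are the bias row minus the
# buckets left of the threshold: g = 0.9 - (1.2 + 0.2) = -0.5 and
# h = 0.39 - (0.2 + 0.12) = 0.07, which is where the 0.5 and 0.07 above
# come from (with l1 = 0 and l2 = 2).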
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
# Sparse is one dimensional.
self.assertEqual(0, split_node.split.dimension_id)
self.assertAllClose(0.52, split_node.split.threshold)
# Check the split on partition 1.
expected_left_weight = -1.8779342723004695
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertAllClose(0.0, gains[1])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
# Sparse is one dimensional.
self.assertEqual(0, split_node.split.dimension_id)
self.assertAllClose(0.52, split_node.split.threshold)
def testMakeSparseSplitAllEmptyDimensions(self):
"""Tests split handler op when all dimensions have only bias bucket id."""
with self.cached_session() as sess:
# The data looks like the following after dividing by number of steps (2).
# Gradients | Partition | Dimension | bucket ID |
# (0.9, 0.39) | 0 | 0 | -1 |
# (4.0, 0.13) | 1 | 0 | -1 |
partition_ids = array_ops.constant([0, 1], dtype=dtypes.int32)
# We have only 1 dimension in our sparse feature column.
bucket_ids = array_ops.constant([[-1, 0], [-1, 0]], dtype=dtypes.int64)
gradients = array_ops.constant([1.8, 8.0])
hessians = array_ops.constant([0.78, 0.26])
bucket_boundaries = array_ops.constant([0.3, 0.52])
partitions, gains, splits = (
split_handler_ops.build_sparse_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
partitions, gains, splits = (sess.run([partitions, gains, splits]))
self.assertEqual(0, len(partitions))
self.assertEqual(0, len(splits))
def testMakeSparseMultidimensionalSplit(self):
"""Tests split handler op."""
with self.cached_session() as sess:
# Num of steps is 2.
# The feature column is three dimensional.
# First dimension has bias bucket only, the second has bias bucket and
# two valid buckets, the third has just one bias bucket and one valid
# bucket.
# Gradients | Partition | Dimension | bucket ID |
# (0.9, 0.39) | 0 | 0 | -1 |
# (1.2, 0.2) | 0 | 1 | 0 |
# (0.2, 0.12) | 0 | 1 | 2 |
# (0.1, 0.1) | 0 | 2 | 3 |
# The second node is less interesting: besides the bias entry, it only has
# a single bucket (bucket 3) in one dimension (dimension 2).
# (4.0, 0.13) | 1 | 0 | -1 |
# (4.0, 0.13) | 1 | 2 | 3 |
# Tree node ids.
partition_ids = array_ops.constant([0, 0, 0, 0, 1, 1], dtype=dtypes.int32)
dimension_ids = array_ops.constant([0, 1, 1, 2, 0, 2], dtype=dtypes.int64)
bucket_ids = array_ops.constant([-1, 0, 2, 3, -1, 3], dtype=dtypes.int64)
bucket_ids = array_ops.stack([bucket_ids, dimension_ids], axis=1)
gradients = array_ops.constant([1.8, 2.4, 0.4, 0.2, 8.0, 8.0])
hessians = array_ops.constant([0.78, 0.4, 0.24, 0.2, 0.26, 0.26])
bucket_boundaries = array_ops.constant([0.3, 0.52, 0.58, 0.6])
partitions, gains, splits = (
split_handler_ops.build_sparse_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
partitions, gains, splits = (sess.run([partitions, gains, splits]))
self.assertAllEqual([0, 1], partitions)
self.assertEqual(2, len(splits))
# Check the split on node 0 - it should split on second dimension
# -(0.2 + 1.2) / (0.12 + 0.2 + 2)
expected_left_weight = -0.603448275862069
# (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2)
expected_left_gain = 0.8448275862068965
# 0.5 / (0.07 + 2)
expected_right_weight = 0.24154589371980678
# 0.5 ** 2 / (0.07 + 2)
expected_right_gain = 0.12077294685990339
# (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2)
expected_bias_gain = 0.3389121338912133
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
# Split happened on second dimension.
self.assertEqual(1, split_node.split.dimension_id)
self.assertAllClose(0.58, split_node.split.threshold)
# Check the split on partition 1.
expected_left_weight = -1.8779342723004695
expected_right_weight = 0
# Verify candidate for partition 1, there's only one active bucket here
# so zero gain is expected.
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertAllClose(0.0, gains[1])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertEqual(2, split_node.split.dimension_id)
self.assertAllClose(0.6, split_node.split.threshold)
def testMakeSparseSplitDefaultDirectionIsStable(self):
"""Tests default direction is stable when no sparsity."""
random.seed(1123)
for _ in range(50):
with self.cached_session() as sess:
grad = random.random()
hessian = random.random()
# The data looks like the following (divide by the num of steps 2).
# Gradients | Partition | bucket ID |
# (grad, hessian) | 0 | -1 |
# And then 100 buckets of
# (grad/100, hessian/100), so there is no sparsity.
n_buckets = 100
# 1 for the overall sum, and 100 buckets.
partition_ids = array_ops.constant(
[0] * (n_buckets + 1), dtype=dtypes.int32)
# We have only 1 dimension in our sparse feature column.
bucket_ids = [-1] + [n for n in range(100)]
bucket_ids = array_ops.constant(bucket_ids, dtype=dtypes.int64)
dimension_ids = array_ops.constant(
[0] * (n_buckets + 1), dtype=dtypes.int64)
bucket_ids = array_ops.stack([bucket_ids, dimension_ids], axis=1)
gradients = [grad] + [grad / n_buckets] * n_buckets
gradients = array_ops.constant(gradients)
hessians = [hessian] + [hessian / n_buckets] * n_buckets
hessians = array_ops.constant(hessians)
boundaries = [x * 1 for x in range(n_buckets + 1)]
bucket_boundaries = array_ops.constant(boundaries, dtype=dtypes.float32)
partitions, gains, splits = (
split_handler_ops.build_sparse_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
partitions, gains, splits = (sess.run([partitions, gains, splits]))
self.assertAllEqual([0], partitions)
self.assertEqual(1, len(splits))
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
self.assertTrue(
split_info.split_node.HasField(
'sparse_float_binary_split_default_left'))
def testMakeMulticlassSparseSplit(self):
"""Tests split handler op."""
with self.cached_session() as sess:
partition_ids = array_ops.constant([0, 0, 0, 1, 1], dtype=dtypes.int32)
bucket_ids = array_ops.constant(
[[-1, 0], [0, 0], [1, 0], [-1, 0], [1, 0]], dtype=dtypes.int64)
gradients = array_ops.constant([[1.8, 3.5], [2.4, 1.0], [0.4, 4.0],
[8.0, 3.1], [8.0, 0.8]])
hessian_0 = [[0.78, 1], [12, 1]]
hessian_1 = [[0.4, 1], [1, 1]]
hessian_2 = [[0.24, 1], [1, 1]]
hessian_3 = [[0.26, 1], [1, 1]]
hessian_4 = [[0.26, 1], [1, 1]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3, hessian_4])
bucket_boundaries = array_ops.constant([0.3, 0.52])
partitions, gains, splits = (
split_handler_ops.build_sparse_inequality_splits(
num_minibatches=2,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
bucket_boundaries=bucket_boundaries,
l1_regularization=0,
l2_regularization=2,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN))
partitions, gains, splits = (sess.run([partitions, gains, splits]))
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testMakeCategoricalEqualitySplit(self):
"""Tests split handler op for categorical equality split."""
with self.cached_session() as sess:
# The data looks like the following after dividing by number of steps (2).
# Gradients | Partition | Feature ID |
# (0.9, 0.39) | 0 | -1 |
# (0.2, 0.12) | 0 | 1 |
# (1.4, 0.32) | 0 | 2 |
# (4.0, 0.13) | 1 | -1 |
# (4.0, 0.13) | 1 | 1 |
gradients = [1.8, 0.4, 2.8, 8.0, 8.0]
hessians = [0.78, 0.24, 0.64, 0.26, 0.26]
partition_ids = [0, 0, 0, 1, 1]
feature_ids = array_ops.constant(
[[-1, 0], [1, 0], [2, 0], [-1, 0], [1, 0]], dtype=dtypes.int64)
partitions, gains, splits = (
split_handler_ops.build_categorical_equality_splits(
num_minibatches=2,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians,
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE))
partitions, gains, splits = sess.run([partitions, gains, splits])
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
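# The right side aggregates everything that does not match feature id 2,
# i.e. the bias row minus the id-2 row: g = 0.9 - 1.4 = -0.5 and
# h = 0.39 - 0.32 = 0.07, which is where the -0.5 and 0.07 above come from.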
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active feature here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testMakeMulticlassCategoricalEqualitySplit(self):
"""Tests split handler op for categorical equality split in multiclass."""
with self.cached_session() as sess:
gradients = array_ops.constant([[1.8, 3.5], [2.4, 1.0], [0.4, 4.0],
[9.0, 3.1], [3.0, 0.8]])
hessian_0 = [[0.78, 1], [12, 1]]
hessian_1 = [[0.4, 1], [1, 1]]
hessian_2 = [[0.24, 1], [1, 1]]
hessian_3 = [[0.16, 2], [-1, 1]]
hessian_4 = [[0.6, 1], [2, 1]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3, hessian_4])
partition_ids = [0, 0, 0, 1, 1]
feature_ids = array_ops.constant(
[[-1, 0], [1, 0], [2, 0], [-1, 0], [1, 0]], dtype=dtypes.int64)
partitions, gains, splits = (
split_handler_ops.build_categorical_equality_splits(
num_minibatches=2,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians,
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE))
partitions, gains, splits = sess.run([partitions, gains, splits])
self.assertAllEqual([0, 1], partitions)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testMakeCategoricalEqualitySplitEmptyInput(self):
with self.cached_session() as sess:
gradients = []
hessians = []
partition_ids = []
feature_ids = [[]]
partitions, gains, splits = (
split_handler_ops.build_categorical_equality_splits(
num_minibatches=0,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians,
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
feature_column_group_id=0,
bias_feature_id=-1,
class_id=-1,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE))
partitions, gains, splits = (sess.run([partitions, gains, splits]))
self.assertEqual(0, len(partitions))
self.assertEqual(0, len(gains))
self.assertEqual(0, len(splits))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/kernel_tests/split_handler_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking quantile related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import tempfile
import numpy as np
from tensorflow.contrib.boosted_trees.proto.quantiles_pb2 import QuantileConfig
from tensorflow.contrib.boosted_trees.python.ops import quantile_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
from tensorflow.python.training import saver
class QuantileBucketsOpTest(test_util.TensorFlowTestCase):
def _gen_config(self, eps, num_quantiles):
config = QuantileConfig()
config.eps = eps
config.num_quantiles = num_quantiles
return config.SerializeToString()
def testBasicQuantileBuckets(self):
"""Sets up the quantile summary op test as follows.
Creates a batch of 6 examples with one dense and two sparse features. SparseM
is a sparse multi-dimensional (multivalent) feature.
The data looks like this
| Instance | instance weights | Dense 0 | Sparse 0 | SparseM
| 0 | 10 | 1 | | | |
| 1 | 1 | 2 | 2 | 2 | |
| 2 | 1 | 3 | 3 | 3 | |
| 3 | 1 | 4 | 4 | | 4 |
| 4 | 1 | 4 | 5 | | 5 |
| 5 | 1 | 5 | 6 | | 6 |
"""
dense_float_tensor_0 = constant_op.constant(
[1, 2, 3, 4, 4, 5], dtype=dtypes.float32)
sparse_indices_0 = constant_op.constant(
[[1, 0], [2, 0], [3, 0], [4, 0], [5, 0]], dtype=dtypes.int64)
sparse_values_0 = constant_op.constant(
[2, 3, 4, 5, 6], dtype=dtypes.float32)
sparse_shape_0 = constant_op.constant([6, 1], dtype=dtypes.int64)
# Multi-dimensional feature that should have the same quantiles as Sparse 0.
sparse_indices_m = constant_op.constant(
[[1, 1], [2, 0], [3, 1], [4, 1], [5, 1]], dtype=dtypes.int64)
sparse_values_m = constant_op.constant(
[2, 3, 4, 5, 6], dtype=dtypes.float32)
sparse_shape_m = constant_op.constant([6, 2], dtype=dtypes.int64)
example_weights = constant_op.constant(
[10, 1, 1, 1, 1, 1], dtype=dtypes.float32)
with self.cached_session():
config = self._gen_config(0.33, 3)
dense_buckets, sparse_buckets = quantile_ops.quantile_buckets(
[dense_float_tensor_0], [sparse_indices_0, sparse_indices_m],
[sparse_values_0, sparse_values_m], [sparse_shape_0, sparse_shape_m],
example_weights=example_weights,
dense_config=[config],
sparse_config=[config, config])
self.assertAllEqual([1, 3, 5], dense_buckets[0].eval())
self.assertAllEqual([2, 4, 6.], sparse_buckets[0].eval())
# Multidimensional sparse.
self.assertAllEqual([2, 4, 6.], sparse_buckets[1].eval())
def testStreamingQuantileBucketsWithVaryingBatch(self):
"""Sets up the quantile summary op test as follows.
Creates batches of examples with a different number of inputs in each batch.
The input values are dense, covering the range [1 ... N].
The data looks like this:
| Batch | Start | InputList
| 1 | 1 | [1]
| 2 | 2 | [2, 3]
| 3 | 4 | [4, 5, 6]
| 4 | 7 | [7, 8, 9, 10]
| 5 | 11 | [11, 12, 13, 14, 15]
| 6 | 16 | [16, 17, 18, 19, 20, 21]
"""
num_quantiles = 3
with self.cached_session() as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=num_quantiles,
epsilon=0.001, name="q1")
resources.initialize_resources(resources.shared_resources()).run()
input_column = array_ops.placeholder(dtypes.float32)
weights = array_ops.placeholder(dtypes.float32)
update = accumulator.add_summary(
stamp_token=0,
column=input_column,
example_weights=weights)
with self.cached_session() as sess:
for i in range(1, 23):
# start = 1, 2, 4, 7, 11, 16 ... (see comment above)
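# Batch i contributes i inputs, so the cumulative count before batch i is
# 1 + 2 + ... + (i - 1) = i * (i - 1) / 2, hence the formula below.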
start = int((i * (i-1) / 2) + 1)
sess.run(update,
{input_column: range(start, start+i),
weights: [1] * i})
with self.cached_session() as sess:
sess.run(accumulator.flush(stamp_token=0, next_stamp_token=1))
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run(
[buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertEqual(num_quantiles + 1, len(buckets))
self.assertAllEqual([1, 86., 170., 253.], buckets)
def testStreamingQuantileBucketsLowPrecisionInput(self):
"""Tests inputs that simulate low precision float16 values."""
num_quantiles = 3
# set generate_quantiles to True since the test will generate fewer
# boundaries otherwise.
with self.cached_session() as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=num_quantiles,
epsilon=0.001, name="q1", generate_quantiles=True)
resources.initialize_resources(resources.shared_resources()).run()
input_column = array_ops.placeholder(dtypes.float32)
weights = array_ops.placeholder(dtypes.float32)
update = accumulator.add_summary(
stamp_token=0,
column=input_column,
example_weights=weights)
with self.cached_session() as sess:
# This input is generated from integers in the range [2030, 2060] but
# represented with float16 precision. Integers <= 2048 are exactly
# representable, whereas numbers > 2048 are rounded, and hence some
# numbers > 2048 are repeated. For precision loss / rounding, see:
# https://en.wikipedia.org/wiki/Half-precision_floating-point_format.
#
# The intent of the test is not the handling of float16 values, but to
# validate that the expected number of buckets is returned in cases where
# the input may contain repeated values.
inputs = [
2030.0, 2031.0, 2032.0, 2033.0, 2034.0, 2035.0, 2036.0, 2037.0,
2038.0, 2039.0, 2040.0, 2041.0, 2042.0, 2043.0, 2044.0, 2045.0,
2046.0, 2047.0, 2048.0, 2048.0, 2050.0, 2052.0, 2052.0, 2052.0,
2054.0, 2056.0, 2056.0, 2056.0, 2058.0, 2060.0
]
sess.run(update,
{input_column: inputs,
weights: [1] * len(inputs)})
with self.cached_session() as sess:
sess.run(accumulator.flush(stamp_token=0, next_stamp_token=1))
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run(
[buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertEqual(num_quantiles + 1, len(buckets))
self.assertAllEqual([2030, 2040, 2050, 2060], buckets)
def _testStreamingQuantileBucketsHelper(
self, inputs, num_quantiles=3, expected_buckets=None):
"""Helper to test quantile buckets on different inputs."""
# set generate_quantiles to True since the test will generate fewer
# boundaries otherwise.
with self.cached_session() as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=num_quantiles,
epsilon=0.001, name="q1", generate_quantiles=True)
resources.initialize_resources(resources.shared_resources()).run()
input_column = array_ops.placeholder(dtypes.float32)
weights = array_ops.placeholder(dtypes.float32)
update = accumulator.add_summary(
stamp_token=0,
column=input_column,
example_weights=weights)
with self.cached_session() as sess:
sess.run(update,
{input_column: inputs,
weights: [1] * len(inputs)})
with self.cached_session() as sess:
sess.run(accumulator.flush(stamp_token=0, next_stamp_token=1))
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run(
[buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
# By default, use 3 quantiles, 4 boundaries for simplicity.
self.assertEqual(num_quantiles + 1, len(buckets))
if expected_buckets:
self.assertAllEqual(buckets, expected_buckets)
def testStreamingQuantileBucketsRepeatedSingleValue(self):
inputs = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
self._testStreamingQuantileBucketsHelper(inputs)
def testStreamingQuantileBucketsRepeatedTwoValues(self):
inputs = [1, 1, 1, 2, 2, 2, 2, 2, 1, 1]
self._testStreamingQuantileBucketsHelper(inputs)
def testStreamingQuantileBucketsRepeatedTwoValuesUnbalanced(self):
inputs = [7, 7, 7, 2, 7, 7, 2, 2, 7, 7]
self._testStreamingQuantileBucketsHelper(inputs)
def testStreamingQuantileBucketsFewerInputsThanBuckets(self):
inputs = [5]
self._testStreamingQuantileBucketsHelper(inputs)
def testStreamingQuantileBucketsEqualDistributionInSequence(self):
# Input pattern is of the form [1, 1, 1, 2, 2, 2, 3, 3, 3, ...]
ones = 100 * [1]
inputs = []
for i in range(1, 101):
inputs += [i * k for k in ones]
# Expect 100 equally spaced buckets.
expected_buckets = range(1, 101)
self._testStreamingQuantileBucketsHelper(
inputs, num_quantiles=99, expected_buckets=expected_buckets)
def testStreamingQuantileBucketsEqualDistributionInterleaved(self):
# Input pattern is of the form [1, 2, 3, 1, 2, 3, 1, 2, 3, ...]
sequence = range(1, 101)
inputs = []
for _ in range(1, 101):
inputs += sequence
# Expect 100 equally spaced buckets.
expected_buckets = range(1, 101)
self._testStreamingQuantileBucketsHelper(
inputs, num_quantiles=99, expected_buckets=expected_buckets)
def testStreamingQuantileBuckets(self):
"""Sets up the quantile summary op test as follows.
100 batches of data are added to the accumulator. The batches are of the form:
[0 1 .. 99]
[100 101 .. 200]
...
[9900 9901 .. 9999]
All the batches have 1 for all the example weights.
"""
with self.cached_session() as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.01, name="q1")
resources.initialize_resources(resources.shared_resources()).run()
weight_placeholder = array_ops.placeholder(dtypes.float32)
dense_placeholder = array_ops.placeholder(dtypes.float32)
update = accumulator.add_summary(
stamp_token=0,
column=dense_placeholder,
example_weights=weight_placeholder)
with self.cached_session() as sess:
for i in range(100):
dense_float = np.linspace(
i * 100, (i + 1) * 100 - 1, num=100).reshape(-1, 1)
sess.run(update, {
dense_placeholder: dense_float,
weight_placeholder: np.ones(shape=(100, 1), dtype=np.float32)
})
with self.cached_session() as sess:
sess.run(accumulator.flush(stamp_token=0, next_stamp_token=1))
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertAllEqual([0, 3335., 6671., 9999.], buckets)
def testStreamingQuantileBucketsTwoLevel(self):
"""Sets up the quantile summary op test as follows.
100 batches of data are added to the accumulator. The batches are of the form:
[0 1 .. 99]
[100 101 .. 200]
...
[9900 9901 .. 9999]
All the batches have 1 for all the example weights.
"""
with self.cached_session() as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.01, name="q1")
accumulator_2 = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.01, name="q2")
resources.initialize_resources(resources.shared_resources()).run()
weight_placeholder = array_ops.placeholder(dtypes.float32)
dense_placeholder = array_ops.placeholder(dtypes.float32)
update = accumulator.add_summary(
stamp_token=0,
column=dense_placeholder,
example_weights=weight_placeholder)
with self.cached_session() as sess:
for i in range(100):
dense_float = np.linspace(
i * 100, (i + 1) * 100 - 1, num=100).reshape(-1, 1)
sess.run(update, {
dense_placeholder: dense_float,
weight_placeholder: np.ones(shape=(100, 1), dtype=np.float32)
})
with self.cached_session() as sess:
summary = sess.run(
accumulator.flush_summary(stamp_token=0, next_stamp_token=1))
sess.run(
accumulator_2.add_prebuilt_summary(
stamp_token=0, summary=constant_op.constant(summary)))
sess.run(accumulator_2.flush(stamp_token=0, next_stamp_token=1))
are_ready_flush, buckets = (accumulator_2.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertAllEqual([0, 3337., 6677., 9999.], buckets)
def testSaveRestoreBeforeFlush(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.33, name="q0")
save = saver.Saver()
resources.initialize_resources(resources.shared_resources()).run()
sparse_indices_0 = constant_op.constant(
[[1, 0], [2, 1], [3, 0], [4, 2], [5, 0]], dtype=dtypes.int64)
sparse_values_0 = constant_op.constant(
[2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtypes.float32)
sparse_shape_0 = constant_op.constant([6, 3], dtype=dtypes.int64)
example_weights = constant_op.constant(
[10, 1, 1, 1, 1, 1], dtype=dtypes.float32, shape=[6, 1])
update = accumulator.add_summary(
stamp_token=0,
column=sparse_tensor.SparseTensor(sparse_indices_0, sparse_values_0,
sparse_shape_0),
example_weights=example_weights)
update.run()
save.save(sess, save_path)
reset = accumulator.flush(stamp_token=0, next_stamp_token=1)
with ops.control_dependencies([reset]):
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertAllEqual([2, 4, 6.], buckets)
with self.session(graph=ops.Graph()) as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.33, name="q0")
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
are_ready_noflush = accumulator.get_buckets(stamp_token=0)[0]
with ops.control_dependencies([are_ready_noflush]):
reset = accumulator.flush(stamp_token=0, next_stamp_token=1)
with ops.control_dependencies([reset]):
are_ready_flush, buckets = accumulator.get_buckets(stamp_token=1)
buckets, are_ready_flush, are_ready_noflush = (sess.run(
[buckets, are_ready_flush, are_ready_noflush]))
self.assertFalse(are_ready_noflush)
self.assertTrue(are_ready_flush)
self.assertAllEqual([2, 4, 6.], buckets)
def testSaveRestoreAfterFlush(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.33, name="q0")
save = saver.Saver()
resources.initialize_resources(resources.shared_resources()).run()
example_weights = constant_op.constant(
[10, 1, 1, 1, 1, 1], dtype=dtypes.float32, shape=[6, 1])
dense_float_tensor_0 = constant_op.constant(
[1, 2, 3, 4, 4, 5], dtype=dtypes.float32, shape=[6, 1])
update = accumulator.add_summary(
stamp_token=0,
column=dense_float_tensor_0,
example_weights=example_weights)
update.run()
reset = accumulator.flush(stamp_token=0, next_stamp_token=1)
with ops.control_dependencies([reset]):
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertAllEqual([1, 3, 5], buckets)
save.save(sess, save_path)
with self.session(graph=ops.Graph()) as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.33, name="q0")
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertAllEqual([1, 3, 5], buckets)
def testFixedUniform(self):
"""Sets up the quantile summary op test as follows.
    Creates an array that divides the range [0, 1] into 1 << 16 equally spaced
    steps, each element with a weight of 1.0.
"""
dense_float_tensor_0 = constant_op.constant(
[(1.0 * i) / math.pow(2.0, 16)
for i in range(0, int(math.pow(2, 16)) + 1)])
example_weights = constant_op.constant(
[1] * (int(math.pow(2, 16)) + 1), dtype=dtypes.float32)
config = self._gen_config(0.1, 10)
with self.cached_session():
dense_buckets, _ = quantile_ops.quantile_buckets(
[dense_float_tensor_0], [], [], [],
example_weights=example_weights,
dense_config=[config],
sparse_config=[])
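      # With uniform values and unit weights the ten buckets should have evenly
      # spaced boundaries 0, 0.1, ..., 1.0 (up to the atol=0.1 tolerance).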
self.assertAllClose(
[0] + [(i + 1.0) / 10 for i in range(0, 10)],
dense_buckets[0].eval(),
atol=0.1)
def testFixedNonUniform(self):
"""Sets up the quantile summary op test as follows.
    Creates an array that divides the range [0, 1] into 1 << 16 equally spaced
    steps, where each element's weight equals its value.
"""
dense_float_tensor_0 = constant_op.constant(
[(1.0 * i) / math.pow(2.0, 16)
for i in range(0, int(math.pow(2, 16)) + 1)])
example_weights = constant_op.constant(
[(1.0 * i) / math.pow(2.0, 16)
for i in range(0, int(math.pow(2, 16)) + 1)])
config = self._gen_config(0.1, 10)
with self.cached_session():
dense_buckets, _ = quantile_ops.quantile_buckets(
[dense_float_tensor_0], [], [], [],
example_weights=example_weights,
dense_config=[config],
sparse_config=[])
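      # Because each element's weight equals its value, the weighted CDF on
      # [0, 1] grows like x**2, so the boundary for cumulative fraction q is
      # sqrt(q); hence the sqrt((i + 1) / 10) expectations below.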
self.assertAllClose(
[0] + [math.sqrt((i + 1.0) / 10) for i in range(0, 10)],
dense_buckets[0].eval(),
atol=0.1)
class QuantilesOpTest(test_util.TensorFlowTestCase):
def setUp(self):
"""Sets up the quantile op tests.
    Creates a batch of 4 examples having 2 dense and 4 sparse features.
    The fourth sparse feature is multivalent (3-dimensional).
    The data looks like this:
| Instance | Dense 0 | Dense 1 | Sparse 0 | Sparse 1 |Sparse 2| SparseM
| 0 | -0.1 | -1 | -2 | 0.1 | |_ ,1,_
| 1 | 0.4 | -15 | 5.5 | | 2 |2 ,_,_
| 2 | 3.2 | 18 | 16 | 3 | |__,_,_
| 3 | 190 | 1000 | 17.5 | -3 | 4 |1 ,8,1
Quantiles are:
Dense 0: (-inf,0.4], (0.4,5], (5, 190]
Dense 1: (-inf, -9], (-9,15], (15, 1000)
Sparse 0: (-inf, 5], (5,16], (16, 100]
Sparse 1: (-inf, 2], (2, 5]
Sparse 2: (-inf, 100]
SparseM: (-inf, 1], (1,2], (2,1000]
"""
super(QuantilesOpTest, self).setUp()
self._dense_float_tensor_0 = constant_op.constant(
[[-0.1], [0.4], [3.2], [190]], dtype=dtypes.float32)
self._dense_float_tensor_1 = constant_op.constant(
[[-1], [-15], [18], [1000]], dtype=dtypes.float32)
# Sparse feature 0
self._sparse_indices_0 = constant_op.constant(
[[0, 0], [1, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
self._sparse_values_0 = constant_op.constant([-2, 5.5, 16, 17.5])
self._sparse_shape_0 = constant_op.constant([4, 1])
    # Sparse feature 1
self._sparse_indices_1 = constant_op.constant(
[[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
self._sparse_values_1 = constant_op.constant([0.1, 3, -3])
self._sparse_shape_1 = constant_op.constant([4, 1])
    # Sparse feature 2
self._sparse_indices_2 = constant_op.constant(
[[1, 0], [3, 0]], dtype=dtypes.int64)
self._sparse_values_2 = constant_op.constant([2, 4], dtype=dtypes.float32)
self._sparse_shape_2 = constant_op.constant([4, 1])
    # Sparse feature M
self._sparse_indices_m = constant_op.constant(
[[0, 1], [1, 0], [3, 0], [3, 1], [3, 2]], dtype=dtypes.int64)
self._sparse_values_m = constant_op.constant(
[1, 2, 1, 8, 1], dtype=dtypes.float32)
self._sparse_shape_m = constant_op.constant([4, 1])
# Quantiles
self._dense_thresholds_0 = [0.4, 5, 190]
self._dense_thresholds_1 = [-9, 15, 1000]
self._sparse_thresholds_0 = [5, 16, 100]
self._sparse_thresholds_1 = [2, 5]
self._sparse_thresholds_2 = [100]
self._sparse_thresholds_m = [1, 2, 1000]
def testDenseFeaturesOnly(self):
with self.cached_session():
dense_quantiles, _ = quantile_ops.quantiles(
[self._dense_float_tensor_0, self._dense_float_tensor_1], [],
[self._dense_thresholds_0, self._dense_thresholds_1], [], [])
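      # Each output row is [bucket index, feature dimension]: a value falls in
      # bucket j when it lies in the j-th range listed in the setUp docstring,
      # e.g. dense 0 value 3.2 lies in (0.4, 5], i.e. bucket 1.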
# Dense feature 0
self.assertAllEqual([[0, 0], [0, 0], [1, 0], [2, 0]],
dense_quantiles[0].eval())
# Dense feature 1
self.assertAllEqual([[1, 0], [0, 0], [2, 0], [2, 0]],
dense_quantiles[1].eval())
def testSparseFeaturesOnly(self):
with self.cached_session():
_, sparse_quantiles = quantile_ops.quantiles([], [
self._sparse_values_0, self._sparse_values_1, self._sparse_values_2,
self._sparse_values_m
], [], [
self._sparse_thresholds_0, self._sparse_thresholds_1,
self._sparse_thresholds_2, self._sparse_thresholds_m
], [
self._sparse_indices_0, self._sparse_indices_1,
self._sparse_indices_2, self._sparse_indices_m
])
self.assertAllEqual(4, len(sparse_quantiles))
# Sparse feature 0
self.assertAllEqual([[0, 0], [1, 0], [1, 0], [2, 0]],
sparse_quantiles[0].eval())
# Sparse feature 1
self.assertAllEqual([[0, 0], [1, 0], [0, 0]], sparse_quantiles[1].eval())
# Sparse feature 2
self.assertAllEqual([[0, 0], [0, 0]], sparse_quantiles[2].eval())
# Multidimensional feature.
self.assertAllEqual([[0, 1], [1, 0], [0, 0], [2, 1], [0, 2]],
sparse_quantiles[3].eval())
def testDenseAndSparseFeatures(self):
with self.cached_session():
dense_quantiles, sparse_quantiles = quantile_ops.quantiles(
[self._dense_float_tensor_0, self._dense_float_tensor_1], [
self._sparse_values_0, self._sparse_values_1,
self._sparse_values_2, self._sparse_values_m
], [self._dense_thresholds_0, self._dense_thresholds_1], [
self._sparse_thresholds_0, self._sparse_thresholds_1,
self._sparse_thresholds_2, self._sparse_thresholds_m
], [
self._sparse_indices_0, self._sparse_indices_1,
self._sparse_indices_2, self._sparse_indices_m
])
# Dense feature 0
self.assertAllEqual([[0, 0], [0, 0], [1, 0], [2, 0]],
dense_quantiles[0].eval())
# Dense feature 1
self.assertAllEqual([[1, 0], [0, 0], [2, 0], [2, 0]],
dense_quantiles[1].eval())
# Sparse feature 0
self.assertAllEqual([[0, 0], [1, 0], [1, 0], [2, 0]],
sparse_quantiles[0].eval())
# Sparse feature 1
self.assertAllEqual([[0, 0], [1, 0], [0, 0]], sparse_quantiles[1].eval())
# Sparse feature 2
self.assertAllEqual([[0, 0], [0, 0]], sparse_quantiles[2].eval())
# Multidimensional feature.
self.assertAllEqual([[0, 1], [1, 0], [0, 0], [2, 1], [0, 2]],
sparse_quantiles[3].eval())
def testBucketizeWithInputBoundaries(self):
with self.cached_session():
buckets = quantile_ops.bucketize_with_input_boundaries(
input=[1, 2, 3, 4, 5],
boundaries=[3])
self.assertAllEqual([0, 0, 1, 1, 1], buckets.eval())
def testBucketizeWithInputBoundaries2(self):
with self.cached_session():
boundaries = constant_op.constant([3], dtype=dtypes.float32)
buckets = quantile_ops.bucketize_with_input_boundaries(
input=[1, 2, 3, 4, 5],
boundaries=boundaries)
self.assertAllEqual([0, 0, 1, 1, 1], buckets.eval())
def testBucketizeWithInputBoundaries3(self):
with self.cached_session():
b = array_ops.placeholder(dtypes.float32)
buckets = quantile_ops.bucketize_with_input_boundaries(
input=[1, 2, 3, 4, 5],
boundaries=b)
self.assertAllEqual([0, 1, 1, 2, 2],
buckets.eval(feed_dict={b: [2, 4]}))
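# The helper below is an illustrative, pure-numpy sketch (not part of the
# original test file and not a TensorFlow API) of the boundaries the streaming
# accumulator is expected to approximate: num_quantiles buckets are delimited
# by num_quantiles + 1 boundaries taken at evenly spaced positions of the
# weighted CDF.
def _reference_quantile_boundaries(values, weights, num_quantiles):
  """Returns approximate equal-weight bucket boundaries for `values`."""
  order = np.argsort(values)
  sorted_values = np.asarray(values, dtype=np.float64)[order]
  sorted_weights = np.asarray(weights, dtype=np.float64)[order]
  # Normalized weighted CDF evaluated at each sorted value.
  cdf = np.cumsum(sorted_weights) / np.sum(sorted_weights)
  targets = np.linspace(0.0, 1.0, num_quantiles + 1)
  # First index whose CDF reaches each target, clamped to the last element.
  indices = np.minimum(
      np.searchsorted(cdf, targets, side="left"), len(sorted_values) - 1)
  return sorted_values[indices]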
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/kernel_tests/quantile_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow prediction Ops.
The tests cover tree traversal and additive models for single and
multi class problems.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import prediction_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def _append_to_leaf(leaf, c_id, w):
"""Helper method for building tree leaves.
Appends weight contributions for the given class index to a leaf node.
Args:
leaf: leaf node to append to.
c_id: class Id for the weight update.
w: weight contribution value.
"""
leaf.sparse_vector.index.append(c_id)
leaf.sparse_vector.value.append(w)
def _append_multi_values_to_leaf(leaf, c_ids, w):
"""Helper method for building tree leaves with sparse vector of values.
Appends weight contributions for the given class index to a leaf node.
Args:
leaf: leaf node to append to.
c_ids: list of class ids
w: corresponding weight contributions for the classes in c_ids
"""
for i in range(len(c_ids)):
leaf.sparse_vector.index.append(c_ids[i])
leaf.sparse_vector.value.append(w[i])
def _append_multi_values_to_dense_leaf(leaf, w):
"""Helper method for building tree leaves with dense vector of values.
Appends weight contributions to a leaf. w is assumed to be for all classes.
Args:
leaf: leaf node to append to.
w: corresponding weight contributions for all classes.
"""
for x in w:
leaf.vector.value.append(x)
def _set_float_split(split, feat_col, thresh, l_id, r_id, feature_dim_id=None):
"""Helper method for building tree float splits.
Sets split feature column, threshold and children.
Args:
split: split node to update.
feat_col: feature column for the split.
thresh: threshold to split on forming rule x <= thresh.
l_id: left child Id.
r_id: right child Id.
feature_dim_id: dimension of the feature column to be used in the split.
"""
split.feature_column = feat_col
split.threshold = thresh
split.left_id = l_id
split.right_id = r_id
if feature_dim_id is not None:
split.dimension_id = feature_dim_id
def _set_float_oblivious_split(split, feat_col, thresh):
"""Helper method for building tree float splits.
Sets split feature column and threshold.
Args:
split: split node to update.
feat_col: feature column for the split.
thresh: threshold to split on forming rule x <= thresh.
"""
split.feature_column = feat_col
split.threshold = thresh
def _set_categorical_id_split(split, feat_col, feat_id, l_id, r_id):
"""Helper method for building tree categorical id splits.
Sets split feature column, feature id and children.
Args:
split: categorical id split node.
feat_col: feature column for the split.
feat_id: feature id forming rule x == id.
l_id: left child Id.
r_id: right child Id.
"""
split.feature_column = feat_col
split.feature_id = feat_id
split.left_id = l_id
split.right_id = r_id
class PredictionOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
"""Sets up the prediction tests.
    Creates a batch of two examples having three dense float, two single-valued
    sparse float, one multidimensional sparse float and one sparse int
    features. The data looks like the following:
|Instance |Dense0 |Dense1 |Dense2 |SparseF0 |SparseF1 |SparseI0 |SparseM
| 0 | 7 | 1 | 2 | -3 | | 9,1 | __, 5.0
| 1 | -2 | 2 | 0.5 | | 4 | | 3, ___
"""
super(PredictionOpsTest, self).setUp()
self._dense_float_tensor1 = np.array([[7.0], [-2.0]])
self._dense_float_tensor2 = np.array([[1.0], [2.0]])
self._dense_float_tensor3 = np.array([[2.0], [0.5]])
self._sparse_float_indices1 = np.array([[0, 0]])
self._sparse_float_values1 = np.array([-3.0])
self._sparse_float_shape1 = np.array([2, 1])
self._sparse_float_indices2 = np.array([[1, 0]])
self._sparse_float_values2 = np.array([4.0])
self._sparse_float_shape2 = np.array([2, 1])
# Multi dimensional sparse float
self._sparse_float_indices_m = np.array([[0, 1], [1, 0]])
self._sparse_float_values_m = np.array([5.0, 3.0])
self._sparse_float_shape_m = np.array([2, 2])
self._sparse_int_indices1 = np.array([[0, 0], [0, 1]])
self._sparse_int_values1 = np.array([9, 1])
self._sparse_int_shape1 = np.array([2, 2])
self._seed = 123
def _get_predictions(self,
tree_ensemble_handle,
learner_config,
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=False):
return prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor1],
[self._sparse_float_indices1, self._sparse_float_indices2],
[self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1, self._sparse_float_shape2],
[self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1],
learner_config=learner_config,
apply_dropout=apply_dropout,
apply_averaging=apply_averaging,
center_bias=center_bias,
reduce_dim=reduce_dim)
def _get_predictions_oblivious_case(self,
tree_ensemble_handle,
learner_config,
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=False):
return prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [
self._dense_float_tensor1, self._dense_float_tensor2,
self._dense_float_tensor3
], [], [], [], [], [], [],
learner_config=learner_config,
apply_dropout=apply_dropout,
apply_averaging=apply_averaging,
center_bias=center_bias,
reduce_dim=reduce_dim)
def testEmptyEnsemble(self):
with self.cached_session():
      # Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="empty")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
self.assertAllEqual([[0], [0]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testBiasEnsembleSingleClass(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="bias")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
self.assertAllClose([[-0.4], [-0.4]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testBiasEnsembleMultiClass(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
leaf = tree.nodes.add().leaf
_append_to_leaf(leaf, 0, -0.4)
_append_to_leaf(leaf, 1, 0.9)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="multiclass")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
self.assertAllClose([[-0.4, 0.9], [-0.4, 0.9]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testFullEnsembleSingleClass(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# The first example will get bias -0.4 from first tree and
# leaf 4 payload of -0.9 hence -1.3, the second example will
# get the same bias -0.4 and leaf 3 payload (sparse feature missing)
# of 1.2 hence 0.8.
self.assertAllClose([[-1.3], [0.8]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testObliviousEnsemble(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
_set_float_oblivious_split(
tree2.nodes.add().oblivious_dense_float_binary_split, 0, 5.0)
_set_float_oblivious_split(
tree2.nodes.add().oblivious_dense_float_binary_split, 1, 3.0)
_set_float_oblivious_split(
tree2.nodes.add().oblivious_dense_float_binary_split, 2, 1.0)
for i in range(1, 9):
_append_to_leaf(tree2.nodes.add().leaf, 0, i / 10.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = self._get_predictions_oblivious_case(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# The first example will get bias -0.4 from first tree and 0.6 from
# the 5th leaf of the second tree corresponding to node_id = 8, hence a
# prediction of 0.2.
# The second example will get bias -0.4 and 0.1 from the 0th leaf of the
# second tree corresponding to node_id = 3, hence a prediction of -0.3
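      # In an oblivious tree the shared left/right decisions form the bits of
      # the leaf index (left = 0, right = 1, most significant bit first):
      # example 1 ([7, 1, 2] against thresholds 5, 3, 1) goes right, left,
      # right -> 0b101 = leaf 5 (payload 0.6); example 2 ([-2, 2, 0.5]) goes
      # left, left, left -> leaf 0 (payload 0.1).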
self.assertAllClose([[0.2], [-0.3]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testFullEnsembleWithMultidimensionalSparseSingleClass(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
      # Use feature column 2 (sparse multidimensional) and split on its first
      # dimension. Node 0.
_set_float_split(
tree2.nodes.add().sparse_float_binary_split_default_right.split,
2,
7.0,
1,
2,
feature_dim_id=0)
      # Leaves split on the second dimension of the sparse multidimensional
      # feature.
# Node 1.
_set_float_split(
tree2.nodes.add().sparse_float_binary_split_default_left.split,
2,
4.5,
3,
4,
feature_dim_id=1)
# Node 2.
_set_float_split(
tree2.nodes.add().sparse_float_binary_split_default_right.split,
2,
9,
5,
6,
feature_dim_id=1)
# Node 3.
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.6)
# Node 4.
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.3)
# Node 5.
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.1)
# Node 6.
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.8)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor1], [
self._sparse_float_indices1, self._sparse_float_indices2,
self._sparse_float_indices_m
], [
self._sparse_float_values1, self._sparse_float_values2,
self._sparse_float_values_m
], [
self._sparse_float_shape1, self._sparse_float_shape2,
self._sparse_float_shape_m
], [self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
# The first example will get bias -0.4 from first tree and
# leaf 5 payload of -0.1 hence -0.5, the second example will
# get the same bias -0.4 and leaf 3 payload (0.6) hence 0.2
self.assertAllClose([[-0.5], [0.2]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testExcludeNonFinalTree(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = False
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# All the examples should get only the bias since the second tree is
# non-finalized
self.assertAllClose([[-0.4], [-0.4]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testIncludeNonFinalTree(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = False
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# The first example will get bias -0.4 from first tree and
# leaf 4 payload of -0.9 hence -1.3, the second example will
# get the same bias -0.4 and leaf 3 payload (sparse feature missing)
# of 1.2 hence 0.8. Note that the non-finalized tree is included.
self.assertAllClose([[-1.3], [0.8]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testMetadataMissing(self):
    # Sometimes we want to do prediction on trees that are not yet added to the
    # ensemble, so the tree metadata is intentionally left unset in this test.
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
# We are not setting the tree_ensemble_config.tree_metadata in this test.
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# The first example will get bias -0.4 from first tree and
# leaf 4 payload of -0.9 hence -1.3, the second example will
# get the same bias -0.4 and leaf 3 payload (sparse feature missing)
# of 1.2 hence 0.8.
self.assertAllClose([[-1.3], [0.8]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
  # For the TREE_PER_CLASS strategy, the predictions size is num_classes - 1.
def testFullEnsembleMultiClassTreePerClassStrategy(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree only for second class.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)
# Depth 2 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_right.split, 1, 4.0,
1, 2)
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 1, 1.2)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="ensemble_multi_class")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# The first example will get bias class 1 -0.2 from first tree and
# leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2],
# the second example will get the same bias class 1 -0.2 and leaf 3
# payload of class 1 1.2 hence [0.0, 1.0].
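      # With the TREE_PER_CLASS strategy and reduce_dim=True the prediction has
      # num_classes - 1 = 2 columns, as noted in the comment above this test.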
self.assertAllClose([[0.5, -0.2], [0, 1.0]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
  # For non-tree-per-class multiclass handling strategies, the predictions
  # vector will have the size of the number of classes.
  # This test covers the case where leaves have SPARSE weights stored (class id
  # and contribution).
def testFullEnsembleMultiNotClassTreePerClassStrategySparseVector(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree only for second class.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)
# Depth 2 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_right.split, 1, 4.0,
1, 2)
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_multi_values_to_leaf(tree2.nodes.add().leaf, [1, 2], [1.2, -0.7])
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="ensemble_multi_class")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=False)
      # The first example will get the class 1 bias of -0.2 from the first tree
      # and the leaf 2 payload (sparse feature missing) of 0.5, hence
      # [0.5, -0.2, 0.0]; the second example will get the same class 1 bias
      # -0.2 and the leaf 3 payload of 1.2 for class 1 and -0.7 for class 2,
      # hence [0.0, 1.0, -0.7].
self.assertAllClose([[0.5, -0.2, 0.0], [0, 1.0, -0.7]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
  # For all non-tree-per-class multiclass handling strategies, the predictions
  # vector will have the size of the number of classes.
  # This test covers the case where leaves have DENSE weights stored (a weight
  # for each class).
def testFullEnsembleMultiNotClassTreePerClassStrategyDenseVector(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree only for second class.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_multi_values_to_dense_leaf(tree1.nodes.add().leaf, [0, -0.2, -2])
# Depth 2 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_right.split, 1, 4.0,
1, 2)
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)
_append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0.5, 0, 0])
_append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0, 1.2, -0.7])
_append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [-0.9, 0, 0])
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="ensemble_multi_class")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=False)
      # The first example will get the class 1 bias of -0.2 and the class 2
      # bias of -2 from the first tree plus the leaf 2 payload (sparse feature
      # missing) of 0.5, hence [0.5, -0.2, -2.0]; the second example will get
      # the same bias and the leaf 3 payload of 1.2 for class 1 and -0.7 for
      # class 2, hence [0.0, 1.0, -2.7].
self.assertAllClose([[0.5, -0.2, -2.0], [0, 1.0, -2.7]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testDropout(self):
with self.cached_session():
      # Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Add 1000 trees with some weights.
for i in range(0, 999):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Prepare learner/dropout config.
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
# Apply dropout.
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
resources.initialize_resources(resources.shared_resources()).run()
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
      # We expect approximately 500 trees to be dropped.
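      # With 999 finalized trees and dropout_probability=0.5, the dropped count
      # concentrates around 500, so the [400, 600] range below is a loose
      # sanity band (assuming each tree is dropped independently).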
dropout_info = dropout_info.eval()
self.assertIn(dropout_info[0].size, range(400, 601))
self.assertEqual(dropout_info[0].size, dropout_info[1].size)
for i in range(dropout_info[0].size):
dropped_index = dropout_info[0][i]
dropped_weight = dropout_info[1][i]
# We constructed the trees so tree number + 1 is the tree weight, so
# we can check here the weights for dropped trees.
self.assertEqual(dropped_index + 1, dropped_weight)
# Don't apply dropout.
result_no_dropout, no_dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
self.assertEqual(result.eval().size, result_no_dropout.eval().size)
for i in range(result.eval().size):
self.assertNotEqual(result.eval()[i], result_no_dropout.eval()[i])
# We expect none of the trees were dropped.
self.assertAllEqual([[], []], no_dropout_info.eval())
def testDropoutCenterBiasNoGrowingMeta(self):
# This is for normal non-batch mode where ensemble does not contain the tree
# that is being built currently.
num_trees = 10
with self.cached_session():
# Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Add 10 trees with some weights.
for i in range(0, num_trees):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Prepare learner/dropout config.
learner_config = learner_pb2.LearnerConfig()
# Drop all the trees.
learner_config.learning_rate_tuner.dropout.dropout_probability = 1.0
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
resources.initialize_resources(resources.shared_resources()).run()
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
result_center, dropout_info_center = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=True,
reduce_dim=True)
dropout_info = dropout_info.eval()
dropout_info_center = dropout_info_center.eval()
# With centering, the bias tree is not dropped.
num_dropped = dropout_info[0].size
self.assertEqual(num_dropped, num_trees)
num_dropped_center = dropout_info_center[0].size
self.assertEqual(num_dropped_center, num_trees - 1)
result = result.eval()
result_center = result_center.eval()
for i in range(result.size):
self.assertNotEqual(result[i], result_center[i])
      # First dropped tree is the bias tree (tree 0).
self.assertEqual(0, dropout_info[0][0])
# Last dropped tree is the last tree.
self.assertEqual(num_trees - 1, dropout_info[0][num_dropped - 1])
      # First dropped tree is tree 1.
self.assertEqual(1, dropout_info_center[0][0])
# Last dropped tree is the last tree.
self.assertEqual(num_trees - 1, dropout_info_center[0][num_dropped_center
- 1])
def testDropoutCenterBiasWithGrowingMeta(self):
# This is batch mode where ensemble already contains the tree that we are
# building. This tree should never be dropped.
num_trees = 10
with self.cached_session():
# Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Add 10 trees with some weights.
for i in range(0, num_trees):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Add growing metadata to indicate batch mode.
tree_ensemble_config.growing_metadata.num_trees_attempted = num_trees
tree_ensemble_config.growing_metadata.num_layers_attempted = num_trees
# Prepare learner/dropout config.
learner_config = learner_pb2.LearnerConfig()
# Drop all the trees.
learner_config.learning_rate_tuner.dropout.dropout_probability = 1.0
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
resources.initialize_resources(resources.shared_resources()).run()
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
result_center, dropout_info_center = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=True,
reduce_dim=True)
dropout_info = dropout_info.eval()
dropout_info_center = dropout_info_center.eval()
      # The last tree is never dropped; the bias tree can be dropped.
num_dropped = dropout_info[0].size
self.assertEqual(num_dropped, num_trees - 1)
num_dropped_center = dropout_info_center[0].size
self.assertEqual(num_dropped_center, num_trees - 2)
result = result.eval()
result_center = result_center.eval()
for i in range(result.size):
self.assertNotEqual(result[i], result_center[i])
      # First dropped tree is the bias tree (tree 0).
self.assertEqual(0, dropout_info[0][0])
# Last dropped tree is not the last tree (not tree num_trees-1).
self.assertNotEqual(num_trees - 1, dropout_info[0][num_dropped - 1])
      # First dropped tree is tree 1.
self.assertEqual(1, dropout_info_center[0][0])
# Last dropped tree is not the last tree in ensemble.
self.assertNotEqual(num_trees - 1,
dropout_info_center[0][num_dropped_center - 1])
def testDropoutSeed(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 999 trees with some weights.
for i in range(0, 999):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Prepare learner/dropout config.
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="empty")
resources.initialize_resources(resources.shared_resources()).run()
_, dropout_info_1 = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
_, dropout_info_2 = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
# Different seed.
_, dropout_info_3 = prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
112314, [self._dense_float_tensor1],
[self._sparse_float_indices1, self._sparse_float_indices2],
[self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1, self._sparse_float_shape2],
[self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
# First seed with centering bias.
_, dropout_info_4 = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=True,
reduce_dim=True)
# The same seed returns the same results.
self.assertAllEqual(dropout_info_1.eval(), dropout_info_2.eval())
      # Different seeds give different results.
self.assertNotEqual(dropout_info_3.eval().shape,
dropout_info_2.eval().shape)
      # With centering bias, the same seed does not give the same result.
self.assertNotEqual(dropout_info_4.eval(), dropout_info_1.eval())
      # With centering bias, one less tree is dropped (the bias tree is not
      # dropped).
self.assertEqual(
len(dropout_info_4.eval()[0]) + 1, len(dropout_info_1.eval()[0]))
def testDropOutZeroProb(self):
with self.cached_session():
# Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 999 trees with some weights.
for i in range(0, 999):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Dropout with 0 probability.
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.dropout.dropout_probability = 0.0
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
# Apply dropout, but expect nothing dropped.
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
resources.initialize_resources(resources.shared_resources()).run()
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
result_no_dropout, _ = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
self.assertAllEqual([[], []], dropout_info.eval())
self.assertAllClose(result.eval(), result_no_dropout.eval())
def testAveragingAllTrees(self):
with self.cached_session():
# Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
adjusted_tree_ensemble_config = (
tree_config_pb2.DecisionTreeEnsembleConfig())
# Add 100 trees with some weights.
      # When averaging is applied, the tree weights effectively change to
      # 1, 99/100, 98/100, etc., so let's create a second ensemble with those
      # weights too.
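      # average_last_percent_trees=1.0 averages over all 100 trees which, as
      # this test verifies, is equivalent to scaling the weight of tree i by
      # (total_num - i) / total_num -- the weights used for the adjusted
      # ensemble below.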
total_num = 100
for i in range(0, total_num):
tree = tree_ensemble_config.trees.add()
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_metadata.add().is_finalized = True
tree_ensemble_config.tree_weights.append(1.0)
        # This is how the weights will look after averaging.
copy_tree = adjusted_tree_ensemble_config.trees.add()
_append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)
adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True
adjusted_tree_ensemble_config.tree_weights.append(
1.0 * (total_num - i) / total_num)
# Prepare learner config WITH AVERAGING.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.averaging_config.average_last_percent_trees = 1.0
# No averaging config.
learner_config_no_averaging = learner_pb2.LearnerConfig()
learner_config_no_averaging.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
# This is how our ensemble will "look" during averaging
adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(
),
name="adjusted")
resources.initialize_resources(resources.shared_resources()).run()
# Do averaging.
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config.SerializeToString(),
apply_averaging=True,
reduce_dim=True)
pattern_result, pattern_dropout_info = self._get_predictions(
adjusted_tree_ensemble_handle,
learner_config_no_averaging.SerializeToString(),
apply_averaging=False,
reduce_dim=True)
self.assertAllEqual(result.eval(), pattern_result.eval())
self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval())
def testAveragingSomeTrees(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
adjusted_tree_ensemble_config = (
tree_config_pb2.DecisionTreeEnsembleConfig())
      # Add 100 trees with some weights.
total_num = 100
num_averaged = 25
j = 0
for i in range(0, total_num):
tree = tree_ensemble_config.trees.add()
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_metadata.add().is_finalized = True
tree_ensemble_config.tree_weights.append(1.0)
        # This is how the weights will look after averaging - we are adjusting
        # the weights of the last 25 trees.
copy_tree = adjusted_tree_ensemble_config.trees.add()
_append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)
adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True
if i >= 75:
adjusted_tree_ensemble_config.tree_weights.append(
1.0 * (num_averaged - j) / num_averaged)
j += 1
else:
adjusted_tree_ensemble_config.tree_weights.append(1.0)
# Prepare learner config WITH AVERAGING.
learner_config_1 = learner_pb2.LearnerConfig()
learner_config_1.num_classes = 2
learner_config_1.averaging_config.average_last_percent_trees = 0.25
# This is equivalent.
learner_config_2 = learner_pb2.LearnerConfig()
learner_config_2.num_classes = 2
learner_config_2.averaging_config.average_last_n_trees = 25
# No averaging config.
learner_config_no_averaging = learner_pb2.LearnerConfig()
learner_config_no_averaging.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
# This is how our ensemble will "look" during averaging
adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(
),
name="adjusted")
resources.initialize_resources(resources.shared_resources()).run()
result_1, dropout_info_1 = self._get_predictions(
tree_ensemble_handle,
learner_config_1.SerializeToString(),
apply_averaging=True,
reduce_dim=True)
result_2, dropout_info_2 = self._get_predictions(
tree_ensemble_handle,
learner_config_2.SerializeToString(),
apply_averaging=True,
reduce_dim=True)
pattern_result, pattern_dropout_info = self._get_predictions(
adjusted_tree_ensemble_handle,
learner_config_no_averaging.SerializeToString(),
apply_averaging=False,
reduce_dim=True)
self.assertAllEqual(result_1.eval(), pattern_result.eval())
self.assertAllEqual(result_2.eval(), pattern_result.eval())
self.assertAllEqual(dropout_info_1.eval(), pattern_dropout_info.eval())
self.assertAllEqual(dropout_info_2.eval(), pattern_dropout_info.eval())
def testAverageMoreThanNumTreesExist(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
adjusted_tree_ensemble_config = (
tree_config_pb2.DecisionTreeEnsembleConfig())
      # When we ask to average over more trees than exist, the averaging is
      # done across all the trees.
total_num = 100
for i in range(0, total_num):
tree = tree_ensemble_config.trees.add()
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_metadata.add().is_finalized = True
tree_ensemble_config.tree_weights.append(1.0)
        # This is how the weights will look after averaging.
copy_tree = adjusted_tree_ensemble_config.trees.add()
_append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)
adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True
adjusted_tree_ensemble_config.tree_weights.append(
1.0 * (total_num - i) / total_num)
# Prepare learner config WITH AVERAGING.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
# We have only 100 trees but we ask to average over 250.
learner_config.averaging_config.average_last_n_trees = 250
# No averaging config.
learner_config_no_averaging = learner_pb2.LearnerConfig()
learner_config_no_averaging.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
# This is how our ensemble will "look" during averaging
adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(
),
name="adjusted")
resources.initialize_resources(resources.shared_resources()).run()
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config.SerializeToString(),
apply_averaging=True,
reduce_dim=True)
pattern_result, pattern_dropout_info = self._get_predictions(
adjusted_tree_ensemble_handle,
learner_config_no_averaging.SerializeToString(),
apply_averaging=False,
reduce_dim=True)
self.assertAllEqual(result.eval(), pattern_result.eval())
self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval())
class PartitionExamplesOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
"""Sets up the prediction tests.
    Creates a batch of two examples having three dense float, two sparse float
    and one sparse int features.
The data looks like the following:
|Instance |Dense0 |Dense1 |Dense2 |SparseF0 |SparseF1 |SparseI0 |
| 0 | 7 | 1 | 2 | -3 | | 9,1 |
| 1 | -2 | 2 | 0.5 | | 4 | |
"""
super(PartitionExamplesOpsTest, self).setUp()
self._dense_float_tensor1 = np.array([[7.0], [-2.0]])
self._dense_float_tensor2 = np.array([[1.0], [2.0]])
self._dense_float_tensor3 = np.array([[2.0], [0.5]])
self._sparse_float_indices1 = np.array([[0, 0]])
self._sparse_float_values1 = np.array([-3.0])
self._sparse_float_shape1 = np.array([2, 1])
self._sparse_float_indices2 = np.array([[1, 0]])
self._sparse_float_values2 = np.array([4.0])
self._sparse_float_shape2 = np.array([2, 1])
self._sparse_int_indices1 = np.array([[0, 0], [0, 1]])
self._sparse_int_values1 = np.array([9, 1])
self._sparse_int_shape1 = np.array([2, 2])
def testEnsembleEmpty(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
result = prediction_ops.gradient_trees_partition_examples(
tree_ensemble_handle, [self._dense_float_tensor1],
[self._sparse_float_indices1, self._sparse_float_indices2],
[self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1, self._sparse_float_shape2],
[self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1])
self.assertAllEqual([0, 0], result.eval())
def testTreeNonFinalized(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Depth 3 tree.
tree1 = tree_ensemble_config.trees.add()
_set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree1.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.2)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.3)
_set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.6)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_metadata.add().is_finalized = False
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
result = prediction_ops.gradient_trees_partition_examples(
tree_ensemble_handle, [self._dense_float_tensor1],
[self._sparse_float_indices1, self._sparse_float_indices2],
[self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1, self._sparse_float_shape2],
[self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1])
self.assertAllEqual([5, 3], result.eval())
def testTreeFinalized(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Depth 3 tree.
tree1 = tree_ensemble_config.trees.add()
_set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree1.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.2)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.3)
_set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.6)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_metadata.add().is_finalized = True
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
result = prediction_ops.gradient_trees_partition_examples(
tree_ensemble_handle, [self._dense_float_tensor1],
[self._sparse_float_indices1, self._sparse_float_indices2],
[self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1, self._sparse_float_shape2],
[self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1])
self.assertAllEqual([0, 0], result.eval())
def testObliviousTreeNonFinalized(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Depth 3 tree.
tree1 = tree_ensemble_config.trees.add()
_set_float_oblivious_split(
tree1.nodes.add().oblivious_dense_float_binary_split, 0, 5.0)
_set_float_oblivious_split(
tree1.nodes.add().oblivious_dense_float_binary_split, 1, 3.0)
_set_float_oblivious_split(
tree1.nodes.add().oblivious_dense_float_binary_split, 2, 1.0)
for i in range(1, 9):
_append_to_leaf(tree1.nodes.add().leaf, 0, i / 10.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_metadata.add().is_finalized = False
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
result = prediction_ops.gradient_trees_partition_examples(
tree_ensemble_handle, [
self._dense_float_tensor1,
self._dense_float_tensor2,
self._dense_float_tensor3
], [], [], [], [], [], [])
      # The first example goes right, left, right in the tree and the second
      # example goes left, left, left. Since the depth of the tree is 3, the
      # leaves start at partition id 3 (one id per split node) and the leaf
      # offset is the binary path value with right = 1, so the partition ids
      # are as follows:
      # First example: 3 + 0b101 = 3 + 5 = 8
      # Second example: 3 + 0b000 = 3 + 0 = 3
self.assertAllEqual([8, 3], result.eval())
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/kernel_tests/prediction_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""training module under boosted_trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/training/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training functions for Gradient boosted decision trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from tensorflow.contrib import learn
from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler
from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils
from tensorflow.contrib.boosted_trees.python.ops import gen_model_ops
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import prediction_ops
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.contrib.boosted_trees.python.ops import training_ops
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import stateless_random_ops as stateless
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import device_setter
# Key names for prediction dict.
ENSEMBLE_STAMP = "ensemble_stamp"
PREDICTIONS = "predictions"
PARTITION_IDS = "partition_ids"
NUM_LAYERS_ATTEMPTED = "num_layers"
NUM_TREES_ATTEMPTED = "num_trees"
NUM_USED_HANDLERS = "num_used_handlers"
USED_HANDLERS_MASK = "used_handlers_mask"
LEAF_INDEX = "leaf_index"
_FEATURE_NAME_TEMPLATE = "%s_%d"
# Keys in Training state.
GBDTTrainingState = collections.namedtuple("GBDTTrainingState", [
"num_layer_examples", "num_layer_steps", "num_layers", "active_tree",
"active_layer", "continue_centering", "bias_stats_accumulator",
"steps_accumulator", "handlers"
])
def _get_column_by_index(tensor, indices):
"""Returns columns from a 2-D tensor by index."""
shape = array_ops.shape(tensor)
p_flat = array_ops.reshape(tensor, [-1])
i_flat = array_ops.reshape(
array_ops.reshape(math_ops.range(0, shape[0]) * shape[1], [-1, 1]) +
indices, [-1])
return array_ops.reshape(array_ops.gather(p_flat, i_flat), [shape[0], -1])
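# Illustrative sketch (values are hypothetical, not from the library): for a
# 2x3 tensor t = [[1, 2, 3], [4, 5, 6]] and per-row indices [[2], [0]], the
# flattened offsets are [0*3 + 2, 1*3 + 0] = [2, 3], so the gather above
# returns [[3], [4]] -- one selected column value per row.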
def _make_predictions_dict(stamp,
logits,
partition_ids,
ensemble_stats,
used_handlers,
leaf_index=None):
"""Returns predictions for the given logits and n_classes.
Args:
stamp: The ensemble stamp.
    logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1] that
      contains predictions when no dropout was applied.
partition_ids: A rank 1 `Tensor` with shape [batch_size].
ensemble_stats: A TreeEnsembleStatsOp result tuple.
used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a
boolean mask.
    leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees] that
      contains the leaf id for each example prediction.
Returns:
A dict of predictions.
"""
result = {}
result[ENSEMBLE_STAMP] = stamp
result[PREDICTIONS] = logits
result[PARTITION_IDS] = partition_ids
result[NUM_LAYERS_ATTEMPTED] = ensemble_stats.attempted_layers
result[NUM_TREES_ATTEMPTED] = ensemble_stats.attempted_trees
result[NUM_USED_HANDLERS] = used_handlers.num_used_handlers
result[USED_HANDLERS_MASK] = used_handlers.used_handlers_mask
if leaf_index is not None:
result[LEAF_INDEX] = leaf_index
return result
class _OpRoundRobinStrategy(object):
"""Returns the next ps task index for placement via per-Op round-robin order.
  This strategy works slightly better for the GBDT graph because it uses
  custom resources whose compute costs vary significantly.
"""
def __init__(self, ps_ops, num_tasks):
"""Create a new `_RoundRobinStrategy`.
Args:
ps_ops: List of Op types to place on PS.
num_tasks: Number of ps tasks to cycle among.
"""
next_task = 0
self._next_task_per_op = {}
for op in ps_ops:
self._next_task_per_op[op] = next_task
next_task = (next_task + 1) % num_tasks if num_tasks else 0
self._num_tasks = num_tasks
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Args:
op: An `Operation` to be placed on ps.
Returns:
      The next ps task index to use for the `Operation`, in the range
      `[0, num_tasks)`.
Raises:
ValueError: If attempting to place non-PS Op.
"""
if op.type not in self._next_task_per_op:
raise ValueError("Unknown op type '%s' for placement:" % op.type)
task = self._next_task_per_op[op.type]
self._next_task_per_op[op.type] = ((task + 1) % self._num_tasks
if self._num_tasks else 0)
return task
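# Illustrative sketch (hypothetical arguments, not from the library): with
# ps_ops = ["VarHandleOp", "Variable"] and num_tasks = 2, the first
# "VarHandleOp" placed is assigned to task 0 and the first "Variable" to
# task 1; subsequent ops of each type then cycle independently over the tasks.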
def extract_features(features, feature_columns, use_core_columns):
"""Extracts columns from a dictionary of features.
Args:
features: `dict` of `Tensor` objects.
    feature_columns: A list of feature_columns.
    use_core_columns: A boolean specifying whether core feature columns are
      used.
  Returns:
    Eight values:
- A list of all feature column names.
- A list of dense floats.
- A list of sparse float feature indices.
- A list of sparse float feature values.
- A list of sparse float feature shapes.
- A list of sparse int feature indices.
- A list of sparse int feature values.
- A list of sparse int feature shapes.
Raises:
ValueError: if features is not valid.
"""
if not features:
raise ValueError("Features dictionary must be specified.")
# Make a shallow copy of features to ensure downstream usage
# is unaffected by modifications in the model function.
features = copy.copy(features)
if feature_columns:
scope = "gbdt"
with variable_scope.variable_scope(scope):
feature_columns = list(feature_columns)
transformed_features = collections.OrderedDict()
for fc in feature_columns:
# pylint: disable=protected-access
if use_core_columns:
# pylint: disable=protected-access
tensor = fc_core._transform_features(features, [fc])[fc]
transformed_features[fc.name] = tensor
elif isinstance(fc, feature_column_lib._EmbeddingColumn):
# pylint: enable=protected-access
transformed_features[fc.name] = fc_core.input_layer(
features, [fc], weight_collections=[scope])
else:
result = feature_column_ops.transform_features(features, [fc])
if len(result) > 1:
raise ValueError("Unexpected number of output features")
transformed_features[fc.name] = result[list(result.keys())[0]]
features = transformed_features
dense_float_names = []
dense_floats = []
sparse_float_names = []
sparse_float_indices = []
sparse_float_values = []
sparse_float_shapes = []
sparse_int_names = []
sparse_int_indices = []
sparse_int_values = []
sparse_int_shapes = []
for key in sorted(features.keys()):
tensor = features[key]
# TODO(nponomareva): consider iterating over feature columns instead.
if isinstance(tensor, tuple):
# Weighted categorical feature.
categorical_tensor = tensor[0]
weight_tensor = tensor[1]
shape = categorical_tensor.dense_shape
indices = array_ops.concat([
array_ops.slice(categorical_tensor.indices, [0, 0], [-1, 1]),
array_ops.expand_dims(
math_ops.cast(categorical_tensor.values, dtypes.int64), -1)
], 1)
tensor = sparse_tensor.SparseTensor(
indices=indices, values=weight_tensor.values, dense_shape=shape)
if isinstance(tensor, sparse_tensor.SparseTensor):
if tensor.values.dtype == dtypes.float32:
sparse_float_names.append(key)
sparse_float_indices.append(tensor.indices)
sparse_float_values.append(tensor.values)
sparse_float_shapes.append(tensor.dense_shape)
elif tensor.values.dtype == dtypes.int64:
sparse_int_names.append(key)
sparse_int_indices.append(tensor.indices)
sparse_int_values.append(tensor.values)
sparse_int_shapes.append(tensor.dense_shape)
else:
raise ValueError("Unsupported sparse feature %s with dtype %s." %
(tensor.indices.name, tensor.dtype))
else:
if tensor.dtype == dtypes.float32:
if len(tensor.shape) > 1 and tensor.shape[1] > 1:
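          # A rank-2 dense float tensor with more than one column is split
          # into single-column features named via _FEATURE_NAME_TEMPLATE,
          # e.g. a hypothetical "age" column of width 2 becomes "age_0" and
          # "age_1".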
unstacked = array_ops.unstack(tensor, axis=1)
for i in range(len(unstacked)):
dense_float_names.append(_FEATURE_NAME_TEMPLATE % (key, i))
dense_floats.append(array_ops.reshape(unstacked[i], [-1, 1]))
else:
dense_float_names.append(key)
dense_floats.append(tensor)
else:
raise ValueError("Unsupported dense feature %s with dtype %s." %
(tensor.name, tensor.dtype))
  # Feature columns are logically organized into incrementing slots, starting
  # with dense floats, then sparse floats, then sparse ints.
fc_names = (dense_float_names + sparse_float_names + sparse_int_names)
return (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices, sparse_int_values,
sparse_int_shapes)
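# Illustrative sketch (hypothetical feature names, not from the library): for
# features = {"age": a dense float32 [batch, 1] tensor,
#             "country": a sparse int64 tensor}
# extract_features(features, None, use_core_columns=False) would return
# fc_names = ["age", "country"], one dense float tensor, empty sparse float
# lists, and one entry each in the sparse int indices/values/shapes lists.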
def _dropout_params(mode, ensemble_stats):
"""Returns parameters relevant for dropout.
Args:
mode: Train/Eval/Infer
ensemble_stats: A TreeEnsembleStatsOp result tuple.
Returns:
Whether to apply dropout and a dropout seed.
"""
if mode == learn.ModeKeys.TRAIN:
# Do dropout only during training.
apply_dropout = True
seed = ensemble_stats.attempted_trees
else:
seed = -1
apply_dropout = False
return apply_dropout, seed
class GradientBoostedDecisionTreeModel(object):
"""A GBDT model function."""
def __init__(self,
is_chief,
num_ps_replicas,
ensemble_handle,
center_bias,
examples_per_layer,
learner_config,
features,
logits_dimension,
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS,
feature_columns=None,
use_core_columns=False,
output_leaf_index=False,
output_leaf_index_modes=None,
num_quantiles=100):
"""Construct a new GradientBoostedDecisionTreeModel function.
Args:
is_chief: Whether to build the chief graph.
num_ps_replicas: Number of parameter server replicas, can be 0.
ensemble_handle: A handle to the ensemble variable.
center_bias: Whether to center the bias before growing trees.
examples_per_layer: Number of examples to accumulate before growing a tree
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
learner_config: A learner config.
features: `dict` of `Tensor` objects.
logits_dimension: An int, the dimension of logits.
loss_reduction: Either `SUM_OVER_NONZERO_WEIGHTS` (mean) or `SUM`.
feature_columns: A list of feature columns.
use_core_columns: A boolean specifying whether core feature columns are
used.
output_leaf_index: A boolean variable indicating whether to output leaf
index into predictions dictionary.
output_leaf_index_modes: A list of modes from (TRAIN, EVAL, INFER) which
dictates when leaf indices will be outputted. By default, leaf indices
are only outputted in INFER mode.
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: if inputs are not valid.
"""
if ensemble_handle is None:
raise ValueError("ensemble_handle must be specified.")
if learner_config is None:
raise ValueError("learner_config must be specified.")
if learner_config.num_classes < 2:
raise ValueError("Number of classes must be >=2")
self._logits_dimension = logits_dimension
self._is_chief = is_chief
self._num_ps_replicas = num_ps_replicas
self._ensemble_handle = ensemble_handle
self._center_bias = center_bias
self._examples_per_layer = examples_per_layer
# Check loss reduction value.
if (loss_reduction != losses.Reduction.SUM and
loss_reduction != losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):
raise ValueError(
"Invalid loss reduction is provided: %s." % loss_reduction)
self._loss_reduction = loss_reduction
# Fill in the defaults.
if (learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.MULTI_CLASS_STRATEGY_UNSPECIFIED):
if logits_dimension == 1:
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
else:
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
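    # Per-example gradient/hessian shapes depend on the multiclass strategy:
    # scalar for tree-per-class, [logits_dimension, logits_dimension] for the
    # full hessian, and [logits_dimension] for the diagonal hessian.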
if logits_dimension == 1 or learner_config.multi_class_strategy == (
learner_pb2.LearnerConfig.TREE_PER_CLASS):
self._gradient_shape = tensor_shape.scalar()
self._hessian_shape = tensor_shape.scalar()
else:
if center_bias:
raise ValueError("Center bias should be False for multiclass.")
self._gradient_shape = tensor_shape.TensorShape([logits_dimension])
if (learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.FULL_HESSIAN):
self._hessian_shape = tensor_shape.TensorShape(
([logits_dimension, logits_dimension]))
else:
# Diagonal hessian strategy.
self._hessian_shape = tensor_shape.TensorShape(([logits_dimension]))
if (learner_config.growing_mode ==
learner_pb2.LearnerConfig.GROWING_MODE_UNSPECIFIED):
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
if (learner_config.weak_learner_type == learner_pb2.LearnerConfig
.OBLIVIOUS_DECISION_TREE and learner_config.pruning_mode == learner_pb2
.LearnerConfig.PRUNING_MODE_UNSPECIFIED):
learner_config.pruning_mode = learner_pb2.LearnerConfig.PRE_PRUNE
if (learner_config.pruning_mode ==
learner_pb2.LearnerConfig.PRUNING_MODE_UNSPECIFIED):
learner_config.pruning_mode = learner_pb2.LearnerConfig.POST_PRUNE
if (learner_config.weak_learner_type == learner_pb2.LearnerConfig
.OBLIVIOUS_DECISION_TREE and
learner_config.pruning_mode == learner_pb2.LearnerConfig.POST_PRUNE):
raise ValueError(
"Post pruning is not implmented for oblivious decision trees.")
if learner_config.constraints.max_tree_depth == 0:
# Use 6 as the default maximum depth.
learner_config.constraints.max_tree_depth = 6
tuner = learner_config.learning_rate_tuner.WhichOneof("tuner")
if not tuner:
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
self._learner_config = learner_config
self._feature_columns = feature_columns
self._learner_config_serialized = learner_config.SerializeToString()
self._num_quantiles = num_quantiles
self._max_tree_depth = variables.VariableV1(
initial_value=self._learner_config.constraints.max_tree_depth)
self._attempted_trees = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
trainable=False,
name="attempted_trees")
self._finalized_trees = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
trainable=False,
name="finalized_trees")
if not features:
raise ValueError("Features dictionary must be specified.")
(fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices,
sparse_int_values, sparse_int_shapes) = extract_features(
features, self._feature_columns, use_core_columns)
if (learner_config.weak_learner_type == learner_pb2.LearnerConfig
.OBLIVIOUS_DECISION_TREE and sparse_float_indices):
raise ValueError("Oblivious trees don't handle sparse float features yet."
)
logging.info("Active Feature Columns: " + str(fc_names))
logging.info("Learner config: " + str(learner_config))
self._fc_names = fc_names
self._dense_floats = dense_floats
self._sparse_float_indices = sparse_float_indices
self._sparse_float_values = sparse_float_values
self._sparse_float_shapes = sparse_float_shapes
self._sparse_int_indices = sparse_int_indices
self._sparse_int_values = sparse_int_values
self._sparse_int_shapes = sparse_int_shapes
self._reduce_dim = (
self._learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.TREE_PER_CLASS and
learner_config.num_classes == 2)
if output_leaf_index_modes is None:
output_leaf_index_modes = [learn.ModeKeys.INFER]
elif not all(
mode in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL,
learn.ModeKeys.INFER) for mode in output_leaf_index_modes):
raise ValueError("output_leaf_index_modes should only contain ModeKeys.")
self._output_leaf_index = output_leaf_index
self._output_leaf_index_modes = output_leaf_index_modes
def _predict_and_return_dict(self, ensemble_handle, ensemble_stamp, mode):
"""Runs prediction and returns a dictionary of the prediction results.
Args:
ensemble_handle: ensemble resource handle.
ensemble_stamp: stamp of ensemble resource.
mode: learn.ModeKeys.TRAIN or EVAL or INFER.
Returns:
      a dictionary of prediction results -
      ENSEMBLE_STAMP, PREDICTIONS, PARTITION_IDS,
      NUM_LAYERS_ATTEMPTED, NUM_TREES_ATTEMPTED.
"""
ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle,
ensemble_stamp)
num_handlers = (
len(self._dense_floats) + len(self._sparse_float_shapes) + len(
self._sparse_int_shapes))
# Used during feature selection.
used_handlers = model_ops.tree_ensemble_used_handlers(
ensemble_handle, ensemble_stamp, num_all_handlers=num_handlers)
# We don't need dropout info - we can always restore it based on the
# seed.
apply_dropout, seed = _dropout_params(mode, ensemble_stats)
# Make sure ensemble stats run. This will check that the ensemble has
# the right stamp.
with ops.control_dependencies(ensemble_stats):
leaf_index = None
if self._output_leaf_index and mode in self._output_leaf_index_modes:
predictions, _, leaf_index = (
prediction_ops).gradient_trees_prediction_verbose(
ensemble_handle,
seed,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
learner_config=self._learner_config_serialized,
apply_dropout=apply_dropout,
apply_averaging=mode != learn.ModeKeys.TRAIN,
use_locking=True,
center_bias=self._center_bias,
reduce_dim=self._reduce_dim)
else:
leaf_index = None
predictions, _ = prediction_ops.gradient_trees_prediction(
ensemble_handle,
seed,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
learner_config=self._learner_config_serialized,
apply_dropout=apply_dropout,
apply_averaging=mode != learn.ModeKeys.TRAIN,
use_locking=True,
center_bias=self._center_bias,
reduce_dim=self._reduce_dim)
partition_ids = prediction_ops.gradient_trees_partition_examples(
ensemble_handle,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
use_locking=True)
return _make_predictions_dict(ensemble_stamp, predictions, partition_ids,
ensemble_stats, used_handlers, leaf_index)
def predict(self, mode):
"""Returns predictions given the features and mode.
Args:
mode: Mode the graph is running in (train|predict|eval).
Returns:
A dict of predictions tensors.
Raises:
ValueError: if features is not valid.
"""
# Use the current ensemble to predict on the current batch of input.
# For faster prediction we check if the inputs are on the same device
# as the model. If not, we create a copy of the model on the worker.
input_deps = (
self._dense_floats + self._sparse_float_indices +
self._sparse_int_indices)
if not input_deps:
raise ValueError("No input tensors for prediction.")
# Get most current model stamp.
ensemble_stamp = model_ops.tree_ensemble_stamp_token(self._ensemble_handle)
# Determine if ensemble is colocated with the inputs.
if self._ensemble_handle.device != input_deps[0].device:
# Create a local ensemble and get its local stamp.
with ops.name_scope("local_ensemble", "TreeEnsembleVariable"):
local_ensemble_handle = (
gen_model_ops.decision_tree_ensemble_resource_handle_op(
self._ensemble_handle.op.name + "/local_ensemble"))
create_op = gen_model_ops.create_tree_ensemble_variable(
local_ensemble_handle, stamp_token=-1, tree_ensemble_config="")
with ops.control_dependencies([create_op]):
local_stamp = model_ops.tree_ensemble_stamp_token(
local_ensemble_handle)
# Determine whether the local ensemble is stale and update it if needed.
def _refresh_local_ensemble_fn():
# Serialize the model from parameter server after reading the inputs.
with ops.control_dependencies([input_deps[0]]):
(ensemble_stamp, serialized_model) = (
model_ops.tree_ensemble_serialize(self._ensemble_handle))
# Update local ensemble with the serialized model from parameter server.
with ops.control_dependencies([create_op]):
return model_ops.tree_ensemble_deserialize(
local_ensemble_handle,
stamp_token=ensemble_stamp,
tree_ensemble_config=serialized_model), ensemble_stamp
with ops.device(local_ensemble_handle.device):
# Need to colocate stamps for cond.
colocated_ensemble_stamp = array_ops.identity(ensemble_stamp)
refresh_local_ensemble, ensemble_stamp = control_flow_ops.cond(
math_ops.not_equal(colocated_ensemble_stamp,
local_stamp), _refresh_local_ensemble_fn,
lambda: (control_flow_ops.no_op(), colocated_ensemble_stamp))
# Once updated, use the local model for prediction.
with ops.control_dependencies([refresh_local_ensemble]):
return self._predict_and_return_dict(local_ensemble_handle,
ensemble_stamp, mode)
else:
# Use ensemble_handle directly, if colocated.
with ops.device(self._ensemble_handle.device):
return self._predict_and_return_dict(self._ensemble_handle,
ensemble_stamp, mode)
def _get_class_id(self, predictions_dict):
# Handle different multiclass strategies.
if (self._learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.TREE_PER_CLASS and
self._logits_dimension != 1):
# Choose the class for which the tree is built (one vs rest).
return math_ops.cast(
predictions_dict[NUM_TREES_ATTEMPTED] % self._logits_dimension,
dtypes.int32)
return constant_op.constant(-1, dtype=dtypes.int32)
def update_stats(self, loss, predictions_dict, gradients=None, hessians=None):
"""Update the accumulators with stats from this batch.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
      gradients: A tensor with the gradients with respect to the logits from
        predictions_dict. If not provided, TensorFlow will compute them via
        automatic differentiation.
      hessians: A tensor with the hessians with respect to the logits from
        predictions_dict. If not provided, TensorFlow will compute them via
        automatic differentiation.
Returns:
Three values:
      - A list of ops that update the stats accumulators and split handlers, and
- An op that increments the stamp but removes all the trees and resets
the handlers. This can be used to reset the state of the ensemble.
      - A `GBDTTrainingState` namedtuple containing the training state.
Raises:
ValueError: if inputs are not valid.
"""
# Get the worker device from input dependencies.
input_deps = (
self._dense_floats + self._sparse_float_indices +
self._sparse_int_indices)
worker_device = input_deps[0].device
# Get tensors relevant for training and form the loss.
predictions = predictions_dict[PREDICTIONS]
partition_ids = predictions_dict[PARTITION_IDS]
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
if gradients is None:
gradients = gradients_impl.gradients(
loss,
predictions,
name="Gradients",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
strategy = self._learner_config.multi_class_strategy
class_id = self._get_class_id(predictions_dict)
# Handle different multiclass strategies.
if strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS:
# We build one vs rest trees.
if self._logits_dimension == 1:
# We have only 1 score, gradients is of shape [batch, 1].
if hessians is None:
hessians = gradients_impl.gradients(
gradients,
predictions,
name="Hessian",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
squeezed_gradients = array_ops.squeeze(gradients, axis=[1])
squeezed_hessians = array_ops.squeeze(hessians, axis=[1])
else:
if hessians is not None:
raise ValueError("Providing hessians is not yet supported here.")
hessian_list = self._diagonal_hessian(gradients, predictions)
# Assemble hessian list into a tensor.
hessians = array_ops.stack(hessian_list, axis=1)
# Use class id tensor to get the column with that index from gradients
# and hessians.
squeezed_gradients = array_ops.squeeze(
_get_column_by_index(gradients, class_id))
squeezed_hessians = array_ops.squeeze(
_get_column_by_index(hessians, class_id))
else:
if hessians is not None:
raise ValueError("Providing hessians is not yet supported here.")
# Other multiclass strategies.
if strategy == learner_pb2.LearnerConfig.FULL_HESSIAN:
hessian_list = self._full_hessian(gradients, predictions)
else:
# Diagonal hessian strategy.
hessian_list = self._diagonal_hessian(gradients, predictions)
squeezed_gradients = gradients
hessians = array_ops.stack(hessian_list, axis=1)
squeezed_hessians = hessians
    # Get the weights for each example for quantile calculation.
weights = self._get_weights(self._hessian_shape, squeezed_hessians)
# Create all handlers ensuring resources are evenly allocated across PS.
fc_name_idx = 0
handlers = []
init_stamp_token = constant_op.constant(0, dtype=dtypes.int64)
l1_regularization = constant_op.constant(
self._learner_config.regularization.l1, dtypes.float32)
l2_regularization = constant_op.constant(
self._learner_config.regularization.l2, dtypes.float32)
tree_complexity_regularization = constant_op.constant(
self._learner_config.regularization.tree_complexity, dtypes.float32)
min_node_weight = constant_op.constant(
self._learner_config.constraints.min_node_weight, dtypes.float32)
loss_uses_sum_reduction = self._loss_reduction == losses.Reduction.SUM
loss_uses_sum_reduction = constant_op.constant(loss_uses_sum_reduction)
weak_learner_type = constant_op.constant(
self._learner_config.weak_learner_type)
num_quantiles = self._num_quantiles
epsilon = 1.0 / num_quantiles
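    # For example, the default num_quantiles=100 gives epsilon=0.01, i.e. the
    # split handlers' quantile sketches target roughly a 1% approximation
    # error.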
strategy_tensor = constant_op.constant(strategy)
with ops.device(self._get_replica_device_setter(worker_device)):
# Create handlers for dense float columns
for dense_float_column_idx in range(len(self._dense_floats)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
ordinal_split_handler.DenseSplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
dense_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
dense_float_column=self._dense_floats[dense_float_column_idx],
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
weak_learner_type=weak_learner_type,
))
fc_name_idx += 1
# Create handlers for sparse float columns.
for sparse_float_column_idx in range(len(self._sparse_float_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
ordinal_split_handler.SparseSplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
sparse_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
sparse_float_column=sparse_tensor.SparseTensor(
self._sparse_float_indices[sparse_float_column_idx],
self._sparse_float_values[sparse_float_column_idx],
self._sparse_float_shapes[sparse_float_column_idx]),
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction))
fc_name_idx += 1
# Create handlers for sparse int columns.
for sparse_int_column_idx in range(len(self._sparse_int_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
categorical_split_handler.EqualitySplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
sparse_int_column_idx),
sparse_int_column=sparse_tensor.SparseTensor(
self._sparse_int_indices[sparse_int_column_idx],
self._sparse_int_values[sparse_int_column_idx],
self._sparse_int_shapes[sparse_int_column_idx]),
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
weak_learner_type=weak_learner_type))
fc_name_idx += 1
# Create ensemble stats variables.
num_layer_examples = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layer_examples",
trainable=False)
num_layer_steps = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layer_steps",
trainable=False)
num_layers = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layers",
trainable=False)
active_tree = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="active_tree",
trainable=False)
active_layer = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="active_layer",
trainable=False)
# Variable that becomes false once bias centering is done.
continue_centering = variables.VariableV1(
initial_value=self._center_bias,
name="continue_centering",
trainable=False)
# Create bias stats accumulator.
bias_stats_accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
name="BiasAccumulator")
# Create steps accumulator.
steps_accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar(),
name="StepsAccumulator")
# Create ensemble stats summaries.
summary.scalar("layer_stats/num_examples", num_layer_examples)
summary.scalar("layer_stats/num_steps", num_layer_steps)
summary.scalar("ensemble_stats/active_tree", active_tree)
summary.scalar("ensemble_stats/active_layer", active_layer)
# Update bias stats.
stats_update_ops = []
stats_update_ops.append(
control_flow_ops.cond(
continue_centering,
self._make_update_bias_stats_fn(ensemble_stamp, predictions,
gradients, bias_stats_accumulator,
hessians), control_flow_ops.no_op))
# Update handler stats.
handler_reads = collections.OrderedDict()
for handler in handlers:
handler_reads[handler] = handler.scheduled_reads()
handler_results = batch_ops_utils.run_handler_scheduled_ops(
handler_reads, ensemble_stamp, worker_device)
per_handler_updates = collections.OrderedDict()
# Two values per handler. First one is if the handler is active for the
# current layer. The second one is if the handler is going to be active
# for the next layer.
subsampling_type = self._learner_config.WhichOneof("feature_fraction")
if subsampling_type == "feature_fraction_per_level":
seed = predictions_dict[NUM_LAYERS_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed, 1])
active_handlers_next_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed + 1, 1])
active_handlers = array_ops.stack(
[active_handlers_current_layer, active_handlers_next_layer], axis=1)
active_handlers = (
active_handlers < self._learner_config.feature_fraction_per_level)
elif subsampling_type == "feature_fraction_per_tree":
seed = predictions_dict[NUM_TREES_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed, 2])
active_handlers_current_layer = (
active_handlers_current_layer <
self._learner_config.feature_fraction_per_tree)
active_handlers = array_ops.stack(
[
active_handlers_current_layer,
array_ops.ones([len(handlers)], dtype=dtypes.bool)
],
axis=1)
else:
active_handlers = array_ops.ones([len(handlers), 2], dtype=dtypes.bool)
if self._learner_config.constraints.max_number_of_unique_feature_columns:
target = (
self._learner_config.constraints.max_number_of_unique_feature_columns)
def _feature_selection_active_handlers():
# The active list for current and the next iteration.
used_handlers = array_ops.reshape(predictions_dict[USED_HANDLERS_MASK],
[-1, 1])
used_handlers = array_ops.concat([used_handlers, used_handlers], axis=1)
return math_ops.logical_and(used_handlers, active_handlers)
active_handlers = (
control_flow_ops.cond(predictions_dict[NUM_USED_HANDLERS] >= target,
_feature_selection_active_handlers,
lambda: active_handlers))
# Prepare empty gradients and hessians when handlers are not ready.
empty_hess_shape = [1] + self._hessian_shape.as_list()
empty_grad_shape = [1] + self._gradient_shape.as_list()
empty_gradients = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_hess_shape)
active_handlers = array_ops.unstack(active_handlers, axis=0)
for handler_idx in range(len(handlers)):
handler = handlers[handler_idx]
is_active = active_handlers[handler_idx]
updates, scheduled_updates = handler.update_stats(
ensemble_stamp, partition_ids, squeezed_gradients, squeezed_hessians,
empty_gradients, empty_hessians, weights, is_active,
handler_results[handler])
stats_update_ops.append(updates)
per_handler_updates[handler] = scheduled_updates
update_results = batch_ops_utils.run_handler_scheduled_ops(
per_handler_updates, ensemble_stamp, worker_device)
for update in update_results.values():
stats_update_ops += update
training_state = GBDTTrainingState(
num_layer_examples=num_layer_examples,
num_layer_steps=num_layer_steps,
num_layers=num_layers,
active_tree=active_tree,
active_layer=active_layer,
continue_centering=continue_centering,
bias_stats_accumulator=bias_stats_accumulator,
steps_accumulator=steps_accumulator,
handlers=handlers)
reset_op = control_flow_ops.no_op()
if self._is_chief:
      # Advance the ensemble stamp so that updates from stale workers are
      # discarded.
stamp_token, _ = model_ops.tree_ensemble_serialize(self._ensemble_handle)
next_stamp_token = stamp_token + 1
reset_ops = []
for handler in handlers:
reset_ops.append(handler.reset(stamp_token, next_stamp_token))
if self._center_bias:
reset_ops.append(
bias_stats_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(steps_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(self._finalized_trees.assign(0).op)
reset_ops.append(self._attempted_trees.assign(0).op)
reset_ops.append(
model_ops.tree_ensemble_deserialize(
self._ensemble_handle,
stamp_token=next_stamp_token,
tree_ensemble_config="",
name="reset_gbdt"))
reset_op = control_flow_ops.group([reset_ops])
return stats_update_ops, reset_op, training_state
def increment_step_counter_and_maybe_update_ensemble(self, predictions_dict,
training_state):
"""Increments number of visited examples and grows the ensemble.
If the number of visited examples reaches the target examples_per_layer,
ensemble is updated.
Args:
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
      training_state: `GBDTTrainingState` returned by update_stats.
    Returns:
      An op that updates the counters and potentially grows the ensemble.
"""
batch_size = math_ops.cast(
array_ops.shape(predictions_dict[PREDICTIONS])[0], dtypes.float32)
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
# Accumulate a step after updating stats.
steps_accumulator = training_state.steps_accumulator
num_layer_examples = training_state.num_layer_examples
num_layer_steps = training_state.num_layer_steps
active_layer = training_state.active_layer
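    # The steps accumulator repurposes the scalar stats accumulator: the batch
    # size is added in the gradient slot and a constant 1.0 in the hessian
    # slot, so the accumulated sums give examples and steps for the current
    # layer (read back below as acc_examples/acc_steps).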
add_step_op = steps_accumulator.add(
ensemble_stamp, [0], [[0, 0]], [batch_size], [1.0])
# After adding the step, decide if further processing is needed.
ensemble_update_ops = [add_step_op]
class_id = self._get_class_id(predictions_dict)
with ops.control_dependencies([add_step_op]):
if self._is_chief:
dropout_seed = predictions_dict[NUM_TREES_ATTEMPTED]
# Get accumulated steps and examples for the current layer.
_, _, _, _, acc_examples, acc_steps = (
steps_accumulator.saveable.serialize())
acc_examples = math_ops.cast(acc_examples[0], dtypes.int64)
acc_steps = math_ops.cast(acc_steps[0], dtypes.int64)
ensemble_update_ops.append(
num_layer_examples.assign(acc_examples))
ensemble_update_ops.append(num_layer_steps.assign(acc_steps))
# Determine whether we need to update tree ensemble.
examples_per_layer = self._examples_per_layer
if callable(examples_per_layer):
examples_per_layer = examples_per_layer(active_layer)
ensemble_update_ops.append(
control_flow_ops.cond(
acc_examples >= examples_per_layer,
self.make_update_ensemble_fn(ensemble_stamp, training_state,
dropout_seed, class_id),
control_flow_ops.no_op))
    # Note: the loss is computed from predictions that include dropout, so its
    # value can fluctuate noticeably across steps when the dropout ratio is
    # high. Refer to eval_loss instead when judging convergence.
return control_flow_ops.group(*ensemble_update_ops)
def make_update_ensemble_fn(self, ensemble_stamp, training_state,
dropout_seed, class_id):
"""A method to create the function which updates the tree ensemble."""
# Determine learning rate.
learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof(
"tuner")
if learning_rate_tuner == "fixed" or learning_rate_tuner == "dropout":
tuner = getattr(self._learner_config.learning_rate_tuner,
learning_rate_tuner)
learning_rate = tuner.learning_rate
else:
# TODO(nponomareva, soroush) do the line search.
raise ValueError("Line search learning rate is not yet supported.")
def _update_ensemble():
"""A method to update the tree ensemble."""
# Get next stamp token.
next_ensemble_stamp = ensemble_stamp + 1
# Finalize bias stats.
_, _, _, bias_grads, bias_hess = (
training_state.bias_stats_accumulator.flush(ensemble_stamp,
next_ensemble_stamp))
# Finalize handler splits.
are_splits_ready_list = []
partition_ids_list = []
gains_list = []
split_info_list = []
for handler in training_state.handlers:
(are_splits_ready,
partition_ids, gains, split_info) = handler.make_splits(
ensemble_stamp, next_ensemble_stamp, class_id)
are_splits_ready_list.append(are_splits_ready)
partition_ids_list.append(partition_ids)
gains_list.append(gains)
split_info_list.append(split_info)
# Stack all the inputs to one tensor per type.
# This is a workaround for the slowness of graph building in tf.cond.
# See (b/36554864).
split_sizes = array_ops.reshape(
array_ops.shape_n(partition_ids_list), [len(partition_ids_list)])
partition_ids = array_ops.concat(partition_ids_list, axis=0)
gains = array_ops.concat(gains_list, axis=0)
split_infos = array_ops.concat(split_info_list, axis=0)
# Determine if all splits are ready.
are_all_splits_ready = math_ops.reduce_all(
array_ops.stack(
are_splits_ready_list, axis=0, name="stack_handler_readiness"))
# Define bias centering update operation.
def _center_bias_fn():
# Center tree ensemble bias.
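        # This is a single Newton step per logit: delta = -gradient / hessian,
        # with zero used wherever the accumulated hessian is not positive.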
delta_updates = array_ops.where(bias_hess > 0, -bias_grads / bias_hess,
array_ops.zeros_like(bias_grads))
center_bias = training_ops.center_tree_ensemble_bias(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
delta_updates=delta_updates,
learner_config=self._learner_config_serialized)
return training_state.continue_centering.assign(center_bias)
# Define ensemble growing operations.
def _grow_ensemble_ready_fn():
# Grow the ensemble given the current candidates.
sizes = array_ops.unstack(split_sizes)
partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))
        # When using the oblivious decision tree as the weak learner, each
        # handler produces one gain and one split, rather than one per
        # partition.
if self._learner_config.weak_learner_type == (
learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE):
sizes = len(training_state.handlers)
gains_list = list(array_ops.split(gains, sizes, axis=0))
split_info_list = list(array_ops.split(split_infos, sizes, axis=0))
return training_ops.grow_tree_ensemble(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
learning_rate=learning_rate,
partition_ids=partition_ids_list,
gains=gains_list,
splits=split_info_list,
learner_config=self._learner_config_serialized,
dropout_seed=dropout_seed,
center_bias=self._center_bias,
max_tree_depth=self._max_tree_depth,
weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_not_ready_fn():
# Don't grow the ensemble, just update the stamp.
return training_ops.grow_tree_ensemble(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
learning_rate=0,
partition_ids=[],
gains=[],
splits=[],
learner_config=self._learner_config_serialized,
dropout_seed=dropout_seed,
center_bias=self._center_bias,
max_tree_depth=self._max_tree_depth,
weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_fn():
# Conditionally grow an ensemble depending on whether the splits
# from all the handlers are ready.
return control_flow_ops.cond(are_all_splits_ready,
_grow_ensemble_ready_fn,
_grow_ensemble_not_ready_fn)
# Update ensemble.
update_ops = [are_all_splits_ready]
if self._center_bias:
update_model = control_flow_ops.cond(training_state.continue_centering,
_center_bias_fn, _grow_ensemble_fn)
else:
update_model = _grow_ensemble_fn()
update_ops.append(update_model)
# Update ensemble stats.
with ops.control_dependencies([update_model]):
stats = training_ops.tree_ensemble_stats(
self._ensemble_handle, stamp_token=next_ensemble_stamp)
update_ops.append(self._finalized_trees.assign(stats.num_trees))
update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
update_ops.append(training_state.num_layers.assign(stats.num_layers))
update_ops.append(training_state.active_tree.assign(stats.active_tree))
update_ops.append(
training_state.active_layer.assign(stats.active_layer))
# Flush step stats.
update_ops.extend(
training_state.steps_accumulator.flush(ensemble_stamp,
next_ensemble_stamp))
return control_flow_ops.group(*update_ops, name="update_ensemble")
return _update_ensemble
def get_number_of_trees_tensor(self):
return self._finalized_trees, self._attempted_trees
def get_max_tree_depth(self):
return self._max_tree_depth
def train(self, loss, predictions_dict, labels, gradients=None,
hessians=None):
"""Updates the accumalator stats and grows the ensemble.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
labels: Rank 2 `Tensor` representing labels per example. Has no effect
on the training and is only kept for backward compatibility.
      gradients: A tensor with the gradients with respect to the logits from
        predictions_dict. If not provided, TensorFlow will compute them via
        automatic differentiation.
      hessians: A tensor with the hessians with respect to the logits from
        predictions_dict. If not provided, TensorFlow will compute them via
        automatic differentiation.
    Returns:
      An op that updates the training stats and potentially grows the ensemble.
Raises:
ValueError: if inputs are not valid.
"""
del labels # unused; kept for backward compatibility.
update_op, _, training_state = self.update_stats(loss, predictions_dict,
gradients, hessians)
with ops.control_dependencies(update_op):
return self.increment_step_counter_and_maybe_update_ensemble(
predictions_dict, training_state)
def _get_weights(self, hessian_shape, hessians):
"""Derives weights to be used based on hessians and multiclass strategy."""
if hessian_shape == tensor_shape.scalar():
# This is tree per class.
weights = hessians
elif len(hessian_shape.dims) == 1:
# This is diagonal hessian.
weights = math_ops.reduce_sum(hessians, axis=1)
else:
# This is full hessian.
weights = math_ops.trace(hessians)
return weights
def _full_hessian(self, grads, predictions):
"""Prepares hessians for full-hessian multiclass strategy."""
# Because of
# https://github.com/tensorflow/tensorflow/issues/675, we can't just
# compute the full hessian with a single call to gradients, but instead
# must compute it row-by-row.
gradients_list = array_ops.unstack(
grads, num=self._logits_dimension, axis=1)
hessian_rows = []
for row in range(self._logits_dimension):
      # If the current row is i and K is the number of classes, each call
      # returns a tensor of size batch_size x K holding, for each example,
      # dx_i dx_1, dx_i dx_2, ..., dx_i dx_K.
hessian_row = gradients_impl.gradients(
gradients_list[row],
predictions,
name="Hessian_%d" % row,
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)
# hessian_row is of dimension 1, batch_size, K, => trim first dimension
# to get batch_size x K
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
hessian_rows.append(hessian_row)
return hessian_rows
def _diagonal_hessian(self, grads, predictions):
"""Prepares hessians for diagonal-hessian multiclass mode."""
diag_hessian_list = []
gradients_list = array_ops.unstack(
grads, num=self._logits_dimension, axis=1)
for row, row_grads in enumerate(gradients_list):
      # If the current row is i and K is the number of classes, each call
      # returns a tensor of size batch_size x K holding, for each example,
      # dx_i dx_1, dx_i dx_2, ..., dx_i dx_K.
hessian_row = gradients_impl.gradients(
row_grads,
predictions,
name="Hessian_%d" % row,
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)
# hessian_row is of dimension 1, batch_size, K, => trim first dimension
# to get batch_size x K
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
# Get dx_i^2 for the whole batch.
elem = array_ops.transpose(hessian_row)[row]
diag_hessian_list.append(elem)
return diag_hessian_list
def _get_replica_device_setter(self, worker_device):
"""Creates a replica device setter."""
ps_tasks = self._num_ps_replicas
ps_ops = list(device_setter.STANDARD_PS_OPS)
ps_ops.extend([
"DecisionTreeEnsembleResourceHandleOp",
"StatsAccumulatorScalarResourceHandleOp",
"StatsAccumulatorTensorResourceHandleOp",
])
ps_strategy = _OpRoundRobinStrategy(ps_ops, ps_tasks)
return device_setter.replica_device_setter(
worker_device=worker_device,
ps_tasks=ps_tasks,
merge_devices=True,
ps_ops=ps_ops,
ps_strategy=ps_strategy)
def _make_update_bias_stats_fn(self,
ensemble_stamp,
predictions,
gradients,
bias_stats_accumulator,
hessians=None):
"""A method to create the function which updates the bias stats."""
def _update_bias_stats():
"""A method to update the bias stats."""
# Get reduced gradients and hessians.
grads_sum = math_ops.reduce_sum(gradients, 0)
if hessians is not None:
hess = hessians
else:
hess = gradients_impl.gradients(
grads_sum,
predictions,
name="Hessians",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
hess_sum = math_ops.reduce_sum(hess, 0)
# Accumulate gradients and hessians.
partition_ids = math_ops.range(self._logits_dimension)
feature_ids = array_ops.zeros(
[self._logits_dimension, 2], dtype=dtypes.int64)
add_stats_op = bias_stats_accumulator.add(
ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)
return control_flow_ops.group(*[add_stats_op], name="update_bias_stats")
return _update_bias_stats
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GBDT train function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _squared_loss(label, unused_weights, predictions):
"""Unweighted loss implementation."""
loss = math_ops.reduce_sum(
math_ops.squared_difference(predictions, label), 1, keepdims=True)
return loss
def _append_to_leaf(leaf, c_id, w):
"""Helper method for building tree leaves.
Appends weight contributions for the given class index to a leaf node.
Args:
leaf: leaf node to append to.
c_id: class Id for the weight update.
w: weight contribution value.
"""
leaf.sparse_vector.index.append(c_id)
leaf.sparse_vector.value.append(w)
def _set_float_split(split, feat_col, thresh, l_id, r_id):
"""Helper method for building tree float splits.
Sets split feature column, threshold and children.
Args:
split: split node to update.
feat_col: feature column for the split.
thresh: threshold to split on forming rule x <= thresh.
l_id: left child Id.
r_id: right child Id.
"""
split.feature_column = feat_col
split.threshold = thresh
split.left_id = l_id
split.right_id = r_id
class GbdtTest(test_util.TensorFlowTestCase):
def setUp(self):
super(GbdtTest, self).setUp()
def testExtractFeatures(self):
"""Tests feature extraction."""
with self.cached_session():
features = {}
features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
features["sparse_float"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.float32),
array_ops.zeros([2], dtypes.int64))
features["sparse_int"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.int64), array_ops.zeros([2],
dtypes.int64))
(fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices, sparse_int_values,
sparse_int_shapes) = (
gbdt_batch.extract_features(features, None, use_core_columns=False))
self.assertEqual(len(fc_names), 3)
self.assertAllEqual(fc_names,
["dense_float", "sparse_float", "sparse_int"])
self.assertEqual(len(dense_floats), 1)
self.assertEqual(len(sparse_float_indices), 1)
self.assertEqual(len(sparse_float_values), 1)
self.assertEqual(len(sparse_float_shapes), 1)
self.assertEqual(len(sparse_int_indices), 1)
self.assertEqual(len(sparse_int_values), 1)
self.assertEqual(len(sparse_int_shapes), 1)
self.assertAllEqual(dense_floats[0].eval(),
features["dense_float"].eval())
self.assertAllEqual(sparse_float_indices[0].eval(),
features["sparse_float"].indices.eval())
self.assertAllEqual(sparse_float_values[0].eval(),
features["sparse_float"].values.eval())
self.assertAllEqual(sparse_float_shapes[0].eval(),
features["sparse_float"].dense_shape.eval())
self.assertAllEqual(sparse_int_indices[0].eval(),
features["sparse_int"].indices.eval())
self.assertAllEqual(sparse_int_values[0].eval(),
features["sparse_int"].values.eval())
self.assertAllEqual(sparse_int_shapes[0].eval(),
features["sparse_int"].dense_shape.eval())
def testExtractFeaturesWithTransformation(self):
"""Tests feature extraction."""
with self.cached_session():
features = {}
features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
features["sparse_float"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.float32),
array_ops.zeros([2], dtypes.int64))
features["sparse_categorical"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.string), array_ops.zeros([2],
dtypes.int64))
feature_columns = set()
feature_columns.add(layers.real_valued_column("dense_float"))
feature_columns.add(
layers.feature_column._real_valued_var_len_column(
"sparse_float", is_sparse=True))
feature_columns.add(
feature_column_lib.sparse_column_with_hash_bucket(
"sparse_categorical", hash_bucket_size=1000000))
(fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices, sparse_int_values,
sparse_int_shapes) = (
gbdt_batch.extract_features(
features, feature_columns, use_core_columns=False))
self.assertEqual(len(fc_names), 3)
self.assertAllEqual(fc_names,
["dense_float", "sparse_float", "sparse_categorical"])
self.assertEqual(len(dense_floats), 1)
self.assertEqual(len(sparse_float_indices), 1)
self.assertEqual(len(sparse_float_values), 1)
self.assertEqual(len(sparse_float_shapes), 1)
self.assertEqual(len(sparse_int_indices), 1)
self.assertEqual(len(sparse_int_values), 1)
self.assertEqual(len(sparse_int_shapes), 1)
self.assertAllEqual(dense_floats[0].eval(),
features["dense_float"].eval())
self.assertAllEqual(sparse_float_indices[0].eval(),
features["sparse_float"].indices.eval())
self.assertAllEqual(sparse_float_values[0].eval(),
features["sparse_float"].values.eval())
self.assertAllEqual(sparse_float_shapes[0].eval(),
features["sparse_float"].dense_shape.eval())
self.assertAllEqual(sparse_int_indices[0].eval(),
features["sparse_categorical"].indices.eval())
self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])
self.assertAllEqual(sparse_int_shapes[0].eval(),
features["sparse_categorical"].dense_shape.eval())
def testExtractFeaturesFromCoreFeatureColumns(self):
"""Tests feature extraction when using core columns."""
with self.cached_session():
features = {}
# Sparse float column does not exist in core, so only dense numeric and
# categorical.
features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
features["sparse_categorical"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.string), array_ops.zeros([2],
dtypes.int64))
feature_columns = set()
feature_columns.add(core_feature_column.numeric_column("dense_float"))
feature_columns.add(
core_feature_column.categorical_column_with_hash_bucket(
"sparse_categorical", hash_bucket_size=1000000))
(fc_names, dense_floats, _, _, _, sparse_int_indices, sparse_int_values,
sparse_int_shapes) = (
gbdt_batch.extract_features(
features, feature_columns, use_core_columns=True))
self.assertEqual(len(fc_names), 2)
self.assertAllEqual(fc_names, ["dense_float", "sparse_categorical"])
self.assertEqual(len(dense_floats), 1)
self.assertEqual(len(sparse_int_indices), 1)
self.assertEqual(len(sparse_int_values), 1)
self.assertEqual(len(sparse_int_shapes), 1)
self.assertAllEqual(dense_floats[0].eval(),
features["dense_float"].eval())
self.assertAllEqual(sparse_int_indices[0].eval(),
features["sparse_categorical"].indices.eval())
self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])
self.assertAllEqual(sparse_int_shapes[0].eval(),
features["sparse_categorical"].dense_shape.eval())
def testTrainFnChiefNoBiasCentering(self):
"""Tests the train function running on chief without bias centering."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [0.1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
dense_float_binary_split {
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
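      # Added sanity check (hedged): under squared loss the populated leaf
      # value 0.25 is simply the mean residual of the four examples, and the
      # tree weight 0.1 is the configured learning rate.
      residuals = [1.0 - p for p in [0.0, 1.0, 0.0, 2.0]]
      self.assertAlmostEqual(sum(residuals) / len(residuals), 0.25)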
def testObliviousDecisionTreeAsWeakLearner(self):
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.learning_rate_tuner.fixed.learning_rate = 1
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 2
learner_config.constraints.min_node_weight = 0
learner_config.weak_learner_type = (
learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
learner_config.pruning_mode = learner_pb2.LearnerConfig.PRE_PRUNE
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
features = {}
features["dense_float"] = array_ops.constant([[-2], [-1], [1], [2]],
dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions_dict = gbdt_model.predict(learn.ModeKeys.TRAIN)
predictions = predictions_dict["predictions"]
labels = array_ops.constant([[-2], [-1], [1], [2]], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Second run.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
oblivious_dense_float_binary_split {
threshold: -1.0
}
node_metadata {
gain: 4.5
original_oblivious_leaves {
}
}
}
nodes {
leaf {
vector {
value: -1.5
}
}
}
nodes {
leaf {
vector {
value: 1.5
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
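      # Added sanity check (hedged): with learning rate 1, squared loss and
      # zero initial predictions, the leaf values are the label means on each
      # side of the threshold -1.0.
      self.assertAlmostEqual((-2.0 + -1.0) / 2, -1.5)
      self.assertAlmostEqual((1.0 + 2.0) / 2, 1.5)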
# Third run.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [1])
self.assertEquals(stamp_token.eval(), 3)
expected_tree = """
nodes {
oblivious_dense_float_binary_split {
threshold: -1.0
}
node_metadata {
gain: 4.5
original_oblivious_leaves {
}
}
}
nodes {
oblivious_dense_float_binary_split {
threshold: -2.0
}
node_metadata {
gain: 0.25
original_oblivious_leaves {
vector {
value: -1.5
}
}
original_oblivious_leaves {
vector {
value: 1.5
}
}
}
}
nodes {
leaf {
vector {
value: -2.0
}
}
}
nodes {
leaf {
vector {
value: -1.0
}
}
}
nodes {
leaf {
vector {
value: 1.5
}
}
}
nodes {
leaf {
vector {
value: 1.5
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testTrainFnChiefSparseAndDense(self):
"""Tests the train function with sparse and dense features."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
features["sparse_float"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.float32),
array_ops.constant([4, 1], dtypes.int64))
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [0.1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
sparse_float_binary_split_default_right {
split{
left_id: 1
right_id: 2
}
}
node_metadata {
gain: 1.125
}
}
nodes {
leaf {
vector {
value: 1.0
}
}
}
nodes {
leaf {
vector {
value: -0.5
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testTrainFnChiefScalingNumberOfExamples(self):
"""Tests the train function running on chief without bias centering."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
num_examples_fn = (
lambda layer: math_ops.pow(math_ops.cast(2, dtypes.int64), layer) * 1)
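      # The schedule above requests 2**layer examples for layers 0, 1, 2, ...,
      # i.e. 1, 2, 4, ... examples per layer.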
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=num_examples_fn,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [0.1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
dense_float_binary_split {
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testTrainFnChiefWithBiasCentering(self):
"""Tests the train function running on chief with bias centering."""
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect bias to be centered.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
expected_tree = """
nodes {
leaf {
vector {
value: 0.25
}
}
}"""
self.assertEquals(len(output.trees), 1)
self.assertAllEqual(output.tree_weights, [1.0])
self.assertProtoEquals(expected_tree, output.trees[0])
self.assertEquals(stamp_token.eval(), 1)
def testTrainFnNonChiefNoBiasCentering(self):
"""Tests the train function running on worker without bias centering."""
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# Regardless of how many times the train op is run, a non-chief worker
# can only accumulate stats so the tree ensemble never changes.
for _ in range(5):
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 0)
def testTrainFnNonChiefWithCentering(self):
"""Tests the train function running on worker with bias centering."""
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# Regardless of how many times the train op is run, a non-chief worker
# can only accumulate stats so the tree ensemble never changes.
for _ in range(5):
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 0)
def testPredictFn(self):
"""Tests the predict function."""
with self.cached_session() as sess:
# Create ensemble with one bias node.
ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
trees {
nodes {
leaf {
vector {
value: 0.25
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}""", ensemble_config)
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=3,
tree_ensemble_config=ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
# Create predict op.
mode = model_fn.ModeKeys.EVAL
predictions_dict = sess.run(gbdt_model.predict(mode))
self.assertEquals(predictions_dict["ensemble_stamp"], 3)
self.assertAllClose(predictions_dict["predictions"],
[[0.25], [0.25], [0.25], [0.25]])
self.assertAllClose(predictions_dict["partition_ids"], [0, 0, 0, 0])
def testPredictFnWithLeafIndexAdvancedLeft(self):
"""Tests the predict function with output leaf ids."""
with self.cached_session() as sess:
# Create ensemble with one bias node.
ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
trees {
nodes {
dense_float_binary_split {
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.15
}
}
}
}
trees {
nodes {
dense_float_binary_split {
threshold: 0.99
left_id: 1
right_id: 2
}
node_metadata {
                gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.23
}
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}""", ensemble_config)
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=3,
tree_ensemble_config=ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.constant(
[[0.0], [1.0], [1.1], [2.0]], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features,
output_leaf_index=True)
# Create predict op.
mode = model_fn.ModeKeys.INFER
predictions_dict = sess.run(gbdt_model.predict(mode))
self.assertEquals(predictions_dict["ensemble_stamp"], 3)
      # Here is how the numbers in the expected results are calculated:
# 0.5 = 0.25 + 0.25
# 0.48 = 0.25 + 0.23
# 0.38 = 0.15 + 0.23
# 0.38 = 0.15 + 0.23
self.assertAllClose(predictions_dict["predictions"],
[[0.5], [0.48], [0.38], [0.38]])
self.assertAllClose(predictions_dict["partition_ids"], [0, 0, 0, 0])
self.assertAllClose(predictions_dict["leaf_index"],
[[1, 1], [1, 2], [2, 2], [2, 2]])
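      # Added cross-check (hedged): the predictions can be recomputed from the
      # reported leaf indices and the hand-written leaf values above, assuming
      # leaf index i of tree t selects that tree's (i - 1)-th leaf value.
      tree_leaves = [[0.25, 0.15], [0.25, 0.23]]
      recomputed = [[sum(tree_leaves[t][int(i) - 1] for t, i in enumerate(row))]
                    for row in predictions_dict["leaf_index"]]
      self.assertAllClose(predictions_dict["predictions"], recomputed)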
def testTrainFnMulticlassFullHessian(self):
"""Tests the GBDT train for multiclass full hessian."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
      # To make the matrix invertible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
batch_size = 3
features["dense_float"] = array_ops.constant(
[0.3, 1.5, 1.1], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5,
features=features)
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 0,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
      # We get 3 nodes: one split node and 2 leaves.
self.assertEqual(len(output.trees[0].nodes), 3)
self.assertAllClose(output.tree_weights, [1])
self.assertEquals(stamp_token.eval(), 2)
      # Leaves should have a dense vector of size 5.
expected_leaf_1 = [-3.4480, -3.4429, 13.8490, -3.45, -3.4508]
expected_leaf_2 = [-1.2547, -1.3145, 1.52, 2.3875, -1.3264]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[1].leaf.vector.value, 7e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[2].leaf.vector.value, 7e-3)
def testTrainFnMulticlassDiagonalHessian(self):
"""Tests the GBDT train for multiclass diagonal hessian."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
      # Use the diagonal hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
      # To make the matrix invertible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
batch_size = 3
features = {}
features["dense_float"] = array_ops.constant(
[0.3, 1.5, 1.1], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5,
features=features)
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 0,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 0)
self.assertEqual(len(output.tree_weights), 0)
self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
      # We get 3 nodes: one split node and 2 leaves.
self.assertEqual(len(output.trees[0].nodes), 3)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
      # Leaves should have a dense vector of size 5.
expected_leaf_1 = [-1.0354, -1.0107, 17.2976, -1.1313, -4.5023]
expected_leaf_2 = [-1.2924, -1.1376, 2.2042, 3.1052, -1.6269]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[1].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[2].leaf.vector.value, 1e-3)
def testTrainFnMulticlassDiagonalHessianOblivious(self):
"""Tests the GBDT train for multiclass diagonal hessian."""
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
      # Use the diagonal hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
      # To make the matrix invertible.
learner_config.regularization.l2 = 1e-5
learner_config.weak_learner_type = (
learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
learner_config.pruning_mode = learner_pb2.LearnerConfig.PRE_PRUNE
learner_config.constraints.max_tree_depth = 5
learner_config.constraints.min_node_weight = 0
batch_size = 3
features = {}
features["sparse_int"] = sparse_tensor.SparseTensor(
array_ops.constant([[0, 0], [1, 0]], dtypes.int64),
array_ops.constant([1, 2], dtypes.int64),
array_ops.constant([3, 1], dtypes.int64))
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5,
features=features)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
predictions_dict = gbdt_model.predict(learn.ModeKeys.TRAIN)
predictions = predictions_dict["predictions"]
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
      # Grow 2 layers.
      train_op.run()
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
      # We get 6 nodes: 2 split nodes and 4 leaves.
self.assertEqual(len(output.trees[0].nodes), 6)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
print(output.trees[0])
      # Leaves should have a dense vector of size 5.
expected_leaf_1 = [-1.2497, -1.24976, 4.999, -1.24976, -1.2497]
expected_leaf_2 = [-2.2362, -2.2362, 6.0028, -2.2362, -2.2362]
expected_leaf_3 = [-2.2694, -2.2694, 4.0064, -0.0084, -2.2694]
expected_leaf_4 = [-2.2694, -2.2694, -0.0084, 4.0064, -2.2694]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[2].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[3].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_3,
output.trees[0].nodes[4].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_4,
output.trees[0].nodes[5].leaf.vector.value, 1e-3)
def testTrainFnMulticlassTreePerClass(self):
"""Tests the GBDT train for multiclass tree per class strategy."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
      # Use the tree-per-class multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
      # To make the matrix invertible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {
"dense_float":
array_ops.constant([[1.0], [1.5], [2.0]], dtypes.float32),
}
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5,
features=features)
batch_size = 3
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 2.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
          # This should result in a tree built for class 2.
"num_trees": 13,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 0)
self.assertEqual(len(output.tree_weights), 0)
self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
# One node for a split, two children nodes.
self.assertEqual(3, len(output.trees[0].nodes))
      # Leaves will have a sparse vector for class 3.
self.assertEqual(1,
len(output.trees[0].nodes[1].leaf.sparse_vector.index))
self.assertEqual(3, output.trees[0].nodes[1].leaf.sparse_vector.index[0])
self.assertAlmostEqual(
-1.13134455681, output.trees[0].nodes[1].leaf.sparse_vector.value[0])
self.assertEqual(1,
len(output.trees[0].nodes[2].leaf.sparse_vector.index))
self.assertEqual(3, output.trees[0].nodes[2].leaf.sparse_vector.index[0])
self.assertAllClose(
0.893284678459,
output.trees[0].nodes[2].leaf.sparse_vector.value[0],
atol=1e-4,
rtol=1e-4)
def testTrainFnChiefFeatureSelectionReachedLimitNoGoodSplit(self):
"""Tests the train function running on chief with feature selection."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.max_number_of_unique_feature_columns = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32)
      # Feature 1 is predictive, but it won't be used because we have reached
      # the limit: num_used_handlers >= max_number_of_unique_feature_columns.
features["dense_float_1"] = array_ops.constant([0, 0, 1, 1],
dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions":
predictions,
"predictions_no_dropout":
predictions,
"partition_ids":
partition_ids,
"ensemble_stamp":
ensemble_stamp,
"num_trees":
12,
"num_used_handlers":
array_ops.constant(1, dtype=dtypes.int64),
"used_handlers_mask":
array_ops.constant([True, False], dtype=dtypes.bool),
}
labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [0.1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
dense_float_binary_split {
feature_column: 0
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: -0.25
}
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
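      # Added sanity check (hedged): with only the non-predictive all-ones
      # feature available, the split is trivial and the non-zero leaf holds the
      # mean residual (label - prediction) over all four examples.
      residuals = [0.0 - 0.0, 0.0 - 1.0, 1.0 - 0.0, 1.0 - 2.0]
      self.assertAlmostEqual(sum(residuals) / len(residuals), -0.25)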
def testTrainFnChiefFeatureSelectionWithGoodSplits(self):
"""Tests the train function running on chief with feature selection."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.max_number_of_unique_feature_columns = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32)
# Feature 1 is predictive and is in our selected features so it will be
# used even when we're at the limit.
features["dense_float_1"] = array_ops.constant([0, 0, 1, 1],
dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions":
predictions,
"predictions_no_dropout":
predictions,
"partition_ids":
partition_ids,
"ensemble_stamp":
ensemble_stamp,
"num_trees":
12,
"num_used_handlers":
array_ops.constant(1, dtype=dtypes.int64),
"used_handlers_mask":
array_ops.constant([False, True], dtype=dtypes.bool),
}
labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [0.1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
dense_float_binary_split {
feature_column: 1
left_id: 1
right_id: 2
}
node_metadata {
gain: 0.5
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}
nodes {
leaf {
vector {
value: -0.5
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
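      # Added sanity check (hedged): splitting on dense_float_1 = [0, 0, 1, 1]
      # groups the residuals (label - prediction) as [1, -1] and [0, -1], whose
      # means match the two leaf values above.
      self.assertAlmostEqual((1.0 + -1.0) / 2, 0.0)
      self.assertAlmostEqual((0.0 + -1.0) / 2, -0.5)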
def testTrainFnChiefFeatureSelectionReachedLimitIncrementAttemptedLayer(self):
"""Tests the train function running on chief with feature selection."""
with self.cached_session() as sess:
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree = tree_ensemble_config.trees.add()
_set_float_split(
tree.nodes.add().sparse_float_binary_split_default_right.split, 2,
4.0, 1, 2)
_append_to_leaf(tree.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree.nodes.add().leaf, 1, 1.2)
tree_ensemble_config.tree_weights.append(1.0)
metadata = tree_ensemble_config.tree_metadata.add()
metadata.is_finalized = False
metadata.num_layers_grown = 1
tree_ensemble_config = tree_ensemble_config.SerializeToString()
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config,
name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.max_number_of_unique_feature_columns = 1
learner_config.constraints.min_node_weight = 0
features = {}
# Both features will be disabled since the feature selection limit is
# already reached.
features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32)
features["dense_float_1"] = array_ops.constant([0, 0, 1, 1],
dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions":
predictions,
"predictions_no_dropout":
predictions,
"partition_ids":
partition_ids,
"ensemble_stamp":
ensemble_stamp,
"num_trees":
12,
          # We have already reached the limit of 1 used handler, so both of
          # the handlers will be disabled.
"num_used_handlers":
array_ops.constant(1, dtype=dtypes.int64),
"used_handlers_mask":
array_ops.constant([False, False], dtype=dtypes.bool),
}
labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertEquals(output.growing_metadata.num_layers_attempted, 1)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
# Make sure the trees are not modified, but the num_layers_attempted is
# incremented so that eventually the training stops.
self.assertEquals(len(output.trees), 1)
self.assertEquals(len(output.trees[0].nodes), 3)
self.assertEquals(output.growing_metadata.num_layers_attempted, 2)
def testResetModelBeforeAndAfterSplit(self):
"""Tests whether resetting works."""
with self.cached_session():
# First build a small tree and train it to verify training works.
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
"max_tree_depth": 4,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))
# Create train op.
update_op, reset_op, training_state = gbdt_model.update_stats(
loss, predictions_dict)
with ops.control_dependencies(update_op):
train_op = gbdt_model.increment_step_counter_and_maybe_update_ensemble(
predictions_dict, training_state)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
original_stamp = ensemble_stamp.eval()
expected_tree = """
nodes {
dense_float_binary_split {
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}"""
def _train_once_and_check(expect_split):
stamp = ensemble_stamp.eval()
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(stamp_token.eval(), stamp + 1)
if expect_split:
# State of the ensemble after a split occurs.
self.assertEquals(len(output.trees), 1)
self.assertProtoEquals(expected_tree, output.trees[0])
else:
# State of the ensemble after a single accumulation but before any
# splitting occurs
self.assertEquals(len(output.trees), 0)
self.assertProtoEquals("""
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}""", output)
def _run_reset():
stamp_before_reset = ensemble_stamp.eval()
reset_op.run()
stamp_after_reset = ensemble_stamp.eval()
self.assertNotEquals(stamp_after_reset, stamp_before_reset)
_, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertProtoEquals("", output)
return stamp_after_reset
      # Exit after one train_op, so no new layers are created but the handlers
      # contain enough information to split on the next call to train.
_train_once_and_check(expect_split=False)
self.assertEquals(ensemble_stamp.eval(), original_stamp + 1)
# Reset the handlers so it still requires two training calls to split.
stamp_after_reset = _run_reset()
_train_once_and_check(expect_split=False)
_train_once_and_check(expect_split=True)
self.assertEquals(ensemble_stamp.eval(), stamp_after_reset + 2)
# This time, test that the reset_op works right after splitting.
stamp_after_reset = _run_reset()
# Test that after resetting, the tree can be trained as normal.
_train_once_and_check(expect_split=False)
_train_once_and_check(expect_split=True)
self.assertEquals(ensemble_stamp.eval(), stamp_after_reset + 2)
def testResetModelNonChief(self):
"""Tests the reset function on a non-chief worker."""
with self.cached_session():
# Create ensemble with one bias node.
ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
trees {
nodes {
leaf {
vector {
value: 0.25
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: false
}""", ensemble_config)
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=ensemble_config.SerializeToString(),
name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))
# Create reset op.
_, reset_op, _ = gbdt_model.update_stats(
loss, predictions_dict)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# Reset op doesn't do anything because this is a non-chief worker.
reset_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertEquals(len(output.tree_weights), 1)
self.assertEquals(stamp_token.eval(), 0)
def testResetModelWithCenterBias(self):
"""Tests the reset function running on chief with bias centering."""
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))
# Create train op.
update_op, reset_op, training_state = gbdt_model.update_stats(
loss, predictions_dict)
with ops.control_dependencies(update_op):
train_op = gbdt_model.increment_step_counter_and_maybe_update_ensemble(
predictions_dict, training_state)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect bias to be centered.
def train_and_check():
train_op.run()
_, serialized = model_ops.tree_ensemble_serialize(ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
expected_tree = """
nodes {
leaf {
vector {
value: 0.25
}
}
}"""
self.assertEquals(len(output.trees), 1)
self.assertAllEqual(output.tree_weights, [1.0])
self.assertProtoEquals(expected_tree, output.trees[0])
train_and_check()
self.assertEquals(ensemble_stamp.eval(), 1)
reset_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 2)
train_and_check()
self.assertEquals(ensemble_stamp.eval(), 3)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""functions module under boosted_trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/training/functions/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""utils module under boosted_trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/utils/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Losses for Gtflow Estimator and Batch Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses
def per_example_squared_hinge_loss(labels, weights, predictions):
loss = losses.hinge_loss(labels=labels, logits=predictions, weights=weights)
return math_ops.square(loss), control_flow_ops.no_op()
def per_example_logistic_loss(labels, weights, predictions):
"""Logistic loss given labels, example weights and predictions.
Args:
labels: Rank 2 (N, 1) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, 1) tensor of per-example predictions.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example logistic loss.
update_op: An update operation to update the loss's internal state.
"""
labels = math_ops.cast(labels, dtypes.float32)
unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()
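# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original losses module: it shows
# how one of the per-example losses above can be evaluated in TF 1.x graph
# mode. The helper name and all tensor values are hypothetical.
def _example_per_example_logistic_loss_usage():
  """Builds a Rank 2 (3, 1) tensor of weighted per-example logistic losses."""
  from tensorflow.python.framework import constant_op  # local import, sketch only
  labels = constant_op.constant([[1.0], [0.0], [1.0]], dtype=dtypes.float32)
  weights = constant_op.constant([[1.0], [2.0], [0.5]], dtype=dtypes.float32)
  logits = constant_op.constant([[2.0], [-1.0], [0.3]], dtype=dtypes.float32)
  loss, _ = per_example_logistic_loss(labels, weights, logits)
  # Evaluate with loss.eval() inside a tf.Session().
  return loss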
# MUST BE USED WITH HESSIAN REGULARIZATION.
# This loss can have a zero hessian, so it must be used with l2 or
# min_node_weight regularization.
# An example config is
# learner_config.constraints.min_node_weight = 1 / num_examples_per_layer
# learner_config.regularization.l2 = 1.0 / num_examples_per_layer
# TODO(nponomareva): make it multidimensional so we can estimate several
# quantiles at once.
def per_example_quantile_regression_loss(labels, weights, predictions,
quantile):
"""Smoothed loss for quantile regression.
The standard quantile regression loss is quantile*(y-y') when y>y' and
(quantile-1)*(y-y') otherwise, where y' is a prediction and y is a label. The
implementation below is this loss, but squared in the region where the loss
value is < 1.
Args:
labels: Rank 2 (N, D) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, D) tensor of per-example predictions.
quantile: The quantile to use.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example quantile loss.
update_op: An update operation to update the loss's internal state.
"""
labels = math_ops.cast(labels, dtypes.float32)
error = labels - predictions
square_loss_right = array_ops.where(error * quantile < 1.0,
math_ops.square(quantile * error),
quantile * error)
square_loss_left = array_ops.where(error * (quantile - 1) < 1,
math_ops.square((quantile - 1) * error),
(quantile - 1) * error)
unweighted_loss = array_ops.where(error > 0, square_loss_right,
square_loss_left)
if weights is None:
return unweighted_loss, control_flow_ops.no_op()
else:
return unweighted_loss * weights, control_flow_ops.no_op()
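# Illustrative sketch, not part of the original module: for quantile=0.9, an
# error of 0.5 falls in the squared branch of the `array_ops.where` above
# (quantile * error < 1), while an error of 20 falls in the linear branch.
# The helper name and all values are hypothetical.
def _example_quantile_regression_loss_usage():
  from tensorflow.python.framework import constant_op  # local import, sketch only
  labels = constant_op.constant([[1.5], [21.0]], dtype=dtypes.float32)
  predictions = constant_op.constant([[1.0], [1.0]], dtype=dtypes.float32)
  weights = constant_op.constant([[1.0], [1.0]], dtype=dtypes.float32)
  # Expected per-example values: (0.9 * 0.5)**2 = 0.2025 and 0.9 * 20 = 18.0.
  loss, _ = per_example_quantile_regression_loss(
      labels, weights, predictions, quantile=0.9)
  return loss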
# This is the classical form of maximum entropy loss, which is twice
# differentiable (sparse_softmax_cross_entropy, which we would otherwise use,
# is not twice differentiable).
def per_example_maxent_loss(labels, weights, logits, num_classes, eps=1e-15):
"""Maximum entropy loss for multiclass problems.
Maximum entropy is a generalization of logistic loss for the case when more
than 2 classes are present.
Args:
labels: Rank 2 (N, 1) or Rank 1 (N) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
logits: Rank 2 (N, K) tensor of per-example predictions, K - num of
classes.
num_classes: number of classes in classification task. Used to expand label
indices into one-hot encodings.
eps: tolerance, used as a minimum possible value.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example maxent loss
update_op: An update operation to update the loss's internal state.
"""
labels = math_ops.cast(labels, dtypes.int64)
# If labels are of rank 1, make them rank 2.
labels_shape = labels.get_shape()
if len(labels_shape) != 2:
labels = array_ops.expand_dims(labels, 1)
# Labels are indices of classes, convert them to one hot encodings.
target_one_hot = array_ops.one_hot(indices=labels, depth=num_classes)
labels = math_ops.reduce_sum(input_tensor=target_one_hot, axis=[1])
labels = math_ops.cast(labels, dtypes.float32)
# Calculate softmax probabilities for each class.
unnormalized_probs = math_ops.exp(logits)
normalizers = math_ops.reduce_sum(unnormalized_probs, 1, keepdims=True)
softmax_predictions = math_ops.divide(unnormalized_probs,
math_ops.add(normalizers, eps))
# Pull out the probabilities for real label.
probs_for_real_class = math_ops.reduce_sum(labels * softmax_predictions, 1)
# Add handling for values near 0 and 1.
zeros = array_ops.zeros_like(probs_for_real_class, dtype=logits.dtype) + eps
one_minus_eps = array_ops.ones_like(
probs_for_real_class, dtype=logits.dtype) - eps
# Take maximum(eps, pred)
cond = (probs_for_real_class >= eps)
probs_for_real_class = array_ops.where(cond, probs_for_real_class, zeros)
# Take minimum(1-eps, pred)
cond = (probs_for_real_class <= 1 - eps)
probs_for_real_class = array_ops.where(cond, probs_for_real_class,
one_minus_eps)
unweighted_loss = array_ops.expand_dims(-math_ops.log(probs_for_real_class),
1)
if weights is None:
return unweighted_loss, control_flow_ops.no_op()
else:
return unweighted_loss * weights, control_flow_ops.no_op()
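# Illustrative sketch, not part of the original module: a 3-class example in
# which each row's label selects one softmax probability and the per-example
# loss is -log of that (clipped) probability. Names and values are made up.
def _example_maxent_loss_usage():
  from tensorflow.python.framework import constant_op  # local import, sketch only
  labels = constant_op.constant([2, 0], dtype=dtypes.int64)  # rank 1 is allowed
  weights = constant_op.constant([[1.0], [1.0]], dtype=dtypes.float32)
  logits = constant_op.constant([[0.1, 0.2, 3.0], [2.0, -1.0, 0.0]],
                                dtype=dtypes.float32)
  loss, _ = per_example_maxent_loss(labels, weights, logits, num_classes=3)
  return loss  # Rank 2 (2, 1) tensor of per-example maxent losses.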
def per_example_squared_loss(labels, weights, predictions):
"""Squared loss given labels, example weights and predictions.
Args:
labels: Rank 2 (N, D) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, D) tensor of per-example predictions.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example squared loss.
update_op: An update operation to update the loss's internal state.
"""
unweighted_loss = math_ops.reduce_sum(
math_ops.squared_difference(predictions, labels), 1, keepdims=True)
return unweighted_loss * weights, control_flow_ops.no_op()
def per_example_exp_loss(labels, weights, predictions, name=None, eps=0.1):
"""Trimmed exponential loss given labels, example weights and predictions.
Note that this is only for binary classification.
Whereas logistic loss tries to make sure that the classifier is certain of its
predictions, exp loss says: "as long as it got it correct, even barely, I
don't care". It can be used on noisy data, or when you don't care about getting
the actual probabilities from the model, just the correct label.
The loss returned is exp(-targets*modified_predictions), where
modified_predictions are 1 if sigmoid is >= 0.5+eps (e.g. we predict the
positive class), -1 if sigmoid is < 0.5-eps (e.g. we predict the negative
class) and a*x+b in the interval (0.5-eps, 0.5+eps), where a = 1/eps and
b = -1/(2*eps), matching the constants used in the implementation below.
Args:
labels: Rank 2 (N, D) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, D) tensor of per-example predictions.
name: A name for the operation (optional).
eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example exp loss
update_op: An update operation to update the loss's internal state.
"""
def exp_with_logits(name, eps, labels=None, logits=None):
"""Computes exponential loss given `logits`.
The loss returned is exp(-targets*modified_predictions), where
modified_predictions are 1 if sigmoid is >= 0.5+eps (e.g. we predict the
positive class), -1 if sigmoid is < 0.5-eps (e.g. we predict the negative
class) and a*x+b in the interval (0.5-eps, 0.5+eps), where a = 1/eps and
b = -1/(2*eps).
Args:
name: A name for the operation (optional).
eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b.
labels: A `Tensor` of the same type and shape as `logits`.
logits: A `Tensor` of type `float32` or `float64`.
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
exponential losses.
Raises:
ValueError: If `logits` and `labels` do not have the same shape.
"""
with ops.name_scope(name, "exp_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)"
% (logits.get_shape(), labels.get_shape()))
# Default threshold to switch between classes
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
ones = array_ops.ones_like(logits, dtype=logits.dtype)
neg_ones = -array_ops.ones_like(logits, dtype=logits.dtype)
# Convert labels to 1 and -1
cond_labels = (labels > zeros)
labels_converted = array_ops.where(cond_labels, ones, neg_ones)
# Convert predictions to 1 and -1
# The loss we build is min(1, max(-1,ax+b))
# where a=1/eps, b=-1/2eps.
a = 1.0 / eps
b = -1.0 / 2 / eps
probs = math_ops.sigmoid(logits)
y = a * probs + b
# Build max(-1, ax+b)
cond = (y < -1)
max_res = array_ops.where(cond, neg_ones, y)
# Build min part
cond = (max_res > 1)
min_res = array_ops.where(cond, ones, max_res)
preds_converted = min_res
return math_ops.exp(-preds_converted * labels_converted)
labels = math_ops.cast(labels, dtypes.float32)
unweighted_loss = exp_with_logits(
name=name, eps=eps, labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()
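# Illustrative sketch, not part of the original module: with eps=0.2 a logit
# whose sigmoid is >= 0.7 is treated as a hard +1 prediction and <= 0.3 as -1,
# mirroring the clipping in exp_with_logits above. Values are hypothetical.
def _example_exp_loss_usage():
  from tensorflow.python.framework import constant_op  # local import, sketch only
  labels = constant_op.constant([[1.0], [1.0]], dtype=dtypes.float32)
  weights = constant_op.constant([[1.0], [1.0]], dtype=dtypes.float32)
  logits = constant_op.constant([[3.0], [-3.0]], dtype=dtypes.float32)
  # First example is confidently correct (loss exp(-1)); the second is
  # confidently wrong (loss exp(1)).
  loss, _ = per_example_exp_loss(labels, weights, logits, eps=0.2)
  return loss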
def per_example_full_exp_loss(labels, weights, predictions, name=None):
"""Full exponential loss given labels, example weights and predictions.
Note that this is only for binary classification.
The loss returned is exp(-targets*logits), where targets are converted to -1
and 1.
Args:
labels: Rank 2 (N, D) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, D) tensor of per-example predictions.
name: A name for the operation (optional).
Returns:
loss: A Rank 2 (N, 1) tensor of per-example exp loss
update_op: An update operation to update the loss's internal state.
"""
def full_exp_with_logits(name, labels=None, logits=None):
"""Computes exponential loss given `logits`.
Args:
name: A name for the operation (optional).
labels: A `Tensor` of the same type and shape as `logits`.
logits: A `Tensor` of type `float32` or `float64`.
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
exponential losses.
Raises:
ValueError: If `logits` and `labels` do not have the same shape.
"""
with ops.name_scope(name, "exp_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)"
% (logits.get_shape(), labels.get_shape()))
# Default threshold of 0 to switch between classes
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
ones = array_ops.ones_like(logits, dtype=logits.dtype)
neg_ones = -array_ops.ones_like(logits, dtype=logits.dtype)
# Convert labels to 1 and -1
cond_labels = (labels > zeros)
labels_converted = array_ops.where(cond_labels, ones, neg_ones)
return math_ops.exp(-1.0 * logits * labels_converted)
labels = math_ops.cast(labels, dtypes.float32)
unweighted_loss = full_exp_with_logits(
name=name, labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/utils/losses.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trainer hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class LossesTest(test_util.TensorFlowTestCase):
def test_per_example_exp_loss(self):
def _logit(p):
return np.log(p) - np.log(1 - p)
labels_positive = array_ops.ones([10, 1], dtypes.float32)
weights = array_ops.ones([10, 1], dtypes.float32)
labels_negative = array_ops.zeros([10, 1], dtypes.float32)
predictions_probs = np.array(
[[0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7], [0.8], [0.9], [0.99]],
dtype=np.float32)
prediction_logits = _logit(predictions_probs)
eps = 0.2
with self.cached_session():
predictions_tensor = constant_op.constant(
prediction_logits, dtype=dtypes.float32)
loss_for_positives, _ = losses.per_example_exp_loss(
labels_positive, weights, predictions_tensor, eps=eps)
loss_for_negatives, _ = losses.per_example_exp_loss(
labels_negative, weights, predictions_tensor, eps=eps)
pos_loss = loss_for_positives.eval()
neg_loss = loss_for_negatives.eval()
# For positive labels, points <= 0.3 get max loss of e.
# For negative labels, these points have minimum loss of 1/e.
self.assertAllClose(np.exp(np.ones([2, 1])), pos_loss[:2], atol=1e-4)
self.assertAllClose(np.exp(-np.ones([2, 1])), neg_loss[:2], atol=1e-4)
# For positive labels, points with predictions 0.7 and larger get the minimum
# loss value of 1/e. For negative labels, these points are wrongly
# classified and get loss e.
self.assertAllClose(np.exp(-np.ones([4, 1])), pos_loss[6:10], atol=1e-4)
self.assertAllClose(np.exp(np.ones([4, 1])), neg_loss[6:10], atol=1e-4)
# Points in between 0.5-eps and 0.5+eps get loss exp(-label_m*y), where
# y = 1/eps * x - 1/(2*eps), where x is the probability and label_m is either
# 1 or -1 (for a label of 0).
self.assertAllClose(
np.exp(-(predictions_probs[2:6] * 1.0 / eps - 0.5 / eps)),
pos_loss[2:6], atol=1e-4)
self.assertAllClose(
np.exp(predictions_probs[2:6] * 1.0 / eps - 0.5 / eps),
neg_loss[2:6], atol=1e-4)
def test_per_example_squared_loss(self):
labels = np.array([[0.123], [224.2], [-3], [2], [.3]], dtype=np.float32)
weights = array_ops.ones([5, 1], dtypes.float32)
predictions = np.array(
[[0.123], [23.2], [233], [52], [3]], dtype=np.float32)
with self.cached_session():
loss_tensor, _ = losses.per_example_squared_loss(labels, weights,
predictions)
loss = loss_tensor.eval()
self.assertAllClose(
np.square(labels[:5] - predictions[:5]), loss[:5], atol=1e-4)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/utils/losses_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import and conditionally load custom ops for training boosted trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader
# pylint: enable=unused-import
# pylint: disable=wildcard-import
from tensorflow.contrib.boosted_trees.python.ops.gen_training_ops import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/ops/training_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Split handler custom ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader
from tensorflow.contrib.boosted_trees.python.ops.gen_prediction_ops import gradient_trees_partition_examples
from tensorflow.contrib.boosted_trees.python.ops.gen_prediction_ops import gradient_trees_prediction
from tensorflow.contrib.boosted_trees.python.ops.gen_prediction_ops import gradient_trees_prediction_verbose
# pylint: enable=unused-import
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/ops/prediction_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantile ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader
# pylint: enable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import gen_quantile_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.contrib.boosted_trees.python.ops.gen_quantile_ops import *
# pylint: enable=wildcard-import,undefined-variable
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import resources
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
# Pattern to remove all non-alphanumeric characters from a string.
_PATTERN = re.compile(r"[\W_]+")
class QuantileAccumulatorSaveable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for QuantileAccumulator."""
def __init__(self, resource_handle, create_op, name):
self._resource_handle = resource_handle
self._create_op = create_op
stamp_token, state, are_buckets_ready, buckets = (
gen_quantile_ops.quantile_accumulator_serialize(resource_handle))
# slice_spec is useful for saving a slice from a variable.
# It's not meaningful for the quantile accumulator.
slice_spec = ""
def make_save_spec(tensor, suffix):
return saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name + suffix)
specs = [make_save_spec(stamp_token, "_stamp")]
specs += [make_save_spec(state, "_state")]
specs += [make_save_spec(are_buckets_ready, "_are_buckets_ready")]
specs += [make_save_spec(buckets, "buckets")]
super(QuantileAccumulatorSaveable, self).__init__(self._resource_handle,
specs, name)
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated quantile accumulator from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore.
Returns:
The operation that restores the state of the quantile accumulator.
"""
# Read the restored tensors in the same order they were added to the saving
# spec.
stamp_token = restored_tensors[:1]
state = restored_tensors[1:2]
are_buckets_ready = restored_tensors[2:3]
buckets = restored_tensors[3]
with ops.control_dependencies([self._create_op]):
return gen_quantile_ops.quantile_accumulator_deserialize(
self._resource_handle,
stamp_token=stamp_token,
stream_state=state,
are_buckets_ready=are_buckets_ready,
buckets=buckets)
class QuantileAccumulator(tracking.TrackableResource):
"""A resource that allows distributed quantile computation."""
def __init__(self,
init_stamp_token,
epsilon,
num_quantiles,
max_elements=None,
name=None,
container=None,
generate_quantiles=False):
"""Creates a QuantileAccumulator object.
Args:
init_stamp_token: The initial value for the stamp token.
epsilon: Error bound on the quantile computation.
num_quantiles: Number of quantiles to produce from the final summary.
max_elements: Maximum number of elements added to the accumulator.
name: the name to save the accumulator under.
container: An optional `string`. Defaults to `""`
generate_quantiles: Generate quantiles instead of approximate boundaries.
If true, exactly `num_quantiles` will be produced in the final summary.
"""
self._init_stamp_token = init_stamp_token
self._epsilon = epsilon
self._num_quantiles = num_quantiles
self._max_elements = max_elements
self._container = container
self._generate_quantiles = generate_quantiles
super(QuantileAccumulator, self).__init__()
name = _PATTERN.sub("", name)
with ops.name_scope(name, "QuantileAccumulator") as name:
self._name = name
self._resource_handle = self._create_resource()
self._init_op = self._initialize()
is_initialized_op = self.is_initialized()
resources.register_resource(self.resource_handle, self._init_op,
is_initialized_op)
self._saveable = QuantileAccumulatorSaveable(self.resource_handle,
self._init_op, name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
def _create_resource(self):
return gen_quantile_ops.quantile_stream_resource_handle_op(
container=self._container, shared_name=self._name, name=self._name)
def _initialize(self):
return gen_quantile_ops.create_quantile_accumulator(
self.resource_handle,
self._init_stamp_token,
epsilon=self._epsilon,
max_elements=self._max_elements,
num_quantiles=self._num_quantiles,
generate_quantiles=self._generate_quantiles)
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
return gen_quantile_ops.quantile_accumulator_is_initialized(
self.resource_handle)
def _gather_saveables_for_checkpoint(self):
return {"quantile_accumulator", self.saveable}
def get_buckets(self, stamp_token):
"""Returns quantile buckets created during previous flush."""
are_buckets_ready, buckets = (
gen_quantile_ops.quantile_accumulator_get_buckets(
quantile_accumulator_handles=[self.resource_handle],
stamp_token=stamp_token))
return are_buckets_ready[0], buckets[0]
def schedule_get_buckets(self):
"""Returns a scheduled read of buckets created during previous flush."""
return batch_ops_utils.ScheduledStampedResourceOp(
resource_handle=self.resource_handle,
op=gen_quantile_ops.quantile_accumulator_get_buckets)
def _make_summary(self, column, example_weights):
if isinstance(column, sparse_tensor.SparseTensor):
return gen_quantile_ops.make_quantile_summaries(
dense_float_features=[],
sparse_float_feature_indices=[column.indices],
sparse_float_feature_values=[column.values],
sparse_float_feature_shapes=[column.dense_shape],
example_weights=example_weights,
epsilon=self._epsilon / 2).sparse_summaries[0]
else:
return gen_quantile_ops.make_quantile_summaries(
dense_float_features=[column],
sparse_float_feature_indices=[],
sparse_float_feature_values=[],
sparse_float_feature_shapes=[],
example_weights=example_weights,
epsilon=self._epsilon / 2).dense_summaries[0]
def add_summary(self, stamp_token, column, example_weights):
"""Adds quantile summary to its stream in resource."""
summary = self._make_summary(column, example_weights)
return gen_quantile_ops.quantile_accumulator_add_summaries(
quantile_accumulator_handles=[self.resource_handle],
stamp_token=stamp_token,
summaries=[summary])
def add_prebuilt_summary(self, stamp_token, summary):
"""Adds quantile summary to its stream in resource."""
return gen_quantile_ops.quantile_accumulator_add_summaries(
quantile_accumulator_handles=[self.resource_handle],
stamp_token=stamp_token,
summaries=[summary])
def schedule_add_summary(self, stamp_token, column, example_weights):
"""Schedules to add a quantile summary to its stream in resource."""
summary = self._make_summary(column, example_weights)
return batch_ops_utils.ScheduledStampedResourceOp(
op=gen_quantile_ops.quantile_accumulator_add_summaries,
resource_handle=self.resource_handle,
summaries=summary)
def flush(self, stamp_token, next_stamp_token):
"""Finalizes quantile summary stream and resets it for next iteration.
Args:
stamp_token: Expected current token.
next_stamp_token: Next value for the token.
Returns:
The flush operation.
"""
return gen_quantile_ops.quantile_accumulator_flush(
quantile_accumulator_handle=self.resource_handle,
stamp_token=stamp_token,
next_stamp_token=next_stamp_token)
def flush_summary(self, stamp_token, next_stamp_token):
"""Finalizes quantile summary stream and resets it for next iteration."""
result = gen_quantile_ops.quantile_accumulator_flush_summary(
quantile_accumulator_handle=self.resource_handle,
stamp_token=stamp_token,
next_stamp_token=next_stamp_token)
return result
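# Illustrative usage sketch, not part of the original module: build a
# QuantileAccumulator, feed one dense column, flush, and read the resulting
# buckets. The accumulator name, tensor values, and shapes are hypothetical;
# the returned ops must be run in a TF 1.x session after initializing shared
# resources.
def _example_quantile_accumulator_usage():
  from tensorflow.python.framework import constant_op  # local import, sketch only
  from tensorflow.python.framework import dtypes  # local import, sketch only
  accumulator = QuantileAccumulator(
      init_stamp_token=0, epsilon=0.01, num_quantiles=3, name="example_acc")
  column = constant_op.constant([[1.0], [2.0], [3.0], [4.0]],
                                dtype=dtypes.float32)
  weights = constant_op.constant([1.0, 1.0, 1.0, 1.0], dtype=dtypes.float32)
  add_op = accumulator.add_summary(
      stamp_token=0, column=column, example_weights=weights)
  flush_op = accumulator.flush(stamp_token=0, next_stamp_token=1)
  are_ready, buckets = accumulator.get_buckets(stamp_token=1)
  return add_op, flush_op, are_ready, buckets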
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/ops/quantile_ops.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Split handler custom ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader
# pylint: enable=unused-import
# pylint: disable=wildcard-import
from tensorflow.contrib.boosted_trees.python.ops.gen_split_handler_ops import *
# pylint: enable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/ops/split_handler_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for batching remote OPs together to reduce RPC overhead."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
@six.add_metaclass(abc.ABCMeta)
class ScheduledOp(object):
"""Represents a scheduled remote operation."""
@abc.abstractmethod
def batching_key(self):
"""Returns the key for batching operations."""
@abc.abstractmethod
def batch_runner_fn(self):
"""Returns the function that executes the operation on the batch."""
class ScheduledStampedResourceOp(ScheduledOp):
"""Wrapper class for batched operations on stamped resources."""
def __init__(self, resource_handle, op, **kwargs):
self.resource_handle = resource_handle
self.op = op
self.args = kwargs
def batching_key(self):
# We want to group the same operations on the same device and run them in
# one batch. So we use (device, operation) as the key.
return self.resource_handle.device, self.op
def batch_runner_fn(self):
return _scheduled_stamp_resource_op_runner
def _move_tensors(tensors, device):
"""Moves a list of tensors to a device by concatenating/splitting them."""
# Reset the device setting to avoid weird interactions with device merging
# logic.
zero = constant_op.constant(0, dtype=dtypes.int32)
with ops.device(None):
if all(tensor.shape == tensor_shape.scalar() for tensor in tensors):
with ops.device(tensors[0].device):
values = array_ops.stack(tensors)
with ops.device(device):
return array_ops.unstack(values)
else:
with ops.device(tensors[0].device):
sizes = array_ops.stack(array_ops.shape_n(tensors))[:, 0]
values = array_ops.concat(tensors, axis=zero)
with ops.device(device):
sizes = array_ops.unstack(sizes)
return list(array_ops.split(values, sizes, axis=zero))
def _scheduled_stamp_resource_op_runner(batch, stamp):
"""Runs a batch operation on a stamped resource."""
if not batch:
return
arg_keys = set(batch[0].args.keys())
grouped_args = collections.OrderedDict()
resource_handles = []
# Check that the set of arguments is the same across all the scheduled ops.
for op in batch:
if set(op.args.keys()) != arg_keys:
raise ValueError("Mismatching arguments: %s, %s." % (op.args, arg_keys))
for key in arg_keys:
grouped_args.setdefault(key, []).append(op.args[key])
resource_handles.append(op.resource_handle)
# Move all the inputs to the op device in one RPC.
grouped_args = collections.OrderedDict(
(k, _move_tensors(v, resource_handles[0].device))
for k, v in sorted(grouped_args.items()))
with ops.device(resource_handles[0].device):
return batch[0].op(resource_handles, stamp, **grouped_args)
def run_handler_scheduled_ops(per_handler_ops, stamp, worker_device):
"""Given a dictionary of ops for each handler, runs them in batch."""
batched_ops = collections.OrderedDict()
# Group the ops by their batching_key. Ops that share the same batching key
# can be executed together.
for handler in per_handler_ops.keys():
for op in per_handler_ops[handler]:
key = (op.batching_key(), op.batch_runner_fn())
batched_ops.setdefault(key, []).append(op)
op_results = {}
for batch in batched_ops.values():
# Run each of the batched ops using its runner.
results = batch[0].batch_runner_fn()(batch, stamp)
# If the result is a tuple, move each entry in the tuple in one RPC.
if isinstance(results, tuple):
results = tuple(
_move_tensors(result, worker_device) for result in results)
# Once all the results are on the worker, create an individual tuple for
# each scheduled op request.
for i in range(len(batch)):
op_results[batch[i]] = tuple(result[i] for result in results)
# If the result is an `ops.Operation`, the batched call didn't have any
# outputs, so use the `ops.Operation` itself as the result for all the
# scheduled ops.
elif isinstance(results, ops.Operation):
for i in range(len(batch)):
op_results[batch[i]] = results
else:
raise ValueError("Unknown type of result %s." % results)
handler_results = collections.defaultdict(list)
# Dispatch the results of the ScheduledOps to the handlers that requested
# them.
for handler in per_handler_ops.keys():
for op in per_handler_ops[handler]:
handler_results[handler].append(op_results[op])
return handler_results
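# Illustrative sketch, not part of the original module: two scheduled ops that
# wrap the same runner and whose resource handles live on the same device get
# the same batching key, so run_handler_scheduled_ops would execute them in a
# single batched call. The stand-in handle and runner below are hypothetical.
def _example_batching_key_usage():
  fake_handle = constant_op.constant(0, dtype=dtypes.int32)  # stand-in handle
  def fake_runner(handles, stamp, **kwargs):  # stand-in for a stamped resource op
    del handles, stamp, kwargs
  op_a = ScheduledStampedResourceOp(fake_handle, fake_runner, delta=1)
  op_b = ScheduledStampedResourceOp(fake_handle, fake_runner, delta=2)
  # Same (device, op) pair, hence the same batch.
  return op_a.batching_key() == op_b.batching_key()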
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/ops/batch_ops_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader
# pylint: enable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import gen_model_ops
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_deserialize
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_serialize
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_stamp_token
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_used_handlers
# pylint: enable=unused-import
from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
ops.NotDifferentiable("TreeEnsembleVariable")
ops.NotDifferentiable("TreeEnsembleSerialize")
ops.NotDifferentiable("TreeEnsembleDeserialize")
class TreeEnsembleVariableSavable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for TreeEnsembleVariable."""
def __init__(self, tree_ensemble_handle, create_op, name):
"""Creates a TreeEnsembleVariableSavable object.
Args:
tree_ensemble_handle: handle to the tree ensemble variable.
create_op: the op to initialize the variable.
name: the name to save the tree ensemble variable under.
"""
stamp_token, ensemble_config = tree_ensemble_serialize(tree_ensemble_handle)
# slice_spec is useful for saving a slice from a variable.
# It's not meaningful for the tree ensemble variable, so we just pass an empty
# value.
slice_spec = ""
specs = [
saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec,
name + "_stamp"),
saver.BaseSaverBuilder.SaveSpec(ensemble_config, slice_spec,
name + "_config"),
]
super(TreeEnsembleVariableSavable, self).__init__(tree_ensemble_handle,
specs, name)
self._tree_ensemble_handle = tree_ensemble_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree ensemble from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree ensemble variable.
"""
with ops.control_dependencies([self._create_op]):
return tree_ensemble_deserialize(
self._tree_ensemble_handle,
stamp_token=restored_tensors[0],
tree_ensemble_config=restored_tensors[1])
class TreeEnsembleVariable(tracking.TrackableResource):
"""A Tree ensemble model."""
def __init__(self, stamp_token, tree_ensemble_config, name, container=None):
self._stamp_token = stamp_token
self._tree_ensemble_config = tree_ensemble_config
self._name = name
self._container = container
self._init_op = None
super(TreeEnsembleVariable, self).__init__()
def _create_resource(self):
return gen_model_ops.decision_tree_ensemble_resource_handle_op(
self._container, shared_name=self._name, name=self._name)
def _initialize(self):
return gen_model_ops.create_tree_ensemble_variable(
self.resource_handle, self._stamp_token, self._tree_ensemble_config)
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
return gen_model_ops.tree_ensemble_is_initialized_op(self.resource_handle)
def _gather_saveables_for_checkpoint(self):
return {
self.resource_handle.op.name + "/tree_ensemble_variable":
functools.partial(
TreeEnsembleVariableSavable,
tree_ensemble_handle=self.resource_handle,
create_op=self.initializer)
}
def tree_ensemble_variable(stamp_token,
tree_ensemble_config,
name,
container=None):
r"""Creates a tree ensemble model and returns a handle to it.
Args:
stamp_token: The initial stamp token value for the ensemble resource.
tree_ensemble_config: A `Tensor` of type `string`. Serialized proto of the
tree ensemble.
name: A name for the ensemble variable.
container: An optional `string`. Defaults to `""`.
Returns:
A `Tensor` of type mutable `string`. The handle to the tree ensemble.
"""
with ops.name_scope(name, "TreeEnsembleVariable") as name:
tree_ensemble_var = TreeEnsembleVariable(stamp_token, tree_ensemble_config,
name, container)
resource_handle = tree_ensemble_var.resource_handle
create_op = tree_ensemble_var.initializer
is_initialized_op = tree_ensemble_var.is_initialized()
# Adds the variable to the savable list.
saveable = TreeEnsembleVariableSavable(resource_handle, create_op,
resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
resources.register_resource(resource_handle, create_op, is_initialized_op)
return resource_handle
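# Illustrative usage sketch, not part of the original module, mirroring how the
# tests in this package create an ensemble: stamp token 0 and an empty
# serialized config. The variable name is hypothetical; run the returned ops in
# a TF 1.x session after initializing shared resources.
def _example_tree_ensemble_variable_usage():
  ensemble_handle = tree_ensemble_variable(
      stamp_token=0, tree_ensemble_config="", name="example_tree_ensemble")
  stamp_token, config = tree_ensemble_serialize(ensemble_handle)
  return ensemble_handle, stamp_token, config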
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/ops/model_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loads the _boosted_trees_ops.so when the binary is not statically linked."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.framework import errors
from tensorflow.python.platform import resource_loader
# Conditionally load ops, they might already be statically linked in.
try:
loader.load_op_library(
resource_loader.get_path_to_datafile('_boosted_trees_ops.so'))
except (errors.NotFoundError, IOError):
print('Error loading _boosted_trees_ops.so')
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/ops/boosted_trees_ops_loader.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stats Accumulator ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader
# pylint: enable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import gen_stats_accumulator_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import resources
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
# Pattern to remove all non-alphanumeric characters from a string.
_PATTERN = re.compile(r"[\W_]+")
class StatsAccumulatorSaveable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for StatsAccumulator."""
def __init__(self, resource_handle, create_op, is_scalar, name):
self._create_op = create_op
self._resource_handle = resource_handle
self._is_scalar = is_scalar
slice_spec = ""
saver_name = self._resource_handle.name
(stamp_token, num_updates, partition_ids, feature_ids, gradients,
hessians) = self.serialize()
specs = [
saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec,
saver_name + "_stamp"),
saver.BaseSaverBuilder.SaveSpec(num_updates, slice_spec,
saver_name + "_num_updates"),
saver.BaseSaverBuilder.SaveSpec(partition_ids, slice_spec,
saver_name + "_partition_ids"),
saver.BaseSaverBuilder.SaveSpec(feature_ids, slice_spec,
saver_name + "_feature_ids"),
saver.BaseSaverBuilder.SaveSpec(gradients, slice_spec,
saver_name + "_gradients"),
saver.BaseSaverBuilder.SaveSpec(hessians, slice_spec,
saver_name + "hessians"),
]
super(StatsAccumulatorSaveable, self).__init__(self._resource_handle, specs,
name)
def serialize(self):
"""Serializes the stats accumulator state."""
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_serialize(
self._resource_handle)
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_serialize(
self._resource_handle)
def deserialize(self, stamp_token, num_updates, partition_ids, feature_ids,
gradients, hessians):
"""Resets the stats accumulator with the serialized state."""
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_deserialize(
self._resource_handle, stamp_token, num_updates, partition_ids,
feature_ids, gradients, hessians)
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_deserialize(
self._resource_handle, stamp_token, num_updates, partition_ids,
feature_ids, gradients, hessians)
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree ensemble from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree ensemble variable.
"""
with ops.control_dependencies([self._create_op]):
return self.deserialize(
stamp_token=restored_tensors[0],
num_updates=restored_tensors[1],
partition_ids=restored_tensors[2],
feature_ids=restored_tensors[3],
gradients=restored_tensors[4],
hessians=restored_tensors[5])
class StatsAccumulator(tracking.TrackableResource):
"""A resource that allows to accumulate gradients and hessians.
For consistency guarantees, we use read and write stamp tokens.
The stamp token on the resource is updated with StatsAccumulator.flush.
Calls to StatsAccumulator.add that don't provide the current stamp token are
ignored.
"""
def __init__(self,
stamp_token,
gradient_shape,
hessian_shape,
name=None,
container=None):
"""Creates a stats accumulator and returns a handle to it.
Args:
stamp_token: An int64, initial value to use for the stamp token.
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
name: A name for the stats accumulator variable.
container: An optional `string`. Defaults to `""`.
Returns:
A `Tensor` of type mutable `string`. The handle to the stats accumulator.
"""
self._stamp_token = stamp_token
self._gradient_shape = gradient_shape
self._hessian_shape = hessian_shape
self._container = container
if (gradient_shape == tensor_shape.scalar() and
hessian_shape == tensor_shape.scalar()):
self._is_scalar = True
else:
self._is_scalar = False
if name is not None:
name = _PATTERN.sub("", name)
with ops.name_scope(name, "StatsAccumulator") as name:
self._name = name
self._resource_handle = self._create_resource()
self._init_op = self._initialize()
is_initialized_op = self.is_initialized()
resources.register_resource(self.resource_handle, self.initializer,
is_initialized_op)
self._saveable = StatsAccumulatorSaveable(
self.resource_handle, self.initializer, self._is_scalar, name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
def _create_resource(self):
if self._is_scalar:
return (
gen_stats_accumulator_ops.stats_accumulator_scalar_resource_handle_op(
self._container, self._name, name=self._name))
else:
return (
gen_stats_accumulator_ops.stats_accumulator_tensor_resource_handle_op(
self._container, self._name, name=self._name))
def _initialize(self):
if self._is_scalar:
return gen_stats_accumulator_ops.create_stats_accumulator_scalar(
self.resource_handle, self._stamp_token)
else:
return gen_stats_accumulator_ops.create_stats_accumulator_tensor(
self.resource_handle, self._stamp_token,
self._gradient_shape.as_list(), self._hessian_shape.as_list())
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_is_initialized(
self.resource_handle)
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_is_initialized(
self.resource_handle)
@property
def saveable(self):
return self._saveable
def _gather_saveables_for_checkpoint(self):
return {"stats_accumulator", self.saveable}
def add(self, stamp_token, partition_ids, feature_ids, gradients, hessians):
"""Updates the stats accumulator."""
partition_ids, feature_ids, gradients, hessians = (self._make_summary(
partition_ids, feature_ids, gradients, hessians))
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_add(
[self.resource_handle], stamp_token, [partition_ids], [feature_ids],
[gradients], [hessians])
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_add(
[self.resource_handle], stamp_token, [partition_ids], [feature_ids],
[gradients], [hessians])
def schedule_add(self, partition_ids, feature_ids, gradients, hessians):
"""Schedules an update to the stats accumulator."""
partition_ids, feature_ids, gradients, hessians = (self._make_summary(
partition_ids, feature_ids, gradients, hessians))
if self._is_scalar:
return batch_ops_utils.ScheduledStampedResourceOp(
op=gen_stats_accumulator_ops.stats_accumulator_scalar_add,
resource_handle=self.resource_handle,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians)
else:
return batch_ops_utils.ScheduledStampedResourceOp(
op=gen_stats_accumulator_ops.stats_accumulator_tensor_add,
resource_handle=self.resource_handle,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians)
def _make_summary(self, partition_ids, feature_ids, gradients, hessians):
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_make_summary(
partition_ids, feature_ids, gradients, hessians)
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_make_summary(
partition_ids, feature_ids, gradients, hessians)
def flush(self, stamp_token, next_stamp_token):
"""Flushes the stats accumulator."""
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_flush(
self.resource_handle, stamp_token, next_stamp_token)
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_flush(
self.resource_handle, stamp_token, next_stamp_token)
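# Illustrative usage sketch, not part of the original module: a scalar stats
# accumulator (scalar gradient and hessian shapes) receiving one batch of
# per-example statistics. The accumulator name and tensor values are made up,
# and the [num_examples, 2] feature_ids layout is an assumption based on the
# conventions used elsewhere in contrib/boosted_trees.
def _example_stats_accumulator_usage():
  from tensorflow.python.framework import constant_op  # local import, sketch only
  from tensorflow.python.framework import dtypes  # local import, sketch only
  accumulator = StatsAccumulator(
      stamp_token=0,
      gradient_shape=tensor_shape.scalar(),
      hessian_shape=tensor_shape.scalar(),
      name="example_stats_acc")
  add_op = accumulator.add(
      stamp_token=0,
      partition_ids=constant_op.constant([0, 0, 1], dtype=dtypes.int32),
      feature_ids=constant_op.constant([[2, 0], [3, 0], [2, 0]],
                                       dtype=dtypes.int64),
      gradients=constant_op.constant([0.1, -0.3, 0.5], dtype=dtypes.float32),
      hessians=constant_op.constant([1.0, 1.0, 1.0], dtype=dtypes.float32))
  flush_op = accumulator.flush(stamp_token=0, next_stamp_token=1)
  return add_op, flush_op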
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/python/ops/stats_accumulator_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Regression on Boston housing data using DNNBoostedTreeCombinedRegressor.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/boston_combined.py \
--batch_size=404 --output_dir="/tmp/boston" \
--dnn_hidden_units="8,4" --dnn_steps_to_train=1000 \
--tree_depth=4 --tree_learning_rate=0.1 \
--num_trees=100 --tree_l2=0.001 --num_eval_steps=1 \
--vmodule=training_ops=1
When training is done, mean squared error on eval data is reported.
Point tensorboard to the directory for the run to see how the training
progresses:
tensorboard --logdir=/tmp/boston
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch.dnn_tree_combined_estimator import DNNBoostedTreeCombinedRegressor
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
_BOSTON_NUM_FEATURES = 13
def _get_estimator(output_dir, feature_cols):
"""Configures DNNBoostedTreeCombinedRegressor based on flags."""
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = (
FLAGS.tree_learning_rate)
learner_config.regularization.l1 = 0.0
learner_config.regularization.l2 = FLAGS.tree_l2
learner_config.constraints.max_tree_depth = FLAGS.tree_depth
run_config = tf.contrib.learn.RunConfig(save_summary_steps=1)
# Create a DNNBoostedTreeCombinedRegressor estimator.
estimator = DNNBoostedTreeCombinedRegressor(
dnn_hidden_units=[int(x) for x in FLAGS.dnn_hidden_units.split(",")],
dnn_feature_columns=feature_cols,
tree_learner_config=learner_config,
num_trees=FLAGS.num_trees,
# This should be the number of examples. For large datasets it can be
# larger than the batch_size.
tree_examples_per_layer=FLAGS.batch_size,
model_dir=output_dir,
config=run_config,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=FLAGS.dnn_steps_to_train)
return estimator
def _make_experiment_fn(output_dir):
"""Creates experiment for DNNBoostedTreeCombinedRegressor."""
(x_train, y_train), (x_test,
y_test) = tf.keras.datasets.boston_housing.load_data()
train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={"x": x_train},
y=y_train,
batch_size=FLAGS.batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={"x": x_test}, y=y_test, num_epochs=1, shuffle=False)
feature_columns = [
feature_column.real_valued_column("x", dimension=_BOSTON_NUM_FEATURES)
]
feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
export_strategies = [
saved_model_export_utils.make_export_strategy(serving_input_fn)]
return tf.contrib.learn.Experiment(
estimator=_get_estimator(output_dir, feature_columns),
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=None,
eval_steps=FLAGS.num_eval_steps,
eval_metrics=None,
export_strategies=export_strategies)
def main(unused_argv):
learn_runner.run(
experiment_fn=_make_experiment_fn,
output_dir=FLAGS.output_dir,
schedule="train_and_evaluate")
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
# Define the list of flags that users can change.
parser.add_argument(
"--batch_size",
type=int,
default=1000,
help="The batch size for reading data.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Choose the dir for the output.")
parser.add_argument(
"--num_eval_steps",
type=int,
default=1,
help="The number of steps to run evaluation for.")
# Flags for configuring DNNBoostedTreeCombinedRegressor.
parser.add_argument(
"--dnn_hidden_units",
type=str,
default="8,4",
help="Hidden layers for DNN.")
parser.add_argument(
"--dnn_steps_to_train",
type=int,
default=1000,
help="Number of steps to train DNN.")
parser.add_argument(
"--tree_depth", type=int, default=4, help="Maximum depth of trees.")
parser.add_argument(
"--tree_l2", type=float, default=1.0, help="l2 regularization per batch.")
parser.add_argument(
"--tree_learning_rate",
type=float,
default=0.1,
help=("Learning rate (shrinkage weight) with which each "
"new tree is added."))
parser.add_argument(
"--num_trees",
type=int,
default=None,
required=True,
help="Number of trees to grow before stopping.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/examples/boston_combined.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Demonstrates a regression on Boston housing data.
This example demonstrates how to run experiments with TF Boosted Trees on
a regression dataset. We split all the data into 20% test and 80% train,
and use l2 loss and l2 regularization.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/boston.py \
--batch_size=404 --output_dir="/tmp/boston" --depth=4 --learning_rate=0.1 \
--num_eval_steps=1 --num_trees=500 --l2=0.001 \
--vmodule=training_ops=1
When training is done, mean squared error on eval data is reported.
Point tensorboard to the directory for the run to see how the training
progresses:
tensorboard --logdir=/tmp/boston
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch import custom_export_strategy
from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeRegressor
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn import learn_runner
from tensorflow.python.util import compat
_BOSTON_NUM_FEATURES = 13
# Main config - creates a TF Boosted Trees Estimator based on flags.
def _get_tfbt(output_dir, feature_cols):
"""Configures TF Boosted Trees estimator based on flags."""
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = FLAGS.learning_rate
learner_config.regularization.l1 = 0.0
learner_config.regularization.l2 = FLAGS.l2
learner_config.constraints.max_tree_depth = FLAGS.depth
run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300)
# Create a TF Boosted trees regression estimator.
estimator = GradientBoostedDecisionTreeRegressor(
learner_config=learner_config,
# This should be the number of examples used to grow each tree layer. For
# large datasets it can be larger than the batch_size.
examples_per_layer=FLAGS.batch_size,
feature_columns=feature_cols,
label_dimension=1,
model_dir=output_dir,
num_trees=FLAGS.num_trees,
center_bias=False,
config=run_config)
return estimator
def _convert_fn(dtec, sorted_feature_names, num_dense, num_sparse_float,
num_sparse_int, export_dir, unused_eval_result):
universal_format = custom_export_strategy.convert_to_universal_format(
dtec, sorted_feature_names, num_dense, num_sparse_float, num_sparse_int)
with tf.gfile.GFile(os.path.join(
compat.as_bytes(export_dir), compat.as_bytes("tree_proto")), "w") as f:
f.write(str(universal_format))
def _make_experiment_fn(output_dir):
"""Creates experiment for gradient boosted decision trees."""
(x_train, y_train), (x_test,
y_test) = tf.keras.datasets.boston_housing.load_data()
train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={"x": x_train},
y=y_train,
batch_size=FLAGS.batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={"x": x_test}, y=y_test, num_epochs=1, shuffle=False)
feature_columns = [
feature_column.real_valued_column("x", dimension=_BOSTON_NUM_FEATURES)
]
feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = tf.contrib.learn.utils.build_parsing_serving_input_fn(
feature_spec)
# An export strategy that outputs the feature importance and also exports
# the internal tree representation in another format.
export_strategy = custom_export_strategy.make_custom_export_strategy(
"exports",
convert_fn=_convert_fn,
feature_columns=feature_columns,
export_input_fn=serving_input_fn)
return tf.contrib.learn.Experiment(
estimator=_get_tfbt(output_dir, feature_columns),
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=None,
eval_steps=FLAGS.num_eval_steps,
eval_metrics=None,
export_strategies=[export_strategy])
def main(unused_argv):
learn_runner.run(
experiment_fn=_make_experiment_fn,
output_dir=FLAGS.output_dir,
schedule="train_and_evaluate")
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
# Define the list of flags that users can change.
parser.add_argument(
"--batch_size",
type=int,
default=1000,
help="The batch size for reading data.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Choose the dir for the output.")
parser.add_argument(
"--num_eval_steps",
type=int,
default=1,
help="The number of steps to run evaluation for.")
# Flags for gradient boosted trees config.
parser.add_argument(
"--depth", type=int, default=4, help="Maximum depth of weak learners.")
parser.add_argument(
"--l2", type=float, default=1.0, help="l2 regularization per batch.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.1,
help="Learning rate (shrinkage weight) with which each new tree is added."
)
parser.add_argument(
"--num_trees",
type=int,
default=None,
required=True,
help="Number of trees to grow before stopping.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/examples/boston.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Demonstrates multiclass MNIST TF Boosted trees example.
This example demonstrates how to run experiments with TF Boosted Trees on
a binary dataset. We use digits 4 and 9 from the original MNIST dataset.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/binary_mnist.py \
--output_dir="/tmp/binary_mnist" --depth=4 --learning_rate=0.3 \
--batch_size=10761 --examples_per_layer=10761 --eval_batch_size=1030 \
--num_eval_steps=1 --num_trees=10 --l2=1 --vmodule=training_ops=1
When training is done, accuracy on eval data is reported. Point tensorboard
to the directory for the run to see how the training progresses:
tensorboard --logdir=/tmp/binary_mnist
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeClassifier
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.learn import learn_runner
def get_input_fn(data,
batch_size,
capacity=10000,
min_after_dequeue=3000):
"""Input function over MNIST data."""
# Keep only 4 and 9 digits.
ids = np.where((data.labels == 4) | (data.labels == 9))
images = data.images[ids]
labels = data.labels[ids]
# Map digit 4 to label 1 and digit 9 to label 0.
labels = labels == 4
def _input_fn():
"""Prepare features and labels."""
images_batch, labels_batch = tf.train.shuffle_batch(
tensors=[images,
labels.astype(np.int32)],
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=True,
num_threads=4)
features_map = {"images": images_batch}
return features_map, labels_batch
return _input_fn
# Main config - creates a TF Boosted Trees Estimator based on flags.
def _get_tfbt(output_dir):
"""Configures TF Boosted Trees estimator based on flags."""
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = FLAGS.learning_rate
learner_config.regularization.l1 = 0.0
learner_config.regularization.l2 = FLAGS.l2 / FLAGS.examples_per_layer
learner_config.constraints.max_tree_depth = FLAGS.depth
growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
learner_config.growing_mode = growing_mode
run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300)
# Create a TF Boosted trees estimator that can take in custom loss.
estimator = GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
examples_per_layer=FLAGS.examples_per_layer,
model_dir=output_dir,
num_trees=FLAGS.num_trees,
center_bias=False,
config=run_config)
return estimator
def _make_experiment_fn(output_dir):
"""Creates experiment for gradient boosted decision trees."""
data = tf.contrib.learn.datasets.mnist.load_mnist()
train_input_fn = get_input_fn(data.train, FLAGS.batch_size)
eval_input_fn = get_input_fn(data.validation, FLAGS.eval_batch_size)
return tf.contrib.learn.Experiment(
estimator=_get_tfbt(output_dir),
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=None,
eval_steps=FLAGS.num_eval_steps,
eval_metrics=None)
def main(unused_argv):
learn_runner.run(
experiment_fn=_make_experiment_fn,
output_dir=FLAGS.output_dir,
schedule="train_and_evaluate")
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
# Define the list of flags that users can change.
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Choose the dir for the output.")
parser.add_argument(
"--batch_size",
type=int,
default=1000,
help="The batch size for reading data.")
parser.add_argument(
"--eval_batch_size",
type=int,
default=1000,
help="Size of the batch for eval.")
parser.add_argument(
"--num_eval_steps",
type=int,
default=1,
help="The number of steps to run evaluation for.")
# Flags for gradient boosted trees config.
parser.add_argument(
"--depth", type=int, default=4, help="Maximum depth of weak learners.")
parser.add_argument(
"--l2", type=float, default=1.0, help="l2 regularization per batch.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.1,
help="Learning rate (shrinkage weight) with which each new tree is added."
)
parser.add_argument(
"--examples_per_layer",
type=int,
default=1000,
help="Number of examples to accumulate stats for per layer.")
parser.add_argument(
"--num_trees",
type=int,
default=None,
required=True,
help="Number of trees to grow before stopping.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/examples/binary_mnist.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Demonstrates multiclass MNIST TF Boosted trees example.
This example demonstrates how to run experiments with TF Boosted Trees on
a MNIST dataset. We are using layer by layer boosting with diagonal hessian
strategy for multiclass handling, and cross entropy loss.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/mnist.py \
--output_dir="/tmp/mnist" --depth=4 --learning_rate=0.3 --batch_size=60000 \
--examples_per_layer=60000 --eval_batch_size=10000 --num_eval_steps=1 \
--num_trees=10 --l2=1 --vmodule=training_ops=1
When training is done, accuracy on eval data is reported. Point tensorboard
to the directory for the run to see how the training progresses:
tensorboard --logdir=/tmp/mnist
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeClassifier
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.learn import learn_runner
def get_input_fn(dataset_split,
batch_size,
capacity=10000,
min_after_dequeue=3000):
"""Input function over MNIST data."""
def _input_fn():
"""Prepare features and labels."""
images_batch, labels_batch = tf.train.shuffle_batch(
tensors=[dataset_split.images,
dataset_split.labels.astype(np.int32)],
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=True,
num_threads=4)
features_map = {"images": images_batch}
return features_map, labels_batch
return _input_fn
# Main config - creates a TF Boosted Trees Estimator based on flags.
def _get_tfbt(output_dir):
"""Configures TF Boosted Trees estimator based on flags."""
learner_config = learner_pb2.LearnerConfig()
num_classes = 10
learner_config.learning_rate_tuner.fixed.learning_rate = FLAGS.learning_rate
learner_config.num_classes = num_classes
learner_config.regularization.l1 = 0.0
learner_config.regularization.l2 = FLAGS.l2 / FLAGS.examples_per_layer
learner_config.constraints.max_tree_depth = FLAGS.depth
growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
learner_config.growing_mode = growing_mode
run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300)
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
# Create a TF Boosted trees estimator that can take in custom loss.
estimator = GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=num_classes,
examples_per_layer=FLAGS.examples_per_layer,
model_dir=output_dir,
num_trees=FLAGS.num_trees,
center_bias=False,
config=run_config)
return estimator
def _make_experiment_fn(output_dir):
"""Creates experiment for gradient boosted decision trees."""
data = tf.contrib.learn.datasets.mnist.load_mnist()
train_input_fn = get_input_fn(data.train, FLAGS.batch_size)
eval_input_fn = get_input_fn(data.validation, FLAGS.eval_batch_size)
return tf.contrib.learn.Experiment(
estimator=_get_tfbt(output_dir),
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=None,
eval_steps=FLAGS.num_eval_steps,
eval_metrics=None)
def main(unused_argv):
learn_runner.run(
experiment_fn=_make_experiment_fn,
output_dir=FLAGS.output_dir,
schedule="train_and_evaluate")
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
# Define the list of flags that users can change.
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Choose the dir for the output.")
parser.add_argument(
"--batch_size",
type=int,
default=1000,
help="The batch size for reading data.")
parser.add_argument(
"--eval_batch_size",
type=int,
default=1000,
help="Size of the batch for eval.")
parser.add_argument(
"--num_eval_steps",
type=int,
default=1,
help="The number of steps to run evaluation for.")
# Flags for gradient boosted trees config.
parser.add_argument(
"--depth", type=int, default=4, help="Maximum depth of weak learners.")
parser.add_argument(
"--l2", type=float, default=1.0, help="l2 regularization per batch.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.1,
help="Learning rate (shrinkage weight) with which each new tree is added."
)
parser.add_argument(
"--examples_per_layer",
type=int,
default=1000,
help="Number of examples to accumulate stats for per layer.")
parser.add_argument(
"--num_trees",
type=int,
default=None,
required=True,
help="Number of trees to grow before stopping.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/examples/mnist.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of handler for split nodes for float columns.
The general idea in batch split finding is that each handler will accumulate its
own statistics on multiple workers. After some steps, the master runs the
make_splits() sub-graph of each handler, and each handler returns its best split
per partition.
The way we ensure consistency of statistics is by using stamp_tokens for read
and write operations. During each update of the model, a new stamp token is
created. This stamp token makes sure that updates from the previous iterations
are not included in the statistics for this iteration.
Inequality splits for float features are created similarly to the approximate
algorithm described in https://arxiv.org/pdf/1603.02754v3.pdf.
Weighted quantiles of the feature columns are computed in a distributed fashion
using quantile_ops.quantile_accumulator.
After a certain number of steps of parallel accumulation of quantile statistics,
we decide on bucket boundaries. These bucket boundaries are then used for the
next N steps to accumulate gradients and hessians per bucket.
In this implementation, we gather quantile statistics and gradient statistics
concurrently. That means that we don't wait until we have enough quantile
statistics for bucketization before we start gathering gradient stats. Instead
during each step we create quantile stats for the next iteration and use the
previous quantile buckets for gradient stats accumulation.
In make_splits, we do these steps:
1) Get the buckets that were used for creating the gradient stats.
2) Create bucket boundaries for the next N iterations and clear the accumulated
quantile stats.
3) Get the accumulated gradient stats and clear the accumulator. This step can
   run in parallel with step 2.
4) For each leaf node in the current tree (partition):
4.1) Get the overall gain computed with gradients and hessians of all
examples that end up in this partition.
4.2) Compute tensors of left and right cumulative sum of gradients, hessians
and gain. The first dimension of these tensors are the bucket
boundaries.
4.3) Find the gains for all bucket boundaries:
split_gains = left_gain + right_gain - overall_gain.
4.4) Find the bucket boundary that has the best gain (argmax(split_gains))
4.5) For the sparse handler, we also consider the gain when the examples go
     to the left child and when they go to the right child, and pick the
     default direction that yields the most gain.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.boosted_trees.lib.learner.batch import base_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.python.ops import gen_quantile_ops
from tensorflow.contrib.boosted_trees.python.ops import gen_stats_accumulator_ops
from tensorflow.contrib.boosted_trees.python.ops import quantile_ops
from tensorflow.contrib.boosted_trees.python.ops import split_handler_ops
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
_BIAS_FEATURE_ID = -1
# Pattern to remove all non-alphanumeric characters from a string.
_PATTERN = re.compile(r"[\W_]+")
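# The following is a minimal, pure-Python sketch of steps 4.1-4.4 from the
# module docstring: given per-bucket gradient/hessian sums for one partition,
# scan every bucket boundary and pick the one maximizing
# split_gain = left_gain + right_gain - overall_gain. It only illustrates the
# regularized gain arithmetic (consistent with the hand-computed values in the
# unit tests in this package); the production kernels live in
# split_handler_ops. The function is not used by the library.
def _illustrative_best_split(bucket_gradients, bucket_hessians,
                             l1_regularization=0.0, l2_regularization=1.0):
  """Returns (best_bucket_index, best_split_gain) for a single partition."""

  def _node_gain(g, h):
    # Soft-threshold the gradient sum by l1 and shift the hessian sum by l2.
    if g > l1_regularization:
      g -= l1_regularization
    elif g < -l1_regularization:
      g += l1_regularization
    else:
      g = 0.0
    return g * g / (h + l2_regularization)

  total_g = sum(bucket_gradients)
  total_h = sum(bucket_hessians)
  overall_gain = _node_gain(total_g, total_h)
  best_bucket, best_gain = None, float("-inf")
  left_g, left_h = 0.0, 0.0
  for bucket, (g, h) in enumerate(zip(bucket_gradients, bucket_hessians)):
    # The left child takes all buckets up to and including the candidate
    # boundary; the right child takes the remainder.
    left_g += g
    left_h += h
    split_gain = (_node_gain(left_g, left_h) +
                  _node_gain(total_g - left_g, total_h - left_h) -
                  overall_gain)
    if split_gain > best_gain:
      best_bucket, best_gain = bucket, split_gain
  return best_bucket, best_gain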
class InequalitySplitHandler(base_split_handler.BaseSplitHandler):
"""Base class for handlers of inequality splits."""
def __init__(self,
l1_regularization,
l2_regularization,
tree_complexity_regularization,
min_node_weight,
feature_column_group_id,
epsilon,
num_quantiles,
gradient_shape,
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
loss_uses_sum_reduction=False,
name=None):
"""Initialize the internal state for this split handler.
Args:
l1_regularization: L1 regularization applied for this split handler.
l2_regularization: L2 regularization applied for this split handler.
tree_complexity_regularization: Tree complexity regularization applied
for this split handler.
min_node_weight: Minimum sum of weights of examples in each partition to
be considered for splitting.
feature_column_group_id: Feature column group index.
epsilon: A float, the error bound for quantile computation.
num_quantiles: An int, the number of buckets to create from the histogram.
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
multiclass_strategy: Strategy describing how to treat multiclass problems.
init_stamp_token: A tensor containing a scalar for the initial stamp of the
  stamped objects.
loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
SUM or MEAN reduction was used for the loss.
name: An optional handler name.
"""
super(InequalitySplitHandler, self).__init__(
name=name,
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=feature_column_group_id,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=multiclass_strategy,
loss_uses_sum_reduction=loss_uses_sum_reduction)
self._stats_accumulator = stats_accumulator_ops.StatsAccumulator(
init_stamp_token,
gradient_shape,
hessian_shape,
name="StatsAccumulator/{}".format(self._name))
# Allocate both stats accumulator and quantile accumulator on the same
# device so that we can build splits with fewer RPCs.
with ops.colocate_with(self._stats_accumulator.resource_handle):
self._quantile_accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token,
epsilon=epsilon,
num_quantiles=num_quantiles,
name="QuantileAccumulator/{}".format(self._name))
def reset(self, stamp_token, next_stamp_token):
reset_1 = self._stats_accumulator.flush(stamp_token, next_stamp_token)
reset_2 = self._quantile_accumulator.flush(stamp_token, next_stamp_token)
return control_flow_ops.group([reset_1, reset_2])
class DenseSplitHandler(InequalitySplitHandler):
"""Computes stats and finds the best inequality splits on dense columns."""
def __init__(self,
dense_float_column,
l1_regularization,
l2_regularization,
tree_complexity_regularization,
min_node_weight,
feature_column_group_id,
epsilon,
num_quantiles,
gradient_shape,
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
loss_uses_sum_reduction=False,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE,
name=None):
"""Initialize the internal state for this split handler.
Args:
dense_float_column: A `Tensor` column associated with this handler.
l1_regularization: L1 regularization applied for this split handler.
l2_regularization: L2 regularization applied for this split handler.
tree_complexity_regularization: Tree complexity regularization applied
for this split handler.
min_node_weight: Minimum sum of weights of examples in each partition to
be considered for splitting.
feature_column_group_id: Feature column group index.
epsilon: A float, the error bound for quantile computation.
num_quantiles: An int, the number of buckets to create from the histogram.
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
multiclass_strategy: Strategy describing how to treat multiclass problems.
init_stamp_token: A tensor containing a scalar for the initial stamp of the
  stamped objects.
loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
SUM or MEAN reduction was used for the loss.
weak_learner_type: Specifies the type of weak learner to use.
name: An optional handler name.
"""
super(DenseSplitHandler, self).__init__(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=feature_column_group_id,
epsilon=epsilon,
num_quantiles=num_quantiles,
init_stamp_token=init_stamp_token,
name=name,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=multiclass_strategy,
loss_uses_sum_reduction=loss_uses_sum_reduction)
self._dense_float_column = dense_float_column
self._weak_learner_type = weak_learner_type
# Register the dense_make_stats_update function as an op in the graph.
g = ops.get_default_graph()
dense_make_stats_update.add_to_graph(g)
def scheduled_reads(self):
return [self._quantile_accumulator.schedule_get_buckets()]
def update_stats(self, stamp_token, example_partition_ids, gradients,
hessians, empty_gradients, empty_hessians, weights,
is_active, scheduled_reads):
"""Updates the state for dense split handler.
Args:
stamp_token: An int32 scalar tensor containing the current stamp token.
example_partition_ids: A dense tensor, containing an int32 for each
example which is the partition id that the example ends up in.
gradients: A dense tensor of gradients.
hessians: A dense tensor of hessians.
empty_gradients: A dense empty tensor of the same shape (for dimensions >
0) as gradients.
empty_hessians: A dense empty tensor of the same shape (for dimensions >
0) as hessians.
weights: A dense float32 tensor with a weight for each example.
is_active: A boolean tensor that says if this handler is active or not.
One value for the current layer and one value for the next layer.
scheduled_reads: List of scheduled reads for this handler.
Returns:
The op that updates the stats for this handler.
"""
name = _PATTERN.sub("", self._name)
with ops.name_scope(name, "DenseSplitHandler"):
are_buckets_ready, buckets = scheduled_reads[0]
(quantile_values, quantile_weights, example_partition_ids,
feature_ids, gradients, hessians) = dense_make_stats_update(
is_active, are_buckets_ready, self._dense_float_column, buckets,
example_partition_ids, gradients, hessians, weights, empty_gradients,
empty_hessians)
update_quantiles = self._quantile_accumulator.schedule_add_summary(
stamp_token=stamp_token,
column=quantile_values,
example_weights=quantile_weights)
update_stats = self._stats_accumulator.schedule_add(
example_partition_ids, feature_ids, gradients, hessians)
return control_flow_ops.no_op(), [update_quantiles, update_stats]
def make_splits(self, stamp_token, next_stamp_token, class_id):
"""Create the best split using the accumulated stats and flush the state."""
if (self._gradient_shape == tensor_shape.scalar() and
self._hessian_shape == tensor_shape.scalar()):
handler = make_dense_split_scalar
else:
handler = make_dense_split_tensor
are_splits_ready, partition_ids, gains, split_infos = (
handler(self._quantile_accumulator.resource_handle,
self._stats_accumulator.resource_handle, stamp_token,
next_stamp_token, self._multiclass_strategy, class_id,
self._feature_column_group_id, self._l1_regularization,
self._l2_regularization, self._tree_complexity_regularization,
self._min_node_weight, self._loss_uses_sum_reduction,
self._weak_learner_type))
return are_splits_ready, partition_ids, gains, split_infos
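# A minimal sketch of how a DenseSplitHandler is typically wired up (the
# argument values below are illustrative, and the function is not used by the
# library). The lifecycle is: construct the handler, schedule stats updates
# under the current stamp token via update_stats(), then call make_splits()
# with the next stamp token to read back the best split per partition and
# flush the accumulators.
def _example_dense_split_handler():
  """Builds an illustrative scalar-gradient DenseSplitHandler (unused)."""
  dense_column = constant_op.constant([0.3, 0.9, 2.5, 4.0])
  return DenseSplitHandler(
      dense_float_column=dense_column,
      l1_regularization=0.1,
      l2_regularization=1.0,
      tree_complexity_regularization=0.0,
      min_node_weight=0.0,
      feature_column_group_id=0,
      epsilon=0.01,
      num_quantiles=10,
      gradient_shape=tensor_shape.scalar(),
      hessian_shape=tensor_shape.scalar(),
      multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
      init_stamp_token=0,
      name="illustrative_dense_handler")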
def _make_dense_split(quantile_accumulator_handle, stats_accumulator_handle,
stamp_token, next_stamp_token, multiclass_strategy,
class_id, feature_column_id, l1_regularization,
l2_regularization, tree_complexity_regularization,
min_node_weight, is_multi_dimentional,
loss_uses_sum_reduction, weak_learner_type):
"""Function that builds splits for a dense feature column."""
# Get the bucket boundaries
are_splits_ready, buckets = (
gen_quantile_ops.quantile_accumulator_get_buckets(
quantile_accumulator_handles=[quantile_accumulator_handle],
stamp_token=stamp_token))
# quantile_accumulator_get_buckets returns a list of results per handle that
# we pass to it. In this case we're getting results just for one resource.
are_splits_ready = are_splits_ready[0]
buckets = buckets[0]
# After we receive the boundaries from previous iteration we can flush
# the quantile accumulator.
with ops.control_dependencies([buckets]):
flush_quantiles = gen_quantile_ops.quantile_accumulator_flush(
quantile_accumulator_handle=quantile_accumulator_handle,
stamp_token=stamp_token,
next_stamp_token=next_stamp_token)
if is_multi_dimentional:
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
gen_stats_accumulator_ops.stats_accumulator_tensor_flush(
stats_accumulator_handle, stamp_token, next_stamp_token))
else:
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
gen_stats_accumulator_ops.stats_accumulator_scalar_flush(
stats_accumulator_handle, stamp_token, next_stamp_token))
# For sum_reduction, we don't need to divide by the number of minibatches.
num_minibatches = control_flow_ops.cond(
loss_uses_sum_reduction,
lambda: math_ops.cast(1, dtypes.int64),
lambda: num_minibatches)
# Put quantile and stats accumulator flushing in the dependency path.
with ops.control_dependencies([flush_quantiles, partition_ids]):
are_splits_ready = array_ops.identity(are_splits_ready)
partition_ids, gains, split_infos = (
split_handler_ops.build_dense_inequality_splits(
num_minibatches=num_minibatches,
bucket_boundaries=buckets,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
class_id=class_id,
feature_column_group_id=feature_column_id,
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
multiclass_strategy=multiclass_strategy,
weak_learner_type=weak_learner_type))
return are_splits_ready, partition_ids, gains, split_infos
class SparseSplitHandler(InequalitySplitHandler):
"""Computes stats and finds the best inequality splits on sparse columns."""
def __init__(self,
sparse_float_column,
l1_regularization,
l2_regularization,
tree_complexity_regularization,
min_node_weight,
feature_column_group_id,
epsilon,
num_quantiles,
gradient_shape,
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
loss_uses_sum_reduction=False,
name=None):
"""Initialize the internal state for this split handler.
Args:
sparse_float_column: A `SparseTensor` column associated with this handler.
l1_regularization: L1 regularization applied for this split handler.
l2_regularization: L2 regularization applied for this split handler.
tree_complexity_regularization: Tree complexity regularization applied
for this split handler.
min_node_weight: Minimum sum of weights of examples in each partition to
be considered for splitting.
feature_column_group_id: Feature column group index.
epsilon: A float, the error bound for quantile computation.
num_quantiles: An int, the number of buckets to create from the histogram.
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
multiclass_strategy: Strategy describing how to treat multiclass problems.
init_stamp_token: A tensor containing a scalar for the initial stamp of the
  stamped objects.
loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
SUM or MEAN reduction was used for the loss.
name: An optional handler name.
"""
super(SparseSplitHandler, self).__init__(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=feature_column_group_id,
epsilon=epsilon,
num_quantiles=num_quantiles,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=multiclass_strategy,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
name=name)
self._sparse_float_column = sparse_float_column
def scheduled_reads(self):
return [self._quantile_accumulator.schedule_get_buckets()]
def update_stats(self, stamp_token, example_partition_ids, gradients,
hessians, empty_gradients, empty_hessians, weights,
is_active, scheduled_reads):
"""Updates the state for dense split handler.
Args:
stamp_token: An int32 scalar tensor containing the current stamp token.
example_partition_ids: A dense tensor, containing an int32 for each
example which is the partition id that the example ends up in.
gradients: A dense tensor of gradients.
hessians: A dense tensor of hessians.
empty_gradients: A dense empty tensor of the same shape (for dimensions >
0) as gradients.
empty_hessians: A dense empty tensor of the same shape (for dimensions >
0) as hessians.
weights: A dense float32 tensor with a weight for each example.
is_active: A boolean tensor that says if this handler is active or not.
One value for the current layer and one value for the next layer.
scheduled_reads: List of results from the scheduled reads.
Returns:
The op that updates the stats for this handler.
"""
are_buckets_ready, buckets = scheduled_reads[0]
with ops.name_scope(self._name, "SparseSplitHandler"):
(quantile_indices, quantile_values, quantile_shapes, quantile_weights,
example_partition_ids, feature_ids, gradients,
hessians) = sparse_make_stats_update(
is_active, are_buckets_ready, self._sparse_float_column.indices,
self._sparse_float_column.values,
self._sparse_float_column.dense_shape, buckets,
example_partition_ids, gradients, hessians, weights, empty_gradients,
empty_hessians)
update_quantiles = self._quantile_accumulator.schedule_add_summary(
stamp_token=stamp_token,
column=sparse_tensor.SparseTensor(quantile_indices, quantile_values,
quantile_shapes),
example_weights=quantile_weights)
update_stats = self._stats_accumulator.schedule_add(
example_partition_ids, feature_ids, gradients, hessians)
return (control_flow_ops.no_op(), [update_quantiles, update_stats])
def make_splits(self, stamp_token, next_stamp_token, class_id):
"""Create the best split using the accumulated stats and flush the state."""
if (self._gradient_shape == tensor_shape.scalar() and
self._hessian_shape == tensor_shape.scalar()):
handler = make_sparse_split_scalar
else:
handler = make_sparse_split_tensor
are_splits_ready, partition_ids, gains, split_infos = (
handler(self._quantile_accumulator.resource_handle,
self._stats_accumulator.resource_handle, stamp_token,
next_stamp_token, self._multiclass_strategy, class_id,
self._feature_column_group_id, self._l1_regularization,
self._l2_regularization, self._tree_complexity_regularization,
self._min_node_weight, self._loss_uses_sum_reduction))
return are_splits_ready, partition_ids, gains, split_infos
def _make_sparse_split(
quantile_accumulator_handle, stats_accumulator_handle, stamp_token,
next_stamp_token, multiclass_strategy, class_id, feature_column_id,
l1_regularization, l2_regularization, tree_complexity_regularization,
min_node_weight, is_multi_dimentional, loss_uses_sum_reduction):
"""Function that builds splits for a sparse feature column."""
# Get the bucket boundaries
are_splits_ready, buckets = (
gen_quantile_ops.quantile_accumulator_get_buckets(
quantile_accumulator_handles=[quantile_accumulator_handle],
stamp_token=stamp_token))
# quantile_accumulator_get_buckets returns a list of results per handle that
# we pass to it. In this case we're getting results just for one resource.
are_splits_ready = are_splits_ready[0]
buckets = buckets[0]
# After we receive the boundaries from previous iteration we can flush
# the quantile accumulator.
with ops.control_dependencies([buckets]):
flush_quantiles = gen_quantile_ops.quantile_accumulator_flush(
quantile_accumulator_handle=quantile_accumulator_handle,
stamp_token=stamp_token,
next_stamp_token=next_stamp_token)
if is_multi_dimentional:
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
gen_stats_accumulator_ops.stats_accumulator_tensor_flush(
stats_accumulator_handle, stamp_token, next_stamp_token))
else:
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
gen_stats_accumulator_ops.stats_accumulator_scalar_flush(
stats_accumulator_handle, stamp_token, next_stamp_token))
num_minibatches = control_flow_ops.cond(
loss_uses_sum_reduction,
lambda: math_ops.cast(1, dtypes.int64),
lambda: num_minibatches)
# Put quantile and stats accumulator flushing in the dependency path.
with ops.control_dependencies([flush_quantiles, partition_ids]):
are_splits_ready = array_ops.identity(are_splits_ready)
partition_ids, gains, split_infos = (
split_handler_ops.build_sparse_inequality_splits(
num_minibatches=num_minibatches,
bucket_boundaries=buckets,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
class_id=class_id,
feature_column_group_id=feature_column_id,
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
bias_feature_id=_BIAS_FEATURE_ID,
multiclass_strategy=multiclass_strategy))
return are_splits_ready, partition_ids, gains, split_infos
def _specialize_make_split_dense(func, is_multi_dimentional):
"""Builds a specialized version of the function."""
@function.Defun(
dtypes.resource,
dtypes.resource,
dtypes.int64,
dtypes.int64,
dtypes.int32,
dtypes.int32,
dtypes.int32,
dtypes.float32,
dtypes.float32,
dtypes.float32,
dtypes.float32,
dtypes.bool,
dtypes.int32,
noinline=True)
def f(quantile_accumulator_handle, stats_accumulator_handle, stamp_token,
next_stamp_token, multiclass_strategy, class_id, feature_column_id,
l1_regularization, l2_regularization, tree_complexity_regularization,
min_node_weight, loss_uses_sum_reduction, weak_learner_type):
"""Function that builds splits for a sparse feature column."""
return func(quantile_accumulator_handle, stats_accumulator_handle,
stamp_token, next_stamp_token, multiclass_strategy, class_id,
feature_column_id, l1_regularization, l2_regularization,
tree_complexity_regularization, min_node_weight,
is_multi_dimentional, loss_uses_sum_reduction,
weak_learner_type)
return f
def _specialize_make_split_sparse(func, is_multi_dimentional):
"""Builds a specialized version of the function."""
@function.Defun(
dtypes.resource,
dtypes.resource,
dtypes.int64,
dtypes.int64,
dtypes.int32,
dtypes.int32,
dtypes.int32,
dtypes.float32,
dtypes.float32,
dtypes.float32,
dtypes.float32,
dtypes.bool,
noinline=True)
def f(quantile_accumulator_handle, stats_accumulator_handle, stamp_token,
next_stamp_token, multiclass_strategy, class_id, feature_column_id,
l1_regularization, l2_regularization, tree_complexity_regularization,
min_node_weight, loss_uses_sum_reduction):
"""Function that builds splits for a sparse feature column."""
return func(quantile_accumulator_handle, stats_accumulator_handle,
stamp_token, next_stamp_token, multiclass_strategy, class_id,
feature_column_id, l1_regularization, l2_regularization,
tree_complexity_regularization, min_node_weight,
is_multi_dimentional, loss_uses_sum_reduction)
return f
make_dense_split_scalar = _specialize_make_split_dense(
_make_dense_split, is_multi_dimentional=False)
make_dense_split_tensor = _specialize_make_split_dense(
_make_dense_split, is_multi_dimentional=True)
make_sparse_split_scalar = _specialize_make_split_sparse(
_make_sparse_split, is_multi_dimentional=False)
make_sparse_split_tensor = _specialize_make_split_sparse(
_make_sparse_split, is_multi_dimentional=True)
@function.Defun(
dtypes.bool,
dtypes.bool,
dtypes.float32,
dtypes.float32,
dtypes.int32,
dtypes.float32,
dtypes.float32,
dtypes.float32,
dtypes.float32,
dtypes.float32,
noinline=True)
def dense_make_stats_update(is_active, are_buckets_ready, float_column,
quantile_buckets, example_partition_ids, gradients,
hessians, weights, empty_gradients, empty_hessians):
"""Updates the state for dense split handler."""
empty_float = constant_op.constant_v1([], dtype=dtypes.float32)
quantile_values, quantile_weights = control_flow_ops.cond(
is_active[1], # For the next layer, this handler is inactive.
lambda: (float_column, weights),
lambda: (empty_float, empty_float))
def ready_inputs_fn():
"""Branch to execute when quantiles are ready."""
quantized_feature = quantile_ops.quantiles([float_column], [],
[quantile_buckets], [], [])
quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
quantized_feature = array_ops.squeeze(quantized_feature, axis=0)
return (example_partition_ids, quantized_feature, gradients, hessians)
def not_ready_inputs_fn():
return (constant_op.constant_v1([], dtype=dtypes.int32),
constant_op.constant_v1([[]], dtype=dtypes.int64, shape=[1, 2]),
empty_gradients, empty_hessians)
example_partition_ids, feature_ids, gradients, hessians = (
control_flow_ops.cond(
math_ops.logical_and(
math_ops.logical_and(are_buckets_ready,
array_ops.size(quantile_buckets) > 0),
is_active[0]), ready_inputs_fn, not_ready_inputs_fn))
return (quantile_values, quantile_weights, example_partition_ids, feature_ids,
gradients, hessians)
@function.Defun(
dtypes.bool,
dtypes.bool,
dtypes.int64,
dtypes.float32,
dtypes.int64,
dtypes.float32,
dtypes.int32,
dtypes.float32,
dtypes.float32,
dtypes.float32,
dtypes.float32,
dtypes.float32,
noinline=True)
def sparse_make_stats_update(
is_active, are_buckets_ready, sparse_column_indices, sparse_column_values,
sparse_column_shape, quantile_buckets, example_partition_ids, gradients,
hessians, weights, empty_gradients, empty_hessians):
"""Updates the state for this split handler."""
def quantiles_ready():
"""The subgraph for when the quantiles are ready."""
quantized_feature = quantile_ops.quantiles([], [sparse_column_values], [],
[quantile_buckets],
[sparse_column_indices])
quantized_feature = math_ops.cast(quantized_feature[1], dtypes.int64)
quantized_feature = array_ops.squeeze(quantized_feature, axis=0)
example_indices, _ = array_ops.split(
sparse_column_indices, num_or_size_splits=2, axis=1)
example_indices = array_ops.squeeze(example_indices, [1])
filtered_gradients = array_ops.gather(gradients, example_indices)
filtered_hessians = array_ops.gather(hessians, example_indices)
filtered_partition_ids = array_ops.gather(example_partition_ids,
example_indices)
unique_partitions, mapped_partitions = array_ops.unique(
example_partition_ids)
# Compute aggregate stats for each partition.
# Since unsorted_segment_sum can be numerically unstable, use 64bit
# operation.
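# For example, if example_partition_ids were [3, 3, 5], array_ops.unique
# above would yield unique_partitions = [3, 5] and mapped_partitions =
# [0, 0, 1], so the unsorted_segment_sum below produces per-partition sums
# [g0 + g1, g2] for gradients [g0, g1, g2] (values purely illustrative).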
gradients64 = math_ops.cast(gradients, dtypes.float64)
hessians64 = math_ops.cast(hessians, dtypes.float64)
per_partition_gradients = math_ops.unsorted_segment_sum(
gradients64, mapped_partitions, array_ops.size(unique_partitions))
per_partition_hessians = math_ops.unsorted_segment_sum(
hessians64, mapped_partitions, array_ops.size(unique_partitions))
per_partition_gradients = math_ops.cast(per_partition_gradients,
dtypes.float32)
per_partition_hessians = math_ops.cast(per_partition_hessians,
dtypes.float32)
# Prepend a bias feature per partition that accumulates the stats for all
# examples in that partition.
bias_feature_ids = array_ops.fill(
array_ops.shape(unique_partitions), _BIAS_FEATURE_ID)
bias_feature_ids = math_ops.cast(bias_feature_ids, dtypes.int64)
zeros = array_ops.zeros_like(bias_feature_ids)
bias_feature_ids = array_ops.stack([bias_feature_ids, zeros], axis=1)
partition_ids = array_ops.concat(
[unique_partitions, filtered_partition_ids], 0)
filtered_gradients = array_ops.concat(
[per_partition_gradients, filtered_gradients], 0)
filtered_hessians = array_ops.concat(
[per_partition_hessians, filtered_hessians], 0)
bucket_ids = array_ops.concat([bias_feature_ids, quantized_feature], 0)
return partition_ids, bucket_ids, filtered_gradients, filtered_hessians
def quantiles_not_ready():
"""The subgraph for when the quantiles are not ready."""
return (constant_op.constant_v1([], dtype=dtypes.int32),
constant_op.constant_v1([], dtype=dtypes.int64, shape=[1, 2]),
empty_gradients, empty_hessians)
empty_float = constant_op.constant_v1([], dtype=dtypes.float32)
handler_not_active = (constant_op.constant(
[], dtype=dtypes.int64, shape=[0, 2]), empty_float,
constant_op.constant([0, 1], dtype=dtypes.int64),
empty_float)
handler_active = (sparse_column_indices, sparse_column_values,
sparse_column_shape, weights)
quantile_indices, quantile_values, quantile_shape, quantile_weights = (
control_flow_ops.cond(is_active[1], lambda: handler_active,
lambda: handler_not_active))
example_partition_ids, feature_ids, gradients, hessians = (
control_flow_ops.cond(
math_ops.logical_and(are_buckets_ready,
array_ops.size(quantile_buckets) > 0),
quantiles_ready, quantiles_not_ready))
return (quantile_indices, quantile_values, quantile_shape, quantile_weights,
example_partition_ids, feature_ids, gradients, hessians)
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def get_empty_tensors(gradient_shape, hessian_shape):
empty_hess_shape = [1] + hessian_shape.as_list()
empty_grad_shape = [1] + gradient_shape.as_list()
empty_gradients = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_hess_shape)
return empty_gradients, empty_hessians
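# A small helper, not used by the tests, that spells out the arithmetic behind
# the hand-computed expected values in the test comments below: with an
# aggregated gradient sum G and hessian sum H, the l1-soft-thresholded
# gradient g = sign(G) * max(|G| - l1, 0) gives a leaf weight of
# -g / (H + l2) and a gain of g**2 / (H + l2). This is a reading-aid sketch,
# not part of the split handler implementation.
def _expected_leaf_weight_and_gain(grad_sum, hess_sum, l1=0.1, l2=1.0):
  if grad_sum > l1:
    g = grad_sum - l1
  elif grad_sum < -l1:
    g = grad_sum + l1
  else:
    g = 0.0
  weight = -g / (hess_sum + l2)
  gain = g * g / (hess_sum + l2)
  return weight, gain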
class EqualitySplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 0 | 1,2 |
# i1 | (-0.5, 0.07) | 0 | |
# i2 | (1.2, 0.2) | 0 | 2 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify the candidate for partition 1; there's only one active feature
# here, so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testObliviousFeatureSplitGeneration(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 1 | 1 |
# i1 | (-0.5, 0.07) | 1 | 2 |
# i2 | (1.2, 0.2) | 1 | 1 |
# i3 | (4.0, 0.13) | 2 | 2 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [1, 1, 1, 2]
indices = [[0, 0], [1, 0], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 1, 2], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0,
weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (
sess.run([are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([1, 2], partitions)
# For partition 1.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight1 = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain1 = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight1 = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain1 = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain1 = 0.46043165467625885
split_info = split_info_pb2.ObliviousSplitInfo()
split_info.ParseFromString(splits[0])
# Children of partition 1.
left_child = split_info.children[0].vector
right_child = split_info.children[1].vector
split_node = split_info.split_node.oblivious_categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
self.assertAllClose([expected_left_weight1], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight1], right_child.value, 0.00001)
# For partition 2.
expected_left_weight2 = 0
expected_left_gain2 = 0
# -(4 - 0.1) / (0.13 + 1)
expected_right_weight2 = -3.4513274336283186
# (4 - 0.1) ** 2 / (0.13 + 1)
expected_right_gain2 = 13.460176991150442
# (4 - 0.1) ** 2 / (0.13 + 1)
expected_bias_gain2 = 13.460176991150442
# Children of partition 2.
left_child = split_info.children[2].vector
right_child = split_info.children[3].vector
self.assertAllClose([expected_left_weight2], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight2], right_child.value, 0.00001)
self.assertAllClose(
expected_left_gain1 + expected_right_gain1 - expected_bias_gain1 +
expected_left_gain2 + expected_right_gain2 - expected_bias_gain2,
gains[0], 0.00001)
def testGenerateFeatureSplitCandidatesSumReduction(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 0 | 1,2 |
# i1 | (-0.5, 0.07) | 0 | |
# i2 | (1.2, 0.2) | 0 | 2 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0,
loss_uses_sum_reduction=True)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (
sess.run([are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.4 + 2.4 - 0.1) / (0.24 + 0.4 + 1)
expected_left_weight = -1.6463414634146338
# (0.4 + 2.4 - 0.1) ** 2 / (0.24 + 0.4 + 1)
expected_left_gain = 4.445121951219511
# -(-1 + 0.1) / (0.14 + 1)
expected_right_weight = 0.789473684211
# (-1 + 0.1) ** 2 / (0.14 + 1)
expected_right_gain = 0.710526315789
# (0.4 + -1 + 2.4 - 0.1) ** 2 / (0.24 + 0.14 + 0.4 + 1)
expected_bias_gain = 1.6235955056179772
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
# Check the split on partition 1.
# (-8 + 0.1) / (0.26 + 1)
expected_left_weight = -6.26984126984
# (-8 + 0.1) ** 2 / (0.26 + 1)
expected_left_gain = 49.5317460317
expected_right_weight = 0
expected_right_gain = 0
# (-8 + 0.1) ** 2 / (0.26 + 1)
expected_bias_gain = 49.5317460317
      # Verify the candidate for partition 1; there's only one active feature
      # here, so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testGenerateFeatureSplitCandidatesMulticlass(self):
with self.cached_session() as sess:
      # Batch size is 4, with 2 gradients per instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
      hessians = array_ops.constant(
          [hessian_0, hessian_1, hessian_2, hessian_3])
      partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
      indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
      values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
      # Each leaf has a 2-element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(1, split_node.feature_id)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, len(right_child.value))
self.assertEqual(1, split_node.feature_id)
def testEmpty(self):
with self.cached_session() as sess:
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = constant_op.constant_v1([], dtype=dtypes.int64, shape=[0, 2])
values = constant_op.constant_v1([], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testInactive(self):
with self.cached_session() as sess:
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, False]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testLastOneEmpty(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 0 | 1,2 |
# i1 | (-0.5, 0.07) | 0 | |
# i2 | (1.2, 0.2) | 0 | 2 |
# i3 | (4.0, 0.13) | 1 | |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0]]
values = array_ops.constant([1, 2, 2], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (
sess.run([are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
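# A standalone sketch (not exercised by the tests) of the arithmetic behind the
# hand-computed expectations above, assuming the usual L1-shrunk Newton step
# per node; `node_weight_and_gain` is a hypothetical helper, not part of the
# TensorFlow API.
def node_weight_and_gain(grad_sum, hess_sum, l1=0.1, l2=1.0):
  """Returns (weight, gain) for a node with the given gradient/hessian sums."""
  shrunk = max(abs(grad_sum) - l1, 0.0)
  sign = 1.0 if grad_sum >= 0 else -1.0
  # weight = -sign(G) * max(|G| - l1, 0) / (H + l2)
  weight = -sign * shrunk / (hess_sum + l2)
  # gain = max(|G| - l1, 0) ** 2 / (H + l2)
  gain = shrunk**2 / (hess_sum + l2)
  return weight, gain
# For instance, the left child of partition 0 in testLastOneEmpty above sums
# gradients 0.2 + 1.2 and hessians 0.12 + 0.2, giving approximately
# (-0.98485, 1.28030), which matches the expected constants in that test.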
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of handler for split nodes for categorical columns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.lib.learner.batch import base_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.python.ops import split_handler_ops
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
_BIAS_FEATURE_ID = int(dtypes.int64.min)
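# _BIAS_FEATURE_ID is the sentinel feature id under which per-partition totals
# (the "bias" stats) are accumulated; int64 min is presumably chosen so that it
# cannot collide with real categorical feature ids.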
class EqualitySplitHandler(base_split_handler.BaseSplitHandler):
"""Creates equality split type for categorical features."""
def __init__(self,
sparse_int_column,
l1_regularization,
l2_regularization,
tree_complexity_regularization,
min_node_weight,
feature_column_group_id,
gradient_shape,
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
loss_uses_sum_reduction=False,
weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE,
name=None):
"""Initialize the internal state for this split handler.
Args:
sparse_int_column: A `SparseTensor` column with int64 values associated
with this handler.
l1_regularization: L1 regularization applied for this split handler.
l2_regularization: L2 regularization applied for this split handler.
tree_complexity_regularization: Tree complexity regularization applied
for this split handler.
min_node_weight: Minimum sum of weights of examples in each partition to
be considered for splitting.
feature_column_group_id: Feature column group index.
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
multiclass_strategy: Strategy describing how to treat multiclass problems.
      init_stamp_token: A tensor containing a scalar for the initial stamp of
        the stamped objects.
loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
SUM or MEAN reduction was used for the loss.
weak_learner_type: Specifies the type of weak learner to use.
name: An optional handler name.
"""
super(EqualitySplitHandler, self).__init__(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=feature_column_group_id,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=multiclass_strategy,
loss_uses_sum_reduction=loss_uses_sum_reduction,
name=name)
self._stats_accumulator = stats_accumulator_ops.StatsAccumulator(
init_stamp_token,
gradient_shape,
hessian_shape,
name="StatsAccumulator/{}".format(self._name))
self._sparse_int_column = sparse_int_column
self._weak_learner_type = weak_learner_type
def update_stats(self, stamp_token, example_partition_ids, gradients,
hessians, empty_gradients, empty_hessians, weights,
is_active, scheduled_reads):
"""Updates the state for equality split handler.
Args:
stamp_token: An int32 scalar tensor containing the current stamp token.
example_partition_ids: A dense tensor, containing an int32 for each
example which is the partition id that the example ends up in.
gradients: A dense tensor of gradients.
hessians: A dense tensor of hessians.
empty_gradients: A dense empty tensor of the same shape (for dimensions >
0) as gradients.
empty_hessians: A dense empty tensor of the same shape (for dimensions >
0) as hessians.
weights: A dense float32 tensor with a weight for each example.
is_active: A boolean tensor that says if this handler is active or not.
One value for the current layer and one value for the next layer.
scheduled_reads: List of results from the scheduled reads.
Returns:
The op that updates the stats for this handler.
Raises:
ValueError: If example_columns is not a single sparse column.
"""
del scheduled_reads # Unused by the categorical split handler.
def not_active_inputs():
return (constant_op.constant([], dtype=dtypes.int32),
constant_op.constant_v1([], dtype=dtypes.int64, shape=[1, 2]),
empty_gradients, empty_hessians)
def active_inputs():
"""The normal flow when the handler is active."""
# Remove the second column of example indices matrix since it is not
# useful.
example_indices, _ = array_ops.split(
self._sparse_int_column.indices, num_or_size_splits=2, axis=1)
example_indices = array_ops.squeeze(example_indices, [1])
filtered_gradients = array_ops.gather(gradients, example_indices)
filtered_hessians = array_ops.gather(hessians, example_indices)
filtered_partition_ids = array_ops.gather(example_partition_ids,
example_indices)
unique_partitions, mapped_partitions = array_ops.unique(
example_partition_ids)
# Compute aggregate stats for each partition.
# The bias is computed on gradients and hessians (and not
# filtered_gradients) which have exactly one value per example, so we
# don't double count a gradient in multivalent columns.
      # Since unsorted_segment_sum can be numerically unstable, use 64-bit
      # operations.
gradients64 = math_ops.cast(gradients, dtypes.float64)
hessians64 = math_ops.cast(hessians, dtypes.float64)
per_partition_gradients = math_ops.unsorted_segment_sum(
gradients64, mapped_partitions, array_ops.size(unique_partitions))
per_partition_hessians = math_ops.unsorted_segment_sum(
hessians64, mapped_partitions, array_ops.size(unique_partitions))
per_partition_gradients = math_ops.cast(per_partition_gradients,
dtypes.float32)
per_partition_hessians = math_ops.cast(per_partition_hessians,
dtypes.float32)
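      # Worked example (matching this handler's unit tests): with gradients
      # [0.2, -0.5, 1.2, 4.0], hessians [0.12, 0.07, 0.2, 0.13] and
      # example_partition_ids [0, 0, 0, 1], unique_partitions is [0, 1] and the
      # per-partition sums computed above are gradients [0.9, 4.0] and
      # hessians [0.39, 0.13].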
# Prepend a bias feature per partition that accumulates the stats for all
# examples in that partition.
# Bias is added to the stats even if there are no examples with values in
# the current sparse column. The reason is that the other example batches
# might have values in these partitions so we have to keep the bias
# updated.
bias_feature_ids = array_ops.fill(
array_ops.shape(unique_partitions), _BIAS_FEATURE_ID)
bias_feature_ids = math_ops.cast(bias_feature_ids, dtypes.int64)
partition_ids = array_ops.concat(
[unique_partitions, filtered_partition_ids], 0)
filtered_gradients = array_ops.concat(
[per_partition_gradients, filtered_gradients], 0)
filtered_hessians = array_ops.concat(
[per_partition_hessians, filtered_hessians], 0)
feature_ids = array_ops.concat(
[bias_feature_ids, self._sparse_int_column.values], 0)
# Dimension is always zero for sparse int features.
dimension_ids = array_ops.zeros_like(feature_ids, dtype=dtypes.int64)
feature_ids_and_dimensions = array_ops.stack(
[feature_ids, dimension_ids], axis=1)
return (partition_ids, feature_ids_and_dimensions, filtered_gradients,
filtered_hessians)
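    # With the sparse column used in the unit tests (feature ids [1, 2, 2, 1]
    # for examples 0, 0, 2 and 3), active_inputs therefore yields partition ids
    # [0, 1, 0, 0, 0, 1] and feature ids
    # [_BIAS_FEATURE_ID, _BIAS_FEATURE_ID, 1, 2, 2, 1] (each paired with
    # dimension 0): one bias row per partition followed by one row per sparse
    # (example, feature id) value.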
partition_ids, feature_ids, gradients_out, hessians_out = (
control_flow_ops.cond(is_active[0], active_inputs, not_active_inputs))
result = self._stats_accumulator.schedule_add(partition_ids, feature_ids,
gradients_out, hessians_out)
return (control_flow_ops.no_op(), [result])
def make_splits(self, stamp_token, next_stamp_token, class_id):
"""Create the best split using the accumulated stats and flush the state."""
# Get the aggregated gradients and hessians per <partition_id, feature_id>
# pair.
num_minibatches, partition_ids, feature_ids, gradients, hessians = (
self._stats_accumulator.flush(stamp_token, next_stamp_token))
# For sum_reduction, we don't need to divide by number of minibatches.
num_minibatches = control_flow_ops.cond(
ops.convert_to_tensor(self._loss_uses_sum_reduction),
lambda: math_ops.cast(1, dtypes.int64),
lambda: num_minibatches)
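    # For example, after two identical minibatch updates the accumulated
    # gradient of an example with gradient 4.0 is 8.0; with MEAN reduction the
    # split op divides the accumulated stats by num_minibatches=2, while for
    # SUM reduction the division is skipped by forcing num_minibatches to 1.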
partition_ids, gains, split_infos = (
split_handler_ops.build_categorical_equality_splits(
num_minibatches=num_minibatches,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians,
class_id=class_id,
feature_column_group_id=self._feature_column_group_id,
l1_regularization=self._l1_regularization,
l2_regularization=self._l2_regularization,
tree_complexity_regularization=self._tree_complexity_regularization,
min_node_weight=self._min_node_weight,
bias_feature_id=_BIAS_FEATURE_ID,
multiclass_strategy=self._multiclass_strategy,
weak_learner_type=self._weak_learner_type))
    # There are no warm-up rounds needed in the equality column handler, so we
    # always return ready.
are_splits_ready = constant_op.constant(True)
return (are_splits_ready, partition_ids, gains, split_infos)
def reset(self, stamp_token, next_stamp_token):
reset = self._stats_accumulator.flush(stamp_token, next_stamp_token)
return reset
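# A minimal usage sketch (illustrative only, not exercised by the library):
# it mirrors how the unit tests drive EqualitySplitHandler -- build the handler
# over a sparse int64 column, push one batch of per-example gradient/hessian
# stats, then build and run the split ops. The tensor values and the helper
# name `_example_usage` below are assumptions for illustration.
def _example_usage():
  from tensorflow.python.client import session
  from tensorflow.python.framework import sparse_tensor
  from tensorflow.python.framework import tensor_shape
  from tensorflow.python.ops import resources
  with ops.Graph().as_default(), session.Session() as sess:
    # Four examples in partitions [0, 0, 0, 1]; examples 0, 2 and 3 carry the
    # categorical feature ids 1, 2 and 1 respectively.
    gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
    hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
    partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
    sparse_column = sparse_tensor.SparseTensor(
        indices=[[0, 0], [2, 0], [3, 0]],
        values=constant_op.constant([1, 2, 1], dtype=dtypes.int64),
        dense_shape=[4, 1])
    handler = EqualitySplitHandler(
        sparse_int_column=sparse_column,
        l1_regularization=0.1,
        l2_regularization=1.0,
        tree_complexity_regularization=0.0,
        min_node_weight=0.0,
        feature_column_group_id=0,
        gradient_shape=tensor_shape.scalar(),
        hessian_shape=tensor_shape.scalar(),
        multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
        init_stamp_token=0)
    resources.initialize_resources(resources.shared_resources()).run()
    # Empty per-class stats, shaped as by get_empty_tensors in the unit tests.
    empty = constant_op.constant_v1([], dtype=dtypes.float32, shape=[1])
    update = handler.update_stats_sync(
        0,
        partition_ids,
        gradients,
        hessians,
        empty,
        empty,
        array_ops.ones([4, 1], dtypes.float32),
        is_active=array_ops.constant([True, True]))
    with ops.control_dependencies([update]):
      ready, split_partitions, gains, splits = handler.make_splits(
          0, 1, class_id=-1)
    return sess.run([ready, split_partitions, gains, splits])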
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def get_empty_tensors(gradient_shape, hessian_shape):
empty_hess_shape = [1] + hessian_shape.as_list()
empty_grad_shape = [1] + gradient_shape.as_list()
empty_gradients = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_hess_shape)
return empty_gradients, empty_hessians
class DenseSplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
      # -(-0.5 + 0.2 + 0.1) / (0.19 + 1)
      expected_right_weight = 0.1680672
      # expected_right_weight * -(-0.5 + 0.2 + 0.1)
expected_right_gain = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
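      # Note: for the left child, expected_left_weight * -(1.2 - 0.1) equals
      # (1.2 - 0.1) ** 2 / (0.2 + 1), since weight = -(1.2 - 0.1) / (0.2 + 1);
      # the two gain forms used across these tests are the same quantity.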
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
      # Verify the candidate for partition 1; there's only one active bucket
      # here, so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testObliviousFeatureSplitGeneration(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 1 | 3 |
# i1 | (-0.5, 0.07) | 1 | 3 |
# i2 | (1.2, 0.2) | 1 | 1 |
# i3 | (4.0, 0.13) | 2 | 2 |
dense_column = array_ops.placeholder(
dtypes.float32, shape=(4, 1), name="dense_column")
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([1, 1, 1, 2], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
# Forcing the creation of four buckets.
are_splits_ready = sess.run(
[are_splits_ready],
feed_dict={dense_column: [[0.2], [0.62], [0.3], [0.52]]})[0]
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
# Only using the last three buckets.
are_splits_ready2, partitions, gains, splits = (
sess.run(
[are_splits_ready2, partitions, gains, splits],
feed_dict={dense_column: [[0.62], [0.62], [0.3], [0.52]]}))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([1, 2], partitions)
oblivious_split_info = split_info_pb2.ObliviousSplitInfo()
oblivious_split_info.ParseFromString(splits[0])
split_node = oblivious_split_info.split_node
split_node = split_node.oblivious_dense_float_binary_split
self.assertAllClose(0.3, split_node.threshold, 0.00001)
self.assertEqual(0, split_node.feature_column)
# Check the split on partition 1.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight_1 = -0.9166666666666666
# expected_left_weight_1 * -(1.2 - 0.1)
expected_left_gain_1 = 1.008333333333333
      # -(-0.5 + 0.2 + 0.1) / (0.19 + 1)
      expected_right_weight_1 = 0.1680672
      # expected_right_weight_1 * -(-0.5 + 0.2 + 0.1)
expected_right_gain_1 = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain_1 = 0.46043165467625896
left_child = oblivious_split_info.children[0].vector
right_child = oblivious_split_info.children[1].vector
self.assertAllClose([expected_left_weight_1], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight_1], right_child.value, 0.00001)
# Check the split on partition 2.
expected_left_weight_2 = 0
expected_left_gain_2 = 0
# -(4 - 0.1) / (0.13 + 1)
expected_right_weight_2 = -3.4513274336283186
# expected_right_weight_2 * -(4 - 0.1)
expected_right_gain_2 = 13.460176991150442
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain_2 = 13.460176991150442
left_child = oblivious_split_info.children[2].vector
right_child = oblivious_split_info.children[3].vector
self.assertAllClose([expected_left_weight_2], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight_2], right_child.value, 0.00001)
# The layer gain is the sum of the gains of each partition
layer_gain = (
expected_left_gain_1 + expected_right_gain_1 - expected_bias_gain_1) + (
expected_left_gain_2 + expected_right_gain_2 - expected_bias_gain_2)
self.assertAllClose(layer_gain, gains[0], 0.00001)
      # Since we have examples in both partitions, we get both parent ids.
self.assertEqual(2, len(oblivious_split_info.children_parent_id))
self.assertEqual(1, oblivious_split_info.children_parent_id[0])
self.assertEqual(2, oblivious_split_info.children_parent_id[1])
def testGenerateFeatureSplitCandidatesLossUsesSumReduction(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.2,
l2_regularization=2.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
loss_uses_sum_reduction=True)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_3 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2, update_3]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(2.4 - 0.2) / (0.4 + 2)
expected_left_weight = -0.91666
# expected_left_weight * -(2.4 - 0.2)
expected_left_gain = 2.016666666666666
# -(-1 + 0.4 + 0.2) / (0.38 + 2)
expected_right_weight = 0.1680672
# expected_right_weight * -(-1 + 0.4 + 0.2)
expected_right_gain = 0.0672268907563025
      # (0.4 + -1 + 2.4 - 0.2) ** 2 / (0.24 + 0.14 + 0.4 + 2)
expected_bias_gain = 0.9208633093525178
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-8 + 0.2) / (0.26 + 2)
expected_left_weight = -3.4513274336283186
expected_right_weight = 0
      # Verify the candidate for partition 1; there's only one active bucket
      # here, so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.cached_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
      # Batch size is 4, with 2 gradients per instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.,
l2_regularization=1.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=3,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
      # Each leaf has a 2-element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self):
with self.cached_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
      # Batch size is 4, with 2 gradients per instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# Each hessian is a diagonal of a full hessian matrix.
hessian_0 = [0.12, 0.11]
hessian_1 = [0.07, 0.2]
hessian_2 = [0.2, 0.9]
hessian_3 = [0.13, 2.2]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
class_id = -1
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2])
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.,
l2_regularization=1.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=3,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
      # Each leaf has a 2-element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 1e-6)
def testGenerateFeatureSplitCandidatesInactive(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1.,
tree_complexity_regularization=0.,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, False]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
# The handler was inactive, so it shouldn't return any splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testGenerateFeatureSplitCandidatesWithTreeComplexity(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1.,
tree_complexity_regularization=0.5,
min_node_weight=0.,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(1.2 - 0.1) / (0.2 + 1)
expected_left_weight = -0.91666
# expected_left_weight * -(1.2 - 0.1)
expected_left_gain = 1.0083333333333331
      # -(-0.5 + 0.2 + 0.1) / (0.19 + 1)
      expected_right_weight = 0.1680672
      # expected_right_weight * -(-0.5 + 0.2 + 0.1)
expected_right_gain = 0.033613445378151252
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Make sure the gain is subtracted by the tree complexity regularization.
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain - 0.5,
gains[0], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.3, split_node.threshold, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
      # Verify the candidate for partition 1; there's only one active bucket
      # here, so a gain of -0.5 is expected (because of tree complexity).
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(-0.5, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
def testGenerateFeatureSplitCandidatesWithMinNodeWeight(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Dense Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | 1 |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 2.0) | 1 | 1 |
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 2])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.DenseSplitHandler(
l1_regularization=0.1,
l2_regularization=1.,
tree_complexity_regularization=0.5,
min_node_weight=1.5,
epsilon=0.001,
num_quantiles=10,
feature_column_group_id=0,
dense_float_column=dense_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the gain on partition 0 to be -0.5.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
# Make sure the gain is subtracted by the tree complexity regularization.
self.assertAllClose(-0.5, gains[0], 0.00001)
self.assertEqual(0, split_node.feature_column)
# Check the split on partition 1.
# (-4 + 0.1) / (2 + 1)
expected_left_weight = -1.3
expected_right_weight = 0
      # Verify the candidate for partition 1; there's only one active bucket
      # here, so a gain of -0.5 is expected (because of tree complexity).
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.dense_float_binary_split
self.assertAllClose(-0.5, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertAllClose(0.52, split_node.threshold, 0.00001)
class SparseSplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2) / (0.12 + 0.2 + 2)
expected_left_weight = -0.603448275862069
# (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2)
expected_left_gain = 0.8448275862068965
# 0.5 / (0.07 + 2)
expected_right_weight = 0.24154589371980678
# 0.5 ** 2 / (0.07 + 2)
expected_right_gain = 0.12077294685990339
# (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2)
expected_bias_gain = 0.3389121338912133
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
# Check the split on partition 1.
expected_left_weight = -1.8779342723004695
expected_right_weight = 0
      # Verify the candidate for partition 1; there's only one active bucket
      # here, so zero gain is expected.
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertAllClose(0.0, gains[1])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesLossUsesSumReduction(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=4.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
loss_uses_sum_reduction=True)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_3 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2, update_3]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
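      # With loss_uses_sum_reduction=True, both stamp-1 updates (update_2 and
      # update_3) are accumulated, so gradients and hessians are effectively
      # doubled relative to the preceding test (e.g. 0.4 = 2 * 0.2,
      # 0.24 = 2 * 0.12); this handler also uses l2_regularization=4.0. The
      # same weight/gain formulas as above apply.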
# -(0.4 + 2.4) / (0.24 + 0.4 + 4)
expected_left_weight = -0.603448275862069
# (0.4 + 2.4) ** 2 / (0.24 + 0.4 + 4)
expected_left_gain = 1.689655172413793
# 1 / (0.14 + 4)
expected_right_weight = 0.24154589371980678
# 1 ** 2 / (0.14 + 4)
expected_right_gain = 0.24154589371980678
# (0.4 + 2.4 - 1) ** 2 / (0.24 + 0.4 + 0.14 + 4)
expected_bias_gain = 0.6778242677824265
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
# Check the split on partition 1.
expected_left_weight = -1.8779342723004695
expected_right_weight = 0
      # Verify the candidate for partition 1; there's only one active bucket
      # here, so zero gain is expected.
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertAllClose(0.0, gains[1])
self.assertAllClose([expected_left_weight], left_child.value)
self.assertAllClose([expected_right_weight], right_child.value)
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.cached_session() as sess:
# Batch is 4, 2 classes
gradients = array_ops.constant([[0.2, 1.4], [-0.5, 0.1], [1.2, 3],
[4.0, -3]])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
      # Each leaf has a 2-element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self):
with self.cached_session() as sess:
# Batch is 4, 2 classes
gradients = array_ops.constant([[0.2, 1.4], [-0.5, 0.1], [1.2, 3],
[4.0, -3]])
# Each hessian is a diagonal from a full hessian matrix.
hessian_0 = [0.12, 0.11]
hessian_1 = [0.07, 0.2]
hessian_2 = [0.2, 0.9]
hessian_3 = [0.13, 2.2]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2])
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_right
      # Each leaf has a 2-element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.52, split_node.split.threshold)
def testGenerateFeatureSplitCandidatesInactive(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Sparse Quantile |
# i0 | (0.2, 0.12) | 0 | 1 |
# i1 | (-0.5, 0.07) | 0 | N/A |
# i2 | (1.2, 0.2) | 0 | 0 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.52, 0.3, 0.52])
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
sparse_float_column=sparse_column,
init_stamp_token=0,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, False]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
      # The handler was inactive, so it shouldn't have produced any splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testEmpty(self):
with self.cached_session() as sess:
indices = constant_op.constant_v1([], dtype=dtypes.int64, shape=[0, 2])
# No values in this feature column in this mini-batch.
values = constant_op.constant_v1([], dtype=dtypes.float32)
sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testEmptyBuckets(self):
"""Test that reproduces the case when quantile buckets were empty."""
with self.cached_session() as sess:
sparse_column = array_ops.sparse_placeholder(dtypes.float32)
# We have two batches - at first, a sparse feature is empty.
empty_indices = constant_op.constant_v1([], dtype=dtypes.int64,
shape=[0, 2])
empty_values = constant_op.constant_v1([], dtype=dtypes.float32)
empty_sparse_column = sparse_tensor.SparseTensor(empty_indices,
empty_values, [4, 2])
empty_sparse_column = empty_sparse_column.eval(session=sess)
# For the second batch, the sparse feature is not empty.
non_empty_indices = array_ops.constant(
[[0, 0], [2, 1], [3, 2]], dtype=dtypes.int64, shape=[3, 2])
non_empty_values = array_ops.constant(
[0.52, 0.3, 0.52], dtype=dtypes.float32)
non_empty_sparse_column = sparse_tensor.SparseTensor(
non_empty_indices, non_empty_values, [4, 2])
non_empty_sparse_column = non_empty_sparse_column.eval(session=sess)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
# First, calculate quantiles and try to update on an empty data for a
# feature.
are_splits_ready = (
sess.run(
are_splits_ready,
feed_dict={sparse_column: empty_sparse_column}))
self.assertFalse(are_splits_ready)
update_2 = split_handler.update_stats_sync(
1,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
# Now the feature in the second batch is not empty, but buckets
# calculated on the first batch are empty.
are_splits_ready2, partitions, gains, splits = (
sess.run(
[are_splits_ready2, partitions, gains, splits],
feed_dict={sparse_column: non_empty_sparse_column}))
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
# Since the buckets were empty, we can't calculate the splits.
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testDegenerativeCase(self):
with self.cached_session() as sess:
      # Only one data example, hence one leaf and thus one quantile bucket. The
      # same situation arises when all examples have the same values. This case
      # used to cause a failure.
gradients = array_ops.constant([0.2])
hessians = array_ops.constant([0.12])
example_partitions = array_ops.constant([1], dtype=dtypes.int32)
indices = array_ops.constant([[0, 0]], dtype=dtypes.int64)
values = array_ops.constant([0.58])
sparse_column = sparse_tensor.SparseTensor(indices, values, [1, 1])
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = ordinal_split_handler.SparseSplitHandler(
l1_regularization=0.0,
l2_regularization=2.0,
tree_complexity_regularization=0.0,
min_node_weight=0.0,
epsilon=0.01,
num_quantiles=2,
feature_column_group_id=0,
sparse_float_column=sparse_column,
init_stamp_token=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([1, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready = split_handler.make_splits(
np.int64(0), np.int64(1), class_id)[0]
with ops.control_dependencies([are_splits_ready]):
update_2 = split_handler.update_stats_sync(
1,
example_partitions,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_2]):
are_splits_ready2, partitions, gains, splits = (
split_handler.make_splits(np.int64(1), np.int64(2), class_id))
are_splits_ready, are_splits_ready2, partitions, gains, splits = (
sess.run([
are_splits_ready, are_splits_ready2, partitions, gains, splits
]))
# During the first iteration, inequality split handlers are not going to
# have any splits. Make sure that we return not_ready in that case.
self.assertFalse(are_splits_ready)
self.assertTrue(are_splits_ready2)
self.assertAllEqual([1], partitions)
self.assertAllEqual([0.0], gains)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
split_node = split_info.split_node.sparse_float_binary_split_default_left
self.assertEqual(0, split_node.split.feature_column)
self.assertAllClose(0.58, split_node.split.threshold)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for creating split nodes using one or more features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils
from tensorflow.python.ops import control_flow_ops
@six.add_metaclass(abc.ABCMeta)
class BaseSplitHandler(object):
"""Abstract Base class defining split handlers interface."""
def __init__(self,
l1_regularization,
l2_regularization,
tree_complexity_regularization,
min_node_weight,
feature_column_group_id,
gradient_shape,
hessian_shape,
multiclass_strategy,
loss_uses_sum_reduction=False,
name=None):
"""Constructor for BaseSplitHandler.
Args:
l1_regularization: L1 regularization applied for this split handler.
l2_regularization: L2 regularization applied for this split handler.
tree_complexity_regularization: Tree complexity regularization applied
for this split handler.
min_node_weight: Minimum sum of weights of examples in each partition to
be considered for splitting.
feature_column_group_id: Feature column group index.
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
multiclass_strategy: Strategy describing how to treat multiclass problems.
loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
SUM or MEAN reduction was used for the loss.
name: An optional handler name.
"""
self._l1_regularization = l1_regularization
self._l2_regularization = l2_regularization
self._tree_complexity_regularization = tree_complexity_regularization
self._min_node_weight = min_node_weight
self._feature_column_group_id = feature_column_group_id
self._name = name or ""
self._multiclass_strategy = multiclass_strategy
self._hessian_shape = hessian_shape
self._gradient_shape = gradient_shape
self._loss_uses_sum_reduction = loss_uses_sum_reduction
def scheduled_reads(self):
"""Returns the list of `ScheduledOp`s required for update_stats."""
return []
@abc.abstractmethod
def update_stats(self, stamp_token, example_partition_ids, gradients,
hessians, empty_gradients, empty_hessians, weights,
is_active, scheduled_reads):
"""Updates the state for this split handler.
Args:
stamp_token: An int32 scalar tensor containing the current stamp token.
example_partition_ids: A dense tensor, containing an int32 for each
example which is the partition id that the example ends up in.
gradients: A dense tensor of gradients.
hessians: A dense tensor of hessians.
empty_gradients: A dense empty tensor of the same shape (for dimensions >
0) as gradients.
empty_hessians: A dense empty tensor of the same shape (for dimensions >
0) as hessians.
weights: A dense float32 tensor with a weight for each example.
is_active: A boolean tensor that says if this handler is active or not.
One value for the current layer and one value for the next layer.
scheduled_reads: List of results from the scheduled reads.
Returns:
A tuple of the op that updates the stats for this handler and a list of
`ScheduledOp`s.
"""
def update_stats_sync(self, stamp_token, example_partition_ids, gradients,
hessians, empty_gradients, empty_hessians, weights,
is_active):
"""Updates the state for this split handler running the scheduled I/O.
Args:
stamp_token: An int32 scalar tensor containing the current stamp token.
example_partition_ids: A dense tensor, containing an int32 for each
example which is the partition id that the example ends up in.
gradients: A dense tensor of gradients.
hessians: A dense tensor of hessians.
empty_gradients: A dense empty tensor of the same shape (for dimensions >
0) as gradients.
empty_hessians: A dense empty tensor of the same shape (for dimensions >
0) as hessians.
weights: A dense float32 tensor with a weight for each example.
is_active: A boolean tensor that says if this handler is active or not.
One value for the current layer and one value for the next layer.
Returns:
Op that updates the stats for this handler.
"""
handler_reads = {self: self.scheduled_reads()}
handler_results = batch_ops_utils.run_handler_scheduled_ops(
handler_reads, stamp_token, None)
update_1, scheduled_updates = self.update_stats(
stamp_token, example_partition_ids, gradients, hessians,
empty_gradients, empty_hessians, weights, is_active,
handler_results[self])
update_2 = batch_ops_utils.run_handler_scheduled_ops({
self: scheduled_updates
}, stamp_token, None)
return control_flow_ops.group(update_1, *update_2[self])
@abc.abstractmethod
def reset(self, stamp_token, next_stamp_token):
"""Resets the state maintained by the handler."""
@abc.abstractmethod
def make_splits(self, stamp_token, next_stamp_token, class_id):
"""Create the best split using the accumulated stats and flush the state.
This should only be called by the master.
Args:
stamp_token: An int32 scalar tensor containing the current stamp token.
next_stamp_token: An int32 scalar tensor containing the stamp token for
the next iteration.
class_id: what class id the handler gathers stats for (for tree per class
strategy). When class_id=-1, the strategy is not tree per class.
Returns:
A tuple (are_splits_ready, partition_id, gain, split_info) where
are_splits_ready is a scalar boolean tensor, partition_id is a rank 1,
int32 tensor, gain is a rank 1 float32 tensor and split_info is a rank 1
string tensor containing serialized SplitInfo protos.
"""
|
tensorflow-master
|
tensorflow/contrib/boosted_trees/lib/learner/batch/base_split_handler.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Remote fused graph ops python library.
## This package provides classes for remote fused graph ops.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.remote_fused_graph.pylib.python.ops.remote_fused_graph_ops import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['remote_fused_graph_execute']
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/remote_fused_graph/pylib/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Remote fused graph ops python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/remote_fused_graph/pylib/python/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.remote_fused_graph_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.remote_fused_graph.pylib.python.ops import remote_fused_graph_ops
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class RemoteFusedGraphExecuteTest(test_util.TensorFlowTestCase):
"""Tests for RemoteFusedGraphExecute op."""
def testBuild(self):
graph = graph_pb2.GraphDef()
node = graph.node.add()
node.name = "a"
node.op = "op0"
node = graph.node.add()
node.name = "b"
node.op = "op1"
inputs = [ops.convert_n_to_tensor([1], dtypes.int64)]
output_types = [np.int64, np.int64]
graph_input_node_names = ["a"]
graph_output_node_names = ["a", "b"]
executor_name = ""
serialized_executor_parameters = b""
default_graph_input_tensor_type_shapes = [[dtypes.int64, [1]]]
default_graph_output_tensor_type_shapes = [[dtypes.int64, [1]],
[dtypes.int64, [1]]]
output_nodes = remote_fused_graph_ops.remote_fused_graph_execute(
inputs, output_types, graph, graph_input_node_names,
graph_output_node_names, executor_name, serialized_executor_parameters,
default_graph_input_tensor_type_shapes,
default_graph_output_tensor_type_shapes)
self.assertEqual(2, len(output_nodes))
for output_node in output_nodes:
with self.test_session(use_gpu=False):
output_node.eval()
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/contrib/remote_fused_graph/pylib/python/ops/remote_fused_graph_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Remote fused graph ops python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/remote_fused_graph/pylib/python/ops/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations to execute a subgraph on a remote processor."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.remote_fused_graph.pylib.python.ops import gen_remote_fused_graph_ops
from tensorflow.core.framework import remote_fused_graph_execute_info_pb2 as info_pb2
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
# RemoteFusedGraphExecute is not a differentiable op.
ops.NotDifferentiable("RemoteFusedGraphExecute")
def remote_fused_graph_execute(inputs,
output_types,
graph_def,
graph_input_node_names,
graph_output_node_names,
executor_name,
serialized_executor_parameters,
default_graph_input_tensor_type_shapes=None,
default_graph_output_tensor_type_shapes=None):
"""A wrapper for remote_fused_graph_execute."""
info_proto = info_pb2.RemoteFusedGraphExecuteInfo()
info_proto.remote_graph.CopyFrom(graph_def)
info_proto.graph_input_node_name.extend(graph_input_node_names)
info_proto.graph_output_node_name.extend(graph_output_node_names)
info_proto.executor_name = executor_name
info_proto.serialized_executor_parameters = serialized_executor_parameters
if default_graph_input_tensor_type_shapes:
for type_shape in default_graph_input_tensor_type_shapes:
type_shape_proto = info_proto.default_graph_input_tensor_shape.add()
type_shape_proto.dtype = dtypes.as_dtype(type_shape[0]).as_datatype_enum
for dim in type_shape[1]:
type_shape_proto.shape.dim.add().size = dim
if default_graph_output_tensor_type_shapes:
for type_shape in default_graph_output_tensor_type_shapes:
type_shape_proto = info_proto.default_graph_output_tensor_shape.add()
type_shape_proto.dtype = dtypes.as_dtype(type_shape[0]).as_datatype_enum
for dim in type_shape[1]:
type_shape_proto.shape.dim.add().size = dim
serialized_info = info_proto.SerializeToString()
return gen_remote_fused_graph_ops.remote_fused_graph_execute(
inputs, output_types, serialized_info)
|
tensorflow-master
|
tensorflow/contrib/remote_fused_graph/pylib/python/ops/remote_fused_graph_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RMSprop optimizer for Tensorflow.
rmsprop algorithm [tieleman2012rmsprop]
A detailed description of rmsprop.
- maintain a moving (discounted) average of the square of gradients
- divide gradient by the root of this average
mean_square = rho * mean_square{t-1} + (1-rho) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t / sqrt(mean_square)
delta = - mom
This implementation of RMSProp uses plain momentum, not Nesterov momentum.
The centered version additionally maintains a moving (discounted) average of the
gradients, and uses that average to estimate the variance:
mean_grad = rho * mean_grad{t-1} + (1-rho) * gradient
mean_square = rho * mean_square{t-1} + (1-rho) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t /
sqrt(mean_square - mean_grad**2)
delta = - mom
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.training import training_ops
class RMSPropOptimizer(optimizer_v2.OptimizerV2):
"""Optimizer that implements the RMSProp algorithm.
See the
[paper]
(http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).
"""
def __init__(self,
learning_rate,
decay=0.9,
momentum=0.0,
epsilon=1e-10,
use_locking=False,
centered=False,
name="RMSProp"):
"""Construct a new RMSProp optimizer.
Note that in the dense implementation of this algorithm, variables and their
corresponding accumulators (momentum, gradient moving average, square
gradient moving average) will be updated even if the gradient is zero
(i.e. accumulators will decay, momentum will be applied). The sparse
implementation (used when the gradient is an `IndexedSlices` object,
typically because of `tf.gather` or an embedding lookup in the forward pass)
will not update variable slices or their accumulators unless those slices
were used in the forward pass (nor is there an "eventual" correction to
account for these omitted updates). This leads to more efficient updates for
large embedding lookup tables (where most of the slices are not accessed in
a particular graph execution), but differs from the published algorithm.
Some of the args below are hyperparameters, where a hyperparameter is
defined as a scalar Tensor, a regular Python value or a callable (which
will be evaluated when `apply_gradients` is called) returning a scalar
Tensor or a Python value.
Args:
learning_rate: A float hyperparameter. The learning rate.
decay: A float hyperparameter. Discounting factor for the history/coming
gradient.
momentum: A float hyperparameter.
epsilon: A float hyperparameter. Small value to initialize the average
square gradient variable and avoid zero denominator.
use_locking: If True use locks for update operation.
centered: If True, gradients are normalized by the estimated variance of
the gradient; if False, by the uncentered second moment. Setting this to
True may help with training, but is slightly more expensive in terms of
computation and memory. Defaults to False.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "RMSProp".
"""
super(RMSPropOptimizer, self).__init__(use_locking, name)
self._set_hyper("learning_rate", learning_rate)
self._set_hyper("decay", decay)
self._set_hyper("momentum", momentum)
self._set_hyper("epsilon", epsilon)
self._centered = centered
def _create_vars(self, var_list, state):
for v in var_list:
init_rms = state.get_hyper("epsilon",
v.dtype.base_dtype) * array_ops.ones_like(v)
state.create_slot_with_initializer(v, init_rms, v.get_shape(),
v.dtype.base_dtype, "rms")
if self._centered:
state.zeros_slot(v, "mg")
state.zeros_slot(v, "momentum")
def _apply_dense(self, grad, var, state):
rms = state.get_slot(var, "rms")
mom = state.get_slot(var, "momentum")
if self._centered:
mg = state.get_slot(var, "mg")
return training_ops.apply_centered_rms_prop(
var,
mg,
rms,
mom,
state.get_hyper("learning_rate", var.dtype.base_dtype),
state.get_hyper("decay", var.dtype.base_dtype),
state.get_hyper("momentum", var.dtype.base_dtype),
# epsilon is now the rms initial value and is not added to the
# denominator anymore, hence calling the kernel op with epsilon=0.
0,
grad,
use_locking=self._use_locking).op
else:
return training_ops.apply_rms_prop(
var,
rms,
mom,
state.get_hyper("learning_rate", var.dtype.base_dtype),
state.get_hyper("decay", var.dtype.base_dtype),
state.get_hyper("momentum", var.dtype.base_dtype),
0,
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var, state):
rms = state.get_slot(var, "rms")
mom = state.get_slot(var, "momentum")
if self._centered:
mg = state.get_slot(var, "mg")
return training_ops.resource_apply_centered_rms_prop(
var.handle,
mg.handle,
rms.handle,
mom.handle,
state.get_hyper("learning_rate", var.dtype.base_dtype),
state.get_hyper("decay", var.dtype.base_dtype),
state.get_hyper("momentum", var.dtype.base_dtype),
0,
grad,
use_locking=self._use_locking)
else:
return training_ops.resource_apply_rms_prop(
var.handle,
rms.handle,
mom.handle,
state.get_hyper("learning_rate", var.dtype.base_dtype),
state.get_hyper("decay", var.dtype.base_dtype),
state.get_hyper("momentum", var.dtype.base_dtype),
0,
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var, state):
rms = state.get_slot(var, "rms")
mom = state.get_slot(var, "momentum")
if self._centered:
mg = state.get_slot(var, "mg")
return training_ops.sparse_apply_centered_rms_prop(
var,
mg,
rms,
mom,
state.get_hyper("learning_rate", var.dtype.base_dtype),
state.get_hyper("decay", var.dtype.base_dtype),
state.get_hyper("momentum", var.dtype.base_dtype),
0,
grad.values,
grad.indices,
use_locking=self._use_locking)
else:
return training_ops.sparse_apply_rms_prop(
var,
rms,
mom,
state.get_hyper("learning_rate", var.dtype.base_dtype),
state.get_hyper("decay", var.dtype.base_dtype),
state.get_hyper("momentum", var.dtype.base_dtype),
0,
grad.values,
grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices, state):
rms = state.get_slot(var, "rms")
mom = state.get_slot(var, "momentum")
if self._centered:
mg = self.get_slot(var, "mg")
return training_ops.resource_sparse_apply_centered_rms_prop(
var.handle,
mg.handle,
rms.handle,
mom.handle,
state.get_hyper("learning_rate", var.dtype.base_dtype),
state.get_hyper("decay", var.dtype.base_dtype),
state.get_hyper("momentum", var.dtype.base_dtype),
0,
grad,
indices,
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_rms_prop(
var.handle,
rms.handle,
mom.handle,
state.get_hyper("learning_rate", var.dtype.base_dtype),
state.get_hyper("decay", var.dtype.base_dtype),
state.get_hyper("momentum", var.dtype.base_dtype),
0,
grad,
indices,
use_locking=self._use_locking)
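# --- Illustrative sketch, not part of the original module ---
# A NumPy rendering of the plain and centered update rules from the module
# docstring above. Note that the kernels used by this class fold `epsilon`
# into the initial value of the `rms` slot instead of adding it to the
# denominator; the functions below follow the docstring equations directly,
# and all names are illustrative only.
import numpy as np


def _numpy_rmsprop_step(var, grad, mean_square, mom, lr, rho, momentum):
  """One plain RMSProp step following the docstring's update rules."""
  mean_square = rho * mean_square + (1 - rho) * grad ** 2
  mom = momentum * mom + lr * grad / np.sqrt(mean_square)
  return var - mom, mean_square, mom


def _numpy_centered_rmsprop_step(var, grad, mean_grad, mean_square, mom, lr,
                                 rho, momentum):
  """Centered RMSProp step: also track the mean gradient."""
  mean_grad = rho * mean_grad + (1 - rho) * grad
  mean_square = rho * mean_square + (1 - rho) * grad ** 2
  mom = momentum * mom + lr * grad / np.sqrt(mean_square - mean_grad ** 2)
  return var - mom, mean_grad, mean_square, mom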
|
tensorflow-master
|
tensorflow/contrib/optimizer_v2/rmsprop.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Momentum for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.optimizer_v2 import optimizer_v2
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import training_ops
class GradientDescentOptimizer(optimizer_v2.OptimizerV2):
"""Optimizer that implements the gradient descent algorithm."""
def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
"""Construct a new gradient descent optimizer.
The learning rate arg below is a hyperparameter where a hyperparameter is
defined as a scalar Tensor, a regular Python value or a callable (which
will be evaluated when `apply_gradients` is called) returning a scalar
Tensor or a Python value.
Args:
learning_rate: A float hyperparameter. The learning rate to use.
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
"""
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self._set_hyper("learning_rate", learning_rate)
def _apply_dense(self, grad, var, state):
return training_ops.apply_gradient_descent(
var,
state.get_hyper("learning_rate", var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, handle, state):
lr = state.get_hyper("learning_rate", grad.dtype.base_dtype)
return training_ops.resource_apply_gradient_descent(
handle.handle, lr, grad, use_locking=self._use_locking)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices,
state):
lr = state.get_hyper("learning_rate", grad.dtype.base_dtype)
return resource_variable_ops.resource_scatter_add(handle.handle, indices,
-grad * lr)
def _apply_sparse_duplicate_indices(self, grad, var, state):
delta = ops.IndexedSlices(
grad.values * state.get_hyper("learning_rate", var.dtype.base_dtype),
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
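# --- Illustrative sketch, not part of the original module ---
# A NumPy rendering of the sparse update performed by
# `_apply_sparse_duplicate_indices` above: subtract lr * grad at the given
# indices, with contributions for duplicate indices accumulating. Names are
# illustrative only.
import numpy as np


def _numpy_sparse_gradient_descent(var, grad_values, grad_indices, lr):
  """Applies var[i] -= lr * g for each (i, g) pair, summing duplicates."""
  updated = np.array(var, dtype=np.float64)
  np.subtract.at(updated, np.asarray(grad_indices),
                 lr * np.asarray(grad_values))
  return updated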
|
tensorflow-master
|
tensorflow/contrib/optimizer_v2/gradient_descent.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.optimizer_v2 import adagrad
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AdagradOptimizerTest(test.TestCase):
def doTestBasic(self, use_locking=False, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
if use_resource:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1, use_locking=use_locking)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testBasic(self):
self.doTestBasic(use_locking=False)
def testBasicResource(self):
self.doTestBasic(use_locking=False, use_resource=True)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable(
[[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType(
[[1.0, 2.0], [3.0, 4.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0, 1], [3, 4]], var0.eval(), atol=0.01)
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([[1.0], [2.0]], var0.eval())
self.assertAllClose([[3.0], [4.0]], var1.eval())
# Run 3 step of sgd
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([[-1.6026098728179932], [2.0]]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([[3.0], [3.715679168701172]]), var1.eval())
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
def testSparseRepeatedIndicesResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_repeated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_repeated = math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_repeated, [0, 0]))
var_aggregated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_aggregated = 2 * math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_aggregated, [0]))
update_op_repeated = adagrad.AdagradOptimizer(
2.0).minimize(loss_repeated)
update_op_aggregated = adagrad.AdagradOptimizer(
2.0).minimize(loss_aggregated)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(
var_repeated.eval(), var_aggregated.eval())
for _ in range(3):
update_op_repeated.run()
update_op_aggregated.run()
self.assertAllCloseAccordingToType(
var_repeated.eval(), var_aggregated.eval())
def testSparseStability(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
shape = [1, 6]
var0 = variables.Variable(
[[
0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
-0.0105945
]],
dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[[
-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
-8.4877e-05, -9.48906e-05
]],
shape=shape,
dtype=dtype),
constant_op.constant([0]),
constant_op.constant(shape))
ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
init = variables.global_variables_initializer()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([[
0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
-0.01029443
]]), var0.eval())
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = ada_opt.get_slot(var1, "accumulator")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
variables.global_variables_initializer().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testDynamicShapeVariable_Ok(self):
with self.cached_session():
v = variable_scope.get_variable("v", initializer=constant_op.constant(1.),
validate_shape=False)
self.assertFalse(v.shape.is_fully_defined())
# Creating optimizer should cause no exception.
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/optimizer_v2/adagrad_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.optimizer_v2 import adam
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
  # Bias-corrected step size for step t (folds the m_t / v_t corrections).
  alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  # Exponential moving averages of the gradient and the squared gradient.
  m_t = beta1 * m + (1 - beta1) * g_t
  v_t = beta2 * v + (1 - beta2) * g_t * g_t
  # Parameter update.
  param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
class AdamOptimizerTest(test.TestCase):
def doTestSparse(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSparse(self):
self.doTestSparse(use_resource=False)
def testResourceSparse(self):
self.doTestSparse(use_resource=True)
def testSparseDevicePlacement(self):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
optimizer = adam.AdamOptimizer(3.0)
minimize_op = optimizer.minimize(gathered_sum)
variables.global_variables_initializer().run()
minimize_op.run()
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
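# The two gradients above are intended to be equivalent: the repeated-index
# slices contribute 0.1 twice at row 1, which should aggregate to the single
# 0.2 update, so both variables are expected to stay equal after each step.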
repeated_update = adam.AdamOptimizer().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.AdamOptimizer().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
def doTestBasic(self, use_resource=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
opt_variables = opt.variables()
beta1_power, beta2_power = opt._get_beta_accumulators()
self.assertIsNotNone(beta1_power)
self.assertIsNotNone(beta2_power)
self.assertIn(beta1_power, opt_variables)
self.assertIn(beta2_power, opt_variables)
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta2_power))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if use_resource:
self.assertEqual("var0_%d/Adam:0" % (i,),
opt.get_slot(var=var0, name="m").name)
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.AdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testTwoSessions(self):
optimizer = adam.AdamOptimizer()
g = ops.Graph()
with g.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
gg = ops.Graph()
with gg.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
# If the optimizer saves any state that is not keyed by the graph, the
# following line fails.
optimizer.apply_gradients([(grads0, var0)])
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = adam.AdamOptimizer(1.)
opt.minimize(lambda: v1 + v2)
# opt.variables() should contain two non-slot variables (the beta1 and beta2
# power accumulators) plus an "m" and a "v" slot for each of v1 and v2,
# i.e. six unique variables in total.
self.assertEqual(6, len(set(opt.variables())))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/optimizer_v2/adam_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for GradientDescent optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.optimizer_v2 import gradient_descent
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class GradientDescentOptimizerTest(test.TestCase):
def testBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
optimizer = gradient_descent.GradientDescentOptimizer(3.0)
sgd_op = optimizer.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
var0.eval())
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
var1.eval())
self.assertEqual(0, len(optimizer.variables()))
def testBasicResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]))
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
var0.eval())
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
var1.eval())
def testMinimizeResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(var0, x) + var1
loss = pred * pred
sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
self.assertAllCloseAccordingToType([3.0], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
np_grad = 2 * np_pred
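# Derivation: loss = pred**2 with pred = 1*4 + 2*5 + 3 = 17, so
# d(loss)/d(pred) = 2 * pred = np_grad, d(loss)/d(var0) = np_grad * [4, 5],
# and d(loss)/d(var1) = np_grad; one SGD step with learning rate 1.0
# subtracts these gradients, giving the expected values below.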
self.assertAllCloseAccordingToType(
[[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
self.assertAllCloseAccordingToType([3.0 - np_grad], var1.eval())
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
pred += var1
loss = pred * pred
sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
self.assertAllCloseAccordingToType([3.0], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
np_grad = 2 * np_pred
self.assertAllCloseAccordingToType(
[[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
self.assertAllCloseAccordingToType([3.0 - np_grad], var1.eval())
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
lrate = constant_op.constant(3.0)
sgd_op = gradient_descent.GradientDescentOptimizer(
lrate).apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
var0.eval())
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
var1.eval())
def testGradWrtRef(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
opt = gradient_descent.GradientDescentOptimizer(3.0)
values = [1.0, 3.0]
vars_ = [variables.Variable([v], dtype=dtype) for v in values]
grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_)
variables.global_variables_initializer().run()
for grad, _ in grads_and_vars:
self.assertAllCloseAccordingToType([1.0], grad.eval())
def testWithGlobalStep(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
global_step = variables.Variable(0, trainable=False)
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params and global_step
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
var0.eval())
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
var1.eval())
self.assertAllCloseAccordingToType(1, global_step.eval())
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0], [2.0]], var0.eval())
self.assertAllCloseAccordingToType([[3.0], [4.0]], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]],
var0.eval())
self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]],
var1.eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/optimizer_v2/gradient_descent_test.py
|