python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import Iterable, Optional
from compiler_gym.datasets import Dataset
from compiler_gym.envs.mlir.datasets.matmul import MatmulBenchmark, MatmulDataset
from compiler_gym.util.runfiles_path import site_data_path
def get_mlir_datasets(site_data_base: Optional[Path] = None) -> Iterable[Dataset]:
"""Instantiate the builtin datasets.
:param site_data_base: The root of the site data path.
:return: An iterable sequence of :class:`Dataset
<compiler_gym.datasets.Dataset>` instances.
"""
site_data_base = site_data_base or site_data_path("mlir-v0")
yield MatmulDataset(site_data_base=site_data_base)
__all__ = ["MatmulDataset", "MatmulBenchmark"]
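# Usage sketch (illustrative; assumes a local CompilerGym installation with
# MLIR support): enumerating the builtin datasets yielded by
# get_mlir_datasets().
#
#   for dataset in get_mlir_datasets():
#       print(dataset.name)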
| CompilerGym-development | compiler_gym/envs/mlir/datasets/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Dict, List
from compiler_gym.errors import ServiceError
from compiler_gym.service.proto import ObservationSpace
from compiler_gym.util.gym_type_hints import (
ActionType,
ObservationType,
RewardType,
StepType,
)
from compiler_gym.views.observation_space_spec import ObservationSpaceSpec
class ObservationView:
"""A view into the available observation spaces of a service.
Example usage:
>>> env = gym.make("llvm-v0")
>>> env.reset()
>>> env.observation.spaces.keys()
["Autophase", "Ir"]
>>> env.observation.spaces["Autophase"].space
Box(56,)
>>> env.observation["Autophase"]
[0, 1, ..., 2]
>>> env.observation["Ir"]
int main() {...}
"""
def __init__(
self,
raw_step: Callable[
[List[ActionType], List[ObservationType], List[RewardType]], StepType
],
spaces: List[ObservationSpace],
):
if not spaces:
raise ValueError("No observation spaces")
self.spaces: Dict[str, ObservationSpaceSpec] = {}
self._raw_step = raw_step
for i, s in enumerate(spaces):
self._add_space(ObservationSpaceSpec.from_proto(i, s))
def __getitem__(self, observation_space: str) -> ObservationType:
"""Request an observation from the given space.
:param observation_space: The observation space to query.
:return: An observation.
:raises KeyError: If the requested observation space does not exist.
:raises SessionNotFound: If :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` has not been called.
:raises ServiceError: If the backend service fails to compute the
observation, or reports that a terminal state has been reached.
"""
observation_space: ObservationSpaceSpec = self.spaces[observation_space]
observations, _, done, info = self._raw_step(
actions=[], observation_spaces=[observation_space], reward_spaces=[]
)
if done:
# Computing an observation should never cause a terminal state since
# no action has been applied.
msg = f"Failed to compute observation '{observation_space.id}'"
if info.get("error_details"):
msg += f": {info['error_details']}"
raise ServiceError(msg)
if len(observations) != 1:
raise ServiceError(
f"Expected 1 '{observation_space.id}' observation "
f"but the service returned {len(observations)}"
)
return observations[0]
def _add_space(self, space: ObservationSpaceSpec):
"""Register a new space."""
self.spaces[space.id] = space
# Bind a new method to this class that is a callback to compute the
# given observation space. E.g. if a new space is added with ID
# `FooBar`, this observation can be computed using
# env.observation.FooBar().
setattr(self, space.id, lambda: self[space.id])
def add_derived_space(
self,
id: str,
base_id: str,
**kwargs,
) -> None:
"""Internal API for adding a new observation space."""
base_space = self.spaces[base_id]
self._add_space(base_space.make_derived_space(id=id, **kwargs))
def __repr__(self):
return f"ObservationView[{', '.join(sorted(self.spaces.keys()))}]"
| CompilerGym-development | compiler_gym/views/observation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from compiler_gym.views.observation import ObservationView
from compiler_gym.views.observation_space_spec import ObservationSpaceSpec
from compiler_gym.views.reward import RewardView
__all__ = ["ObservationView", "ObservationSpaceSpec", "RewardView"]
| CompilerGym-development | compiler_gym/views/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Dict, List
from compiler_gym.datasets import Benchmark
from compiler_gym.spaces.reward import Reward
from compiler_gym.views.observation import ObservationView
class RewardView:
"""A view into a set of reward spaces.
Example usage:
>>> env = gym.make("llvm-v0")
>>> env.reset()
>>> env.reward.spaces["codesize"].range
(-np.inf, 0)
>>> env.reward["codesize"]
-1243
:ivar spaces: Specifications of available reward spaces.
:vartype spaces: Dict[str, Reward]
"""
def __init__(
self,
spaces: List[Reward],
observation_view: ObservationView,
):
self.spaces: Dict[str, Reward] = {}
self.previous_action = None
self._observation_view = observation_view
for space in spaces:
self._add_space(space)
def __getitem__(self, reward_space: str) -> float:
"""Request an observation from the given space.
:param reward_space: The reward space to query.
:return: A reward.
:raises KeyError: If the requested reward space does not exist.
:raises SessionNotFound: If :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` has not been called.
"""
# TODO(cummins): Since reward is a function from (state, action) -> r
# it would be better to make the list of rewards to evaluate an argument
# to env.step() rather than using this lazy view.
if not self.spaces:
raise ValueError("No reward spaces")
space = self.spaces[reward_space]
observations = [self._observation_view[obs] for obs in space.observation_spaces]
return space.update(self.previous_action, observations, self._observation_view)
def reset(self, benchmark: Benchmark, observation_view: ObservationView) -> None:
"""Reset the rewards space view. This is called on
:meth:`env.reset() <compiler_gym.envs.CompilerEnv.reset>`.
:param benchmark: The benchmark that is used for this episode.
"""
self.previous_action = None
for space in self.spaces.values():
space.reset(benchmark=benchmark, observation_view=observation_view)
def add_space(self, space: Reward) -> None:
"""Register a new :class:`Reward <compiler_gym.spaces.Reward>` space.
:param space: The reward space to be added.
"""
if space.name in self.spaces:
warnings.warn(f"Replacing existing reward space '{space.name}'")
self._add_space(space)
def _add_space(self, space: Reward):
"""Register a new space."""
self.spaces[space.name] = space
# Bind a new method to this class that is a callback to compute the
# given reward space. E.g. if a new space is added with name `FooBar`,
# this reward can be computed using env.reward.FooBar().
setattr(self, space.name, lambda: self[space.name])
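# Illustrative sketch: querying a reward space after a step, mirroring the
# class docstring. "IrInstructionCount" is an assumed space name; the
# available names depend on the environment.
#
#   env = gym.make("llvm-v0")
#   env.reset()
#   env.step(env.action_space.sample())
#   env.reward["IrInstructionCount"]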
| CompilerGym-development | compiler_gym/views/reward.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, ClassVar, Optional, Union
from gym.spaces import Space
from compiler_gym.service.proto import Event, ObservationSpace, py_converters
from compiler_gym.util.gym_type_hints import ObservationType
from compiler_gym.util.shell_format import indent
class ObservationSpaceSpec:
"""Specification of an observation space.
:ivar id: The name of the observation space.
:vartype id: str
:ivar index: The index into the list of observation spaces that the service
supports.
:vartype index: int
:ivar space: The space.
:vartype space: Space
:ivar deterministic: Whether the observation space is deterministic.
:vartype deterministic: bool
:ivar platform_dependent: Whether the observation values depend on the
execution environment of the service.
:vartype platform_dependent: bool
:ivar default_value: A default observation. This value will be returned by
:func:`CompilerEnv.step() <compiler_gym.envs.CompilerEnv.step>` if
:func:`CompilerEnv.observation_space <compiler_gym.envs.CompilerEnv.observation_space>`
is set and the service terminates.
"""
message_converter: ClassVar[
Callable[[Any], Any]
] = py_converters.make_message_default_converter()
def __init__(
self,
id: str,
index: int,
space: Space,
translate: Callable[[Union[ObservationType, Event]], ObservationType],
to_string: Callable[[ObservationType], str],
deterministic: bool,
platform_dependent: bool,
default_value: ObservationType,
):
"""Constructor. Don't call directly, use make_derived_space()."""
self.id: str = id
self.index: int = index
self.space = space
self.deterministic = deterministic
self.platform_dependent = platform_dependent
self.default_value = default_value
self.translate = translate
self.to_string = to_string
def __hash__(self) -> int:
# Quickly hash observation spaces by comparing the index into the list
# of spaces returned by the environment. This means that you should not
# hash between observation spaces from different environments as this
# will cause collisions, e.g.
#
# # not okay:
# >>> obs = set(env.observation.spaces).union(
# other_env.observation.spaces
# )
#
# If you want to hash between environments, consider using the string id
# to identify the observation spaces.
return self.index
def __repr__(self) -> str:
return f"ObservationSpaceSpec({self.id})"
def __eq__(self, rhs) -> bool:
"""Equality check."""
if isinstance(rhs, str):
return self.id == rhs
elif isinstance(rhs, ObservationSpaceSpec):
return (
self.id == rhs.id
and self.index == rhs.index
and self.space == rhs.space
and self.platform_dependent == rhs.platform_dependent
and self.deterministic == rhs.deterministic
)
return False
@classmethod
def from_proto(cls, index: int, proto: ObservationSpace):
"""Create an observation space from a ObservationSpace protocol buffer.
:param index: The index of this observation space into the list of
observation spaces that the compiler service supports.
:param proto: An ObservationSpace protocol buffer.
:raises ValueError: If protocol buffer is invalid.
"""
try:
spec = ObservationSpaceSpec.message_converter(proto)
except ValueError as e:
raise ValueError(
f"Error interpreting description of observation space '{proto.name}'.\n"
f"Error: {e}\n"
f"ObservationSpace message:\n"
f"{indent(proto.space, n=2)}"
) from e
# TODO(cummins): Additional validation of the observation space
# specification would be useful here, such as making sure that the size
# of {low, high} tensors for box shapes match. At present, these errors
# tend not to show up until later, making it more difficult to debug.
return cls(
id=proto.name,
index=index,
space=spec,
translate=ObservationSpaceSpec.message_converter,
to_string=str,
deterministic=proto.deterministic,
platform_dependent=proto.platform_dependent,
default_value=ObservationSpaceSpec.message_converter(
proto.default_observation
),
)
def make_derived_space(
self,
id: str,
translate: Callable[[ObservationType], ObservationType],
space: Optional[Space] = None,
deterministic: Optional[bool] = None,
default_value: Optional[ObservationType] = None,
platform_dependent: Optional[bool] = None,
to_string: Callable[[ObservationType], str] = None,
) -> "ObservationSpaceSpec":
"""Create a derived observation space.
:param id: The name of the derived observation space.
:param translate: A callback function to compute a derived observation
from the base observation.
:param space: The :code:`gym.Space` describing the observation space.
:param deterministic: Whether the observation space is deterministic.
If not provided, the value is inherited from the base observation
space.
:param default_value: The default value for the observation space. If
not provided, the value is derived from the default value of the
base observation space.
:param platform_dependent: Whether the derived observation space is
platform-dependent. If not provided, the value is inherited from
the base observation space.
:param to_string: A callback to convert an observation to a string
representation. If not provided, the callback is inherited from the
base observation space.
:return: A new ObservationSpaceSpec.
"""
return ObservationSpaceSpec(
id=id,
index=self.index,
space=space or self.space,
translate=lambda observation: translate(self.translate(observation)),
to_string=to_string or self.to_string,
default_value=(
translate(self.default_value)
if default_value is None
else default_value
),
deterministic=(
self.deterministic if deterministic is None else deterministic
),
platform_dependent=(
self.platform_dependent
if platform_dependent is None
else platform_dependent
),
)
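# Illustrative sketch: deriving a new spec by composing a translation function
# with an existing one. `base_spec` is an assumed ObservationSpaceSpec, e.g.
# one built with from_proto().
#
#   derived = base_spec.make_derived_space(
#       id=base_spec.id + "Length",
#       translate=lambda observation: len(observation),
#   )
#   assert derived.index == base_spec.index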
| CompilerGym-development | compiler_gym/views/observation_space_spec.py |
# Copyright 2021 The TF-Coder Authors.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Defines the FilterGroup enum.
Every function used in TF-Coder is associated with one such FilterGroup.
"""
import enum
# LINT.IfChange(FilterGroup)
@enum.unique
class FilterGroup(enum.Enum):
"""A group of similar operations that should have the same filters.
The number of arguments is crucially important when adding filters, so by
convention the enum names have the number of arguments at the end.
"""
# No filters. Even if some filtering might be reasonable, it could be faster
# to just try all values to avoid the filtering overhead.
NONE = "NONE"
#############################
# Operations with 1 argument.
# The argument is a shape.
SHAPE_1 = "SHAPE_1"
# The argument is a tensor.
TENSOR_1 = "TENSOR_1"
# The argument is a sequence of tensors.
TENSORSEQUENCE_1 = "TENSORSEQUENCE_1"
# The argument is a float tensor.
FLOATTENSOR_1 = "FLOATTENSOR_1"
# The argument is an int or float tensor.
NUMERICTENSOR_1 = "NUMERICTENSOR_1"
# The argument is a primitive or tensor.
PRIMITIVE_OR_TENSOR_1 = "PRIMITIVE_OR_TENSOR_1"
################################
# Operations with 2 arguments.
# The first argument is a tensor, and the second argument is an int
# representing an axis, i.e., an int in the range [1, rank_of_tensor).
TENSOR_AXIS_2 = "TENSOR_AXIS_2"
# The first argument is an int or float tensor, the second is an axis.
NUMERICTENSOR_AXIS_2 = "NUMERICTENSOR_AXIS_2"
# The first argument is a sequence of tensors, the second is an axis.
TENSORSEQUENCE_AXIS_2 = "TENSORSEQUENCE_AXIS_2"
# The first argument is a tensor, the second is a boolean tensor.
TENSOR_BOOLTENSOR_2 = "TENSOR_BOOLTENSOR_2"
# The two arguments are numeric (int or float) tensors with the same shape.
SAME_SHAPES_NUMERICTENSOR_2 = "SAME_SHAPES_NUMERICTENSOR_2"
# The two arguments are numeric (int or float) tensors with the same dtype,
# and the two tensors are broadcastable.
SAME_DTYPE_NUMERIC_BROADCASTABLE_2 = "SAME_DTYPE_NUMERIC_BROADCASTABLE_2"
# The first argument is a numeric tensor, and the second is either a scalar
# or a tensor. The two arguments are broadcastable.
ELEMENTWISE_COMPARISON_2 = "ELEMENTWISE_COMPARISON_2"
# The first argument is a numeric tensor, and the second is either a scalar
# or a tensor. The two arguments are broadcastable, but they must be different.
NE_BROADCASTABLE_2 = "NE_BROADCASTABLE_2"
#########################################
# Operations with other special handling.
# The argument contains nonnegative ints with a small maximum.
BINCOUNT_1 = "BINCOUNT_1"
# The argument results in a small tensor.
EYE_1 = "EYE_1"
# The argument results in a small tensor.
RANGE_1 = "RANGE_1"
# The argument is either a primitive or a sequence of primitives.
TENSORIZABLE_1 = "TENSORIZABLE_1"
# The arguments should be 3-D tensors, and the first argument's
# third dimension size should be equal to the second argument's
# second dimension size.
BMM_2 = "BMM_2"
# The first argument is a sequence of tensors,
# the second is an axis in the range [-1, rank_of_tensor-1].
CAT_TENSORSEQUENCE_AXIS_2 = "CAT_TENSORSEQUENCE_AXIS_2"
# Both arguments are tensors and have the same shape.
# The dimensions should be greater than 1.
CDIST_2 = "CDIST_2"
# The first argument is a tensor, the second is an axis in the range
# [-1, rank_of_tensor-1]. Note that this range is slightly different from the
# TENSOR_AXIS_2 filter.
EXPAND_DIMS_2 = "EXPAND_DIMS_2"
# The first argument is a tensor, the second is an axis in the range
# [-1, rank_of_tensor].
EXPAND_DIMS_ADDITIONAL_2 = "EXPAND_DIMS_ADDITIONAL_2"
# The arguments result in a small tensor.
EYE_ROWS_COLS_2 = "EYE_ROWS_COLS_2"
# Ensures the tensors are both numeric and have the same dtype and rank.
MATMUL_2 = "MATMUL_2"
# Ensures the tensors are both numeric and have the same dtype and rank.
MM_2 = "MM_2"
# The first argument is a tensor, the second is an axis in the range
# [-1, rank_of_tensor-1]. The first argument must be float or int.
NORMALIZE_2 = "NORMALIZE_2"
# Ensures that torch.nn.functional.one_hot(indices, num_classes) produces a small result.
ONE_HOT_2 = "ONE_HOT_2"
# The first argument must be a tensor, and the second must be a nested int
# list or int32 tensor of shape [rank_of_arg_1, 2].
PAD_2 = "PAD_2"
# The first argument is a tensor, and the second is a tuple.
RESHAPE_2 = "RESHAPE_2"
# Ensures that torch.tile(input, multiples) produces a small result.
TILE_2 = "TILE_2"
# The first argument is sorted in the last dimension, the second argument is
# the same dtype and rank, and all dimension lengths match except the last.
SEARCHSORTED_2 = "SEARCHSORTED_2"
# The first argument is a tensor with more than 1 squeezable dimension, and
# the second argument is an int specifying a squeezable dimension.
SQUEEZE_2 = "SQUEEZE_2"
# The first argument is a non-scalar tensor, the second is a dimension, and
# the third is a tensor containing ints suitable for indexing into the first
# tensor.
GATHER_3 = "GATHER_3"
# The first argument is a tensor, the second is a tensor containing ints
# suitable for indexing into the first tensor on multiple dimensions, and the
# third is a number of batch dimensions.
INDEX_SELECT_3 = "INDEX_SELECT_3"
# The arguments result in a small tensor.
RANGE_3 = "RANGE_3"
# The first argument is a tensor, the second argument is either a numeric tensor
# or an integer, and the third argument is an int specifying the dimension.
REPEAT_3 = "REPEAT_3"
# The second and third arguments must be int primitives, lists of ints, or 1D
# int tensors, and they must have the same shape.
ROLL_3 = "ROLL_3"
# The first two arguments are tensors with the same dtype, and the third
# contains ints of the appropriate shape.
TENSORDOT_3 = "TENSORDOT_3"
# The first argument is a tensor, and the second and the third are dimensions
# to transpose.
TRANSPOSE_3 = "TRANSPOSE_3"
# Ensures that the shapes and dtypes for torch.where(condition, tensor, tensor/number) match.
WHERE_TENSOR_3 = "WHERE_TENSOR_3"
# Ensures that the shapes and dtypes for torch.where(condition, number, tensor/number) match.
WHERE_NUMERIC_3 = "WHERE_NUMERIC_3"
# LINT.ThenChange(value_search/operation_filtering.py:add_filters_to_function_operation)
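# Illustrative sketch: FilterGroup members are ordinary enum values, so they
# can be compared by identity and recovered from their string value.
#
#   group = FilterGroup.TENSOR_AXIS_2
#   assert group.value == "TENSOR_AXIS_2"
#   assert FilterGroup("TENSOR_AXIS_2") is group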
| APIsynth-master | Synthesis_incorporation/filter_group.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Lint as: python3
"""Functions and arguments used in the PyCoder project."""
import ast
import collections
import torch
from tf_coder import filter_group
FilterGroup = filter_group.FilterGroup
FunctionInfo = collections.namedtuple(
'FunctionInfo',
['name', 'filter_group', 'weight'])
# Weights for leaf nodes in the AST.
# Constants given by the user.
PROVIDED_CONSTANT_WEIGHT = 7
# Ubiquitous constants: 0, 1, -1.
COMMON_CONSTANT_WEIGHT = 8
# A torch.constant() wrapper around an input primitive.
PRIMITIVE_INPUT_AS_TENSOR_WEIGHT = 5
# Int constants meant to be axis values, chosen based on input tensor ranks.
AXIS_CONSTANT_WEIGHT = 14
# Int constants obtained from input/output tensor shapes.
SHAPE_CONSTANT_WEIGHT = 24
# Weight of constructing a tuple with the output shape.
OUTPUT_SHAPE_TUPLE_WEIGHT = 32
# Input variable nodes (in1, in2, etc.).
INPUT_VARIABLE_WEIGHT = 4
# DTypes with weights to add to the pool of constants.
CONSTANT_DTYPES_AND_WEIGHTS = collections.OrderedDict([
(torch.int32, 8),
(torch.float32, 8),
(torch.bool, 8),
(torch.int64, 16),
])
# Used in value search to convert primitive inputs (e.g., 3) into scalar tensors
# (e.g., torch.tensor(3)).
CONSTANT_OPERATION_NAME = 'torch.tensor(data)'
INT_OPERATION_NAME = 'IntOperation'
FLOAT_OPERATION_NAME = 'FloatOperation'
BOOL_OPERATION_NAME = 'BoolOperation'
# A list of FunctionInfo namedtuples, each describing one function usable by a
# program synthesizer. Each FunctionInfo's name contains the function name along
# with the names of the arguments for that function, in the order given in the
# function's signature. A function may appear multiple times with different
# lists of usable arguments. This list is ordered, so value search will try
# earlier functions before later ones.
# FunctionInfo name format: "torch.module.function(arg_1, arg_2, arg_3='value')"
# means call the function `torch.module.function` with varying inputs `arg_1` and
# `arg_2`, where `arg_3` is fixed and set to the literal constant `'value'`.
TORCH_FUNCTIONS = [
# FunctionInfo(name='torch.abs(input)',
# filter_group=FilterGroup.NUMERICTENSOR_1,
# weight=40),
FunctionInfo(name='torch.add(input, other)',
filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2,
weight=28),
# # FunctionInfo(name='torch.all(input)',
# # filter_group=FilterGroup.TENSOR_1,
# # weight=40),
# # FunctionInfo(name='torch.all(input, dim)',
# # filter_group=FilterGroup.EXPAND_DIMS_2,
# # weight=40),
FunctionInfo(name='torch.any(input)',
filter_group=FilterGroup.TENSOR_1,
weight=40),
FunctionInfo(name='torch.any(input, dim)',
filter_group=FilterGroup.EXPAND_DIMS_2,
weight=40),
FunctionInfo(name='torch.arange(end)',
filter_group=FilterGroup.RANGE_1,
weight=28),
# FunctionInfo(name='torch.arange(start, end, step)',
# filter_group=FilterGroup.RANGE_3,
# weight=56),
FunctionInfo(name='torch.argmax(input)',
filter_group=FilterGroup.NUMERICTENSOR_1,
weight=32),
FunctionInfo(name='torch.argmax(input, dim)',
filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
weight=32),
# # FunctionInfo(name='torch.argsort(input, dim, descending=True)',
# # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
# # weight=48),
# # FunctionInfo(name='torch.argsort(input, dim, descending=False)',
# # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
# # weight=48),
FunctionInfo(name='torch.bincount(input)',
filter_group=FilterGroup.BINCOUNT_1,
weight=40),
# # FunctionInfo(name='torch.bmm(input, mat2)',
# # filter_group=FilterGroup.BMM_2,
# # weight=40),
# # FunctionInfo(name='torch.cat(tensors, dim)',
# # filter_group=FilterGroup.CAT_TENSORSEQUENCE_AXIS_2,
# # weight=36),
FunctionInfo(name='torch.cdist(x1, x2)',
filter_group=FilterGroup.CDIST_2,
weight=48),
# # FunctionInfo(name='torch.cumsum(input, dim)',
# # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
# # weight=44),
FunctionInfo(name='torch.div(input, other)',
filter_group=FilterGroup.NE_BROADCASTABLE_2,
weight=28),
FunctionInfo(name='torch.eq(input, other)',
filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2,
weight=24),
FunctionInfo(name='torch.eye(n)',
filter_group=FilterGroup.EYE_1,
weight=40),
# FunctionInfo(name='torch.eye(n, m)',
# filter_group=FilterGroup.EYE_ROWS_COLS_2,
# weight=60),
# # FunctionInfo(name='torch.flatten(input)',
# # filter_group=FilterGroup.TENSOR_1,
# # weight=23),
# # FunctionInfo(name='torch.flatten(input, start_dim)',
# # filter_group=FilterGroup.EXPAND_DIMS_2,
# # weight=23),
# # FunctionInfo(name='torch.flatten(input, start_dim, end_dim)',
# # filter_group=FilterGroup.TRANSPOSE_3,
# # weight=23),
FunctionInfo(name='torch.gather(input, dim, index)',
filter_group=FilterGroup.GATHER_3,
weight=48),
# # FunctionInfo(name='torch.ge(input, other)',
# # filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2,
# # weight=32),
FunctionInfo(name='torch.gt(input, other)',
filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2,
weight=24),
# # FunctionInfo(name='torch.index_select(input, dim, index)',
# # filter_group=FilterGroup.INDEX_SELECT_3,
# # weight=24),
# # FunctionInfo(name='torch.le(input, other)',
# # filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2,
# # weight=32),
FunctionInfo(name='torch.lt(input, other)',
filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2,
weight=24),
# # FunctionInfo(name='torch.logical_and(input, other)',
# # filter_group=FilterGroup.SAME_DTYPE_NUMERIC_BROADCASTABLE_2,
# # weight=24),
FunctionInfo(name='torch.masked_select(input, mask)',
filter_group=FilterGroup.TENSOR_BOOLTENSOR_2,
weight=28),
FunctionInfo(name='torch.matmul(input, other)',
filter_group=FilterGroup.MATMUL_2,
weight=24),
FunctionInfo(name='torch.max(input)',
filter_group=FilterGroup.NUMERICTENSOR_1,
weight=24),
# FunctionInfo(name='torch.max(input, dim)',
# filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
# weight=24),
# # FunctionInfo(name='torch.maximum(input, other)',
# # filter_group=FilterGroup.SAME_SHAPES_NUMERICTENSOR_2,
# # weight=24),
# # FunctionInfo(name='torch.mean(input)',
# # filter_group=FilterGroup.NUMERICTENSOR_1,
# # weight=40),
# # FunctionInfo(name='torch.mean(input, dim)',
# # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
# # weight=40),
# # FunctionInfo(name='torch.min(input)',
# # filter_group=FilterGroup.NUMERICTENSOR_1,
# # weight=24),
FunctionInfo(name='torch.minimum(input, other)',
filter_group=FilterGroup.SAME_SHAPES_NUMERICTENSOR_2,
weight=32),
# # FunctionInfo(name='torch.mm(input, mat2)',
# # filter_group=FilterGroup.MM_2,
# # weight=32),
FunctionInfo(name='torch.mul(input, other)',
filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2,
weight=24),
FunctionInfo(name='torch.ne(input, other)',
filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2,
weight=24),
# # FunctionInfo(name='torch.nonzero(input)',
# # filter_group=FilterGroup.TENSOR_1,
# # weight=24),
# # FunctionInfo(name='torch.nn.functional.normalize(input, dim)',
# # filter_group=FilterGroup.NORMALIZE_2,
# # weight=48),
FunctionInfo(name='torch.nn.functional.one_hot(input, num_classes)',
filter_group=FilterGroup.ONE_HOT_2,
weight=28),
# # FunctionInfo(name='torch.nn.functional.pad(input, pad, mode="constant")',
# # filter_group=FilterGroup.PAD_2,
# # weight=40),
# # FunctionInfo(name='torch.prod(input, dim)',
# # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
# # weight=52),
FunctionInfo(name='torch.repeat_interleave(input, repeats, dim)',
filter_group=FilterGroup.REPEAT_3,
weight=48),
FunctionInfo(name='torch.reshape(input, shape)',
filter_group=FilterGroup.RESHAPE_2,
weight=28),
FunctionInfo(name='torch.roll(input, shifts, dims)',
filter_group=FilterGroup.ROLL_3,
weight=48),
FunctionInfo(name='torch.searchsorted(sorted_sequence, input)',
filter_group=FilterGroup.SEARCHSORTED_2,
weight=56),
# # FunctionInfo(name='torch.sort(input, dim)',
# # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
# # weight=52),
# # FunctionInfo(name='torch.sort(input, dim, descending=True)',
# # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
# # weight=60),
FunctionInfo(name='torch.squeeze(input)',
filter_group=FilterGroup.TENSOR_1,
weight=23),
FunctionInfo(name='torch.squeeze(input, dim)',
filter_group=FilterGroup.SQUEEZE_2,
weight=23),
# # FunctionInfo(name='torch.sqrt(input)',
# # filter_group=FilterGroup.NUMERICTENSOR_1,
# # weight=56),
FunctionInfo(name='torch.square(input)',
filter_group=FilterGroup.NUMERICTENSOR_1,
weight=28),
FunctionInfo(name='torch.stack(tensors)',
filter_group=FilterGroup.TENSORSEQUENCE_1,
weight=36),
FunctionInfo(name='torch.stack(tensors, dim)',
filter_group=FilterGroup.TENSORSEQUENCE_AXIS_2,
weight=36),
# # FunctionInfo(name='torch.std(input, dim)',
# # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
# # weight=40),
# # FunctionInfo(name='torch.sub(input, other)',
# # filter_group=FilterGroup.NE_BROADCASTABLE_2,
# # weight=28),
FunctionInfo(name='torch.sum(input)',
filter_group=FilterGroup.NUMERICTENSOR_1,
weight=24),
FunctionInfo(name='torch.sum(input, dim)',
filter_group=FilterGroup.NUMERICTENSOR_AXIS_2,
weight=24),
# FunctionInfo(name=CONSTANT_OPERATION_NAME,
# filter_group=FilterGroup.TENSORIZABLE_1,
# weight=24),
FunctionInfo(name='torch.tensordot(a, b, dims)',
filter_group=FilterGroup.TENSORDOT_3,
weight=24),
FunctionInfo(name='torch.tile(input, dims)',
filter_group=FilterGroup.TILE_2,
weight=28),
FunctionInfo(name='torch.transpose(input, dim0, dim1)',
filter_group=FilterGroup.TRANSPOSE_3,
weight=24),
FunctionInfo(name='torch.where(condition, input, other)',
filter_group=FilterGroup.WHERE_TENSOR_3,
weight=24),
FunctionInfo(name='torch.where(condition, self, other)',
filter_group=FilterGroup.WHERE_NUMERIC_3,
weight=24),
# # FunctionInfo(name='torch.unique(input)',
# # filter_group=FilterGroup.TENSOR_1,
# # weight=48),
FunctionInfo(name='torch.unsqueeze(input, dim)',
filter_group=FilterGroup.EXPAND_DIMS_ADDITIONAL_2,
weight=22),
# # FunctionInfo(name='torch.zeros(size)',
# # filter_group=FilterGroup.SHAPE_1,
# # weight=40),
]
SPARSE_FUNCTIONS = [
]
def parse_function_info_name(function_info):
"""Takes a FunctionInfo and returns (function_name, list_of_args).
Args:
function_info: A FunctionInfo namedtuple.
Returns:
A tuple (function_name, list_of_args, constant_kwargs), where function_name
is a string, list_of_args is a list of strings, and constant_kwargs is a
dict mapping argument names to their constant literal values. For example,
if the FunctionInfo's name is 'torch.foo.bar(x, axis, baz=True)', then
this function would return ('torch.foo.bar', ['x', 'axis'], {'baz': True}).
Raises:
ValueError: If the FunctionInfo's name is not properly formatted.
"""
name = function_info.name
if name.count('(') != 1:
raise ValueError("The FunctionInfo's name must have exactly one open "
"parenthesis.")
if name.count(')') != 1 or name[-1] != ')':
raise ValueError("The FunctionInfo's name must have exactly one close "
"parenthesis, at the end of the name.")
open_paren = name.index('(')
close_paren = name.index(')')
function_name = name[ : open_paren]
arg_list = name[open_paren + 1 : close_paren]
split_by_comma = [arg.strip() for arg in arg_list.split(',')]
list_of_args = []
constant_kwargs = collections.OrderedDict()
for part in split_by_comma:
if '=' in part:
kwarg_name, literal_as_string = [x.strip() for x in part.split('=')]
constant_kwargs[kwarg_name] = ast.literal_eval(literal_as_string)
else:
list_of_args.append(part)
return function_name, list_of_args, constant_kwargs
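# Illustrative sketch: parsing a FunctionInfo name into its parts.
#
#   info = FunctionInfo(name="torch.squeeze(input, dim)",
#                       filter_group=FilterGroup.SQUEEZE_2,
#                       weight=23)
#   parse_function_info_name(info)
#   # -> ('torch.squeeze', ['input', 'dim'], OrderedDict())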
| APIsynth-master | Synthesis_incorporation/torch_functions.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch as T
from torch import nn
EMBEDDING_SIZE = 150
SHAPE_EMBEDDING_SIZE = 6
class pycoder_parameters:
''' Core Fuzzing Parameters '''
NUM_FUZZ_PER_API= 100000 #000
NUM_TEST_FUZZ = 2
FLOAT_TENSOR = False #We either generate float or integer tensors
UNIT_TEST = False
COMPOSITE = True
''' Fuzzing Detailed Parameters '''
MAX_TENSOR_DIMENSIONS = 3 #how many rows, columns, etc.
MIN_VAL_PER_DIMENSION = 1 # e.g., min number of rows, columns, etc.
MAX_VAL_PER_DIMENSION = 5 # e.g., max number of rows, columns, etc.
#So far limiting to integers
MIN_TENSOR_VALUE = 1
MAX_TENSOR_VALUE = 15
''' Embedding Parameters '''
EMBEDDING_NOISE_LEVEL = 0 #0 noise by default
EMBEDDING_SIZE = 150
SHAPE_EMBEDDING_SIZE = 6
data_type = 'float' if FLOAT_TENSOR is True else 'integer'
model_type = 'Composite_' if COMPOSITE is True else 'Single_'
file_name = str(model_type) + str(NUM_FUZZ_PER_API) + '_' + data_type
fuzzing = file_name + '.pt'
embedding = file_name + '.embedding' + '.pt'
classification = file_name + '.model_result' + '.pt'
train_valid_test = file_name + 'train_valid_test.pt'
def setNoiseLevel(self, noise):
self.EMBEDDING_NOISE_LEVEL = noise
self.embedding = self.file_name + '.embedding' + '_' + str(self.EMBEDDING_NOISE_LEVEL) + '.pt'
def getEmbeddingFile(self):
return(self.file_name + '.embedding' + '_' + str(self.EMBEDDING_NOISE_LEVEL) + '.pt')
def getVisulizationFile(self):
return(self.file_name + '.embedding' + '_' + str(self.EMBEDDING_NOISE_LEVEL) + '_' + 'tSNE.pt')
class Net(torch.nn.Module):
def __init__(self, settings, len_api):
super(Net, self).__init__()
first_layer_size = settings.model.embedding_size
if settings.model.use_shape_encoding:
first_layer_size += settings.model.shape_embedding_size
if settings.model.use_type_encoding:
first_layer_size += 2
self.hid1 = torch.nn.Linear(4*(first_layer_size+1), 500)
self.hid2 = torch.nn.Linear(500, 250)
self.hid3 = torch.nn.Linear(250, 100)
self.oupt = torch.nn.Linear(100, len_api)
torch.nn.init.xavier_uniform_(self.hid1.weight)
torch.nn.init.zeros_(self.hid1.bias)
torch.nn.init.xavier_uniform_(self.hid2.weight)
torch.nn.init.zeros_(self.hid2.bias)
torch.nn.init.xavier_uniform_(self.oupt.weight)
torch.nn.init.zeros_(self.oupt.bias)
torch.nn.Dropout(p=0.2)
def forward(self, x):
z1 = torch.tanh(self.hid1(x))
z2 = torch.tanh(self.hid2(z1))
z3 = torch.tanh(self.hid3(z2))
z = self.oupt(z3) # no softmax: CrossEntropyLoss()
return (z, z3, z2, z1)
class FFNet(T.nn.Module):
def __init__(self):
super(FFNet, self).__init__()
NOISE = 0
f = pycoder_parameters()
f.setNoiseLevel(NOISE)
f.embedding = f.getEmbeddingFile()
print(f.embedding)
print(f.SHAPE_EMBEDDING_SIZE)
self.hid1 = T.nn.Linear(4*(f.EMBEDDING_SIZE+f.SHAPE_EMBEDDING_SIZE+1+2), 500)
self.hid2 = T.nn.Linear(500, 250)
self.hid3 = T.nn.Linear(250, 100)
# self.oupt = T.nn.Linear(100, len(api2indx))
self.oupt = T.nn.Linear(100, 33)
T.nn.init.xavier_uniform_(self.hid1.weight)
T.nn.init.zeros_(self.hid1.bias)
T.nn.init.xavier_uniform_(self.hid2.weight)
T.nn.init.zeros_(self.hid2.bias)
T.nn.init.xavier_uniform_(self.oupt.weight)
T.nn.init.zeros_(self.oupt.bias)
T.nn.Dropout(p=0.2)
def forward(self, x):
z1 = T.tanh(self.hid1(x))
z2 = T.tanh(self.hid2(z1))
z3 = T.tanh(self.hid3(z2))
z = self.oupt(z3) # no softmax: CrossEntropyLoss()
return (z, z3, z2, z1)
class RNNModel(nn.Module):
def __init__(self, input_size, output_size, hidden_dim, n_layers):
super(RNNModel, self).__init__()
# Defining some parameters
self.hidden_dim = hidden_dim
self.n_layers = n_layers
#Defining the layers
# RNN Layer
self.rnn = nn.RNN(input_size, hidden_dim, n_layers, batch_first=True, bidirectional=True)
# Fully connected layer
self.fc = nn.Linear(hidden_dim*2, output_size)
def forward(self, x):
batch_size = x.size(0)
#Initializing hidden state for first input using method defined below
hidden = self.init_hidden(batch_size)
# Passing in the input and hidden state into the model and obtaining outputs
out, hidden = self.rnn(x, hidden)
out1 = out.contiguous().view(-1, self.hidden_dim*2)
out1 = self.fc(out1)
return out1, hidden, out
def init_hidden(self, batch_size):
device = T.device("cpu")
# This method generates the first hidden state of zeros which we'll use in the forward pass
hidden = torch.zeros(self.n_layers*2, batch_size, self.hidden_dim).to(device)
# We'll send the tensor holding the hidden state to the device we specified earlier as well
return hidden
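# Illustrative sketch: a forward pass through RNNModel with random input. The
# shapes below are arbitrary assumptions chosen only for demonstration.
#
#   model = RNNModel(input_size=100, output_size=33, hidden_dim=64, n_layers=1)
#   x = torch.randn(2, 3, 100)        # (batch, sequence, features)
#   logits, hidden, raw = model(x)    # logits: (batch * sequence, 33)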
| APIsynth-master | Synthesis_incorporation/models/models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Lint as: python3
"""An interface for predicting operations given input and output."""
import abc
import torch
import torch.nn.functional as F
from typing import Dict, List, Optional, Text
from itertools import product
import six
from tf_coder.benchmarks import benchmark as benchmark_module
from tf_coder.value_search import all_operations
from tf_coder.value_search import operation_base
from tf_coder.value_search import value_search_settings as settings_module
from tf_coder.value_search import value as value_module
from tf_coder.models.models import Net
from iopath.common.file_io import PathManager
from iopath.fb.manifold import ManifoldPathHandler
def load_checkpoint(checkpoint_path, map_location=None):
pm = PathManager()
pm.register_handler(ManifoldPathHandler())
with pm.open(checkpoint_path, "rb") as f:
if map_location is not None:
checkpoint = torch.load(f, map_location=map_location)
else:
checkpoint = torch.load(f, map_location=lambda storage, loc: storage)
return checkpoint
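# Illustrative sketch: loading a checkpoint onto the CPU. The path is a
# placeholder, not a real artifact.
#
#   state = load_checkpoint("path/to/model.pt", map_location=torch.device("cpu"))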
@six.add_metaclass(abc.ABCMeta)
class PredictionModel(object):
"""Apply prediction model's results in PyCoder.
Attributes:
operations: A list of operations that the handler knows about.
all_names: A list of operation names, in the same order as the `operations`
list.
"""
def __init__(self, operations: Optional[List[operation_base.Operation]] = None):
"""Initializes the handler.
Args:
operations: A list of operations that the scorer should handle. Exposed
for testing.
Raises:
ValueError: If there are duplicate operation names.
"""
self.operations = (
operations
if operations
else all_operations.get_operations(include_sparse_operations=True)
)
self.all_names = [operation.name for operation in self.operations]
if len(set(self.all_names)) != len(self.operations):
raise ValueError("Duplicate operation name.")
@abc.abstractmethod
def get_operation_multipliers(
self, benchmark: benchmark_module.Benchmark, settings: settings_module.Settings
) -> Dict[Text, float]:
"""Returns a map from operation names to their weight multiplier.
The weight multiplier should be between 0 and 1 if the operation should be
prioritized, or greater than 1 if it should be deprioritized.
Args:
benchmark: Benchmark object corresponding to the TF-Coder task.
settings: A Settings object storing settings for this search.
Returns:
A map from operation name to weight multiplier, such that the operation
with that name should have its weight modified by that multiplier. If the
dict does not contain a key, it means the weight should not be modified
(equivalent to a multiplier of 1).
"""
def __repr__(self) -> Text:
"""Returns a string containing details about this handler and parameters."""
return self.__class__.__name__
class ClassificationModel(PredictionModel):
def __init__(
self,
settings: settings_module.Settings,
operations: Optional[List[operation_base.Operation]] = None,
):
super(ClassificationModel, self).__init__(operations)
self.checkpoint_path = settings.model.checkpoint_path
self.api_map_path = settings.model.api_map_path
self.multi_ffn_path = settings.model.multi_ffn_path
self.multi_rnn_path = settings.model.multi_rnn_path
self.multi_api_map_path = settings.model.multi_api_map_path
self.api2indx = load_checkpoint(self.api_map_path)
self.multi_api2indx = load_checkpoint(self.multi_api_map_path)
self.embedding_size = settings.model.embedding_size
self.shape_embedding_size = settings.model.shape_embedding_size
self.use_shape_encoding = settings.model.use_shape_encoding
self.use_type_encoding = settings.model.use_type_encoding
self.use_value_encoding = settings.model.use_value_encoding
self.rnn_hidden_dims = settings.model.rnn_hidden_dims
self.rnn_num_layers = settings.model.rnn_num_layers
self.settings = settings
# self.load_model(settings.model.use_multi_model)
self.load_model(settings)
def load_model(self, settings):
device = torch.device("cpu")
if settings.model.use_multi_model or settings.model.do_first_in_seq:
self.multi_ffn_model = load_checkpoint(self.multi_ffn_path).to(device)
self.multi_model = load_checkpoint(self.multi_rnn_path).to(device)
self.indx2api = {v: k for k, v in self.multi_api2indx.items()}
if self.multi_api2indx.get('<eol>', -1) == -1:
max_key = max(self.indx2api.keys())
self.indx2api[max_key+1] = '<eol>'
self.multi_api2indx['<eol>'] = max_key+1
else:
self.model = Net(self.settings, len(self.api2indx)).to(device)
checkpoint = load_checkpoint(self.checkpoint_path)
self.model.load_state_dict(checkpoint)
self.model.eval()
# check input tensor type and adjust model
def embed_benchmark_example(self, example):
it_pad = []
input_list = example.inputs
for input_tensor in input_list:
input_tensor = torch.tensor(input_tensor)
it_pad.append(self.tensor_flatten_pad(input_tensor))
for _ in range(len(it_pad),3):
t = torch.zeros(self.embedding_size + self.shape_embedding_size + 2 + 1)
t[-1] = -1
it_pad.append(t)
ot_pad = self.tensor_flatten_pad(example.output, isNoise=False)
domain_embedding = torch.flatten(torch.stack((it_pad[0], it_pad[1], it_pad[2], ot_pad)))
return domain_embedding
def embed_benchmark_value(self, example):
it_pad = []
input_list = example['inputs']
for input_tensor in input_list:
if input_tensor == 0:
embedding_size = self.embedding_size
if self.use_shape_encoding:
embedding_size += self.shape_embedding_size
if self.use_type_encoding:
embedding_size += 2
it_pad.append(torch.zeros(embedding_size + 1))
else:
if input_tensor.is_tensor:
input_tensor = input_tensor.value
elif input_tensor.is_sequence and not input_tensor.elem_type_is_tensor:
input_tensor = torch.tensor(input_tensor.value)
else:
input_tensor = torch.tensor(input_tensor.value)
it_pad.append(self.tensor_flatten_pad(input_tensor))
for _ in range(len(it_pad),3):
embedding_size = self.embedding_size
if self.use_shape_encoding:
embedding_size += self.shape_embedding_size
if self.use_type_encoding:
embedding_size += 2
t = torch.zeros(embedding_size + 1)
t[-1] = -1
it_pad.append(t)
output_tensor = example['output'].value
if not isinstance(output_tensor, torch.Tensor):
output_tensor = torch.tensor(output_tensor.value)
ot_pad = self.tensor_flatten_pad(output_tensor)
domain_embedding = torch.flatten(torch.stack((it_pad[0], it_pad[1], it_pad[2], ot_pad)))
return domain_embedding.float()
def encode_values_to_code(self, tensor):
tensor = tensor.clone()
tensor[(tensor>=100) & (tensor<1000)] = 100
tensor[(tensor>=1000)] = 101
tensor[(tensor<=-20) & (tensor>-100)] = -20
tensor[(tensor<=-100) & (tensor>-1000)] = -21
tensor[(tensor<=-1000)] = -22
return tensor
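# Illustrative sketch: the bucketing above maps large-magnitude values to
# sentinel codes. Assuming `model` is a ClassificationModel instance:
#
#   model.encode_values_to_code(torch.tensor([5, 150, 2500, -50, -5000]))
#   # -> tensor([   5,  100,  101,  -20,  -22])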
def tensor_flatten_pad(
self, tensor, embed_size = None, shape_embed_size = None, isNoise = False
):
if embed_size is None:
embed_size = self.embedding_size
if shape_embed_size is None:
shape_embed_size = self.shape_embedding_size
if not isinstance(tensor, torch.Tensor):
tensor = torch.tensor(tensor)
t_flatten = torch.flatten(tensor)
if self.use_value_encoding:
t_flatten = self.encode_values_to_code(t_flatten)
padding_length = embed_size - list(t_flatten.shape)[-1]
p1d = (0,padding_length) #just padding the last dimension
t_pad = F.pad(input=t_flatten, pad=p1d, mode='constant', value=0)
if self.use_type_encoding:
type_padding = 0
if tensor.dtype == torch.bool:
type_padding = 1
if tensor.dtype == torch.float:
type_padding = 2
# Size embedding.
if self.use_shape_encoding:
if not isinstance(tensor, torch.Tensor):
t_shape = []
else:
t_shape = list(tensor.shape)
padding_length = shape_embed_size -1 - len(t_shape)
p1d = (0,padding_length) #just padding the last dimension
s_pad = F.pad(input=torch.tensor(t_shape), pad=p1d, mode='constant', value=0)
t_pad_list = t_pad.tolist()
s_pad_list = s_pad.tolist()
if self.use_type_encoding:
tensor_embedding = torch.tensor([type_padding] + [-1] + t_pad_list + [-1] + s_pad_list + [-1])
else:
tensor_embedding = torch.tensor(t_pad_list + [-1] + s_pad_list + [-1])
else:
t_pad_list = t_pad.tolist()
if self.use_type_encoding:
tensor_embedding = torch.tensor([type_padding] + [-1] + t_pad_list + [-1])
else:
tensor_embedding = torch.tensor(t_pad_list + [-1])
return tensor_embedding.float()
def predict_operation(self, example, top_n, threshold, is_example, settings):
if is_example:
domain_embedding = self.embed_benchmark_example(example)
else:
domain_embedding = self.embed_benchmark_value(example)
with torch.no_grad():
predicts, _, _, _ = self.model(domain_embedding)
confidence = predicts
num_gt_threshold = sum(c >= threshold for c in confidence)
predicted_api_list = (torch.argsort(predicts, descending=True)).numpy()
topn_list = predicted_api_list[:min(top_n, num_gt_threshold)]
topn_operations = [list(self.api2indx.keys())[list(self.api2indx.values()).index(api)] for api in topn_list]
topn_confidences = [confidence[api].item() for api in topn_list]
return topn_operations, topn_confidences
def predict_sequence(self, example_sequence, top_n, beam_n, threshold, is_example, settings):
if is_example:
domain_embedding = self.embed_benchmark_example(example_sequence)
else:
embeddings = []
for example in example_sequence:
embeddings.append(self.embed_benchmark_value(example))
for _ in range(len(example_sequence), 3):
embeddings.append(torch.zeros(embeddings[0].shape))
domain_embedding = torch.stack((embeddings[0], embeddings[1], embeddings[2]))
with torch.no_grad():
predicts, z3, z2, z1 = self.multi_ffn_model(domain_embedding)
temp_z3 = torch.unsqueeze(z3,0)
model_output, hidden, int_output = self.multi_model(temp_z3)
topn_list = []
topn_prob_list = []
for i, m in enumerate(model_output):
topn = []
topn_prob = []
prob = torch.nn.functional.softmax(m, dim=0).data
# Taking the class with the highest probability score from the output
topn_ops = torch.topk(prob,beam_n,dim=0)[1]
if settings.printing.predicted_operations:
print(i, topn_ops)
for op in topn_ops.cpu().numpy():
if settings.printing.predicted_operations:
print(self.indx2api[op])
topn.append(self.indx2api[op])
topn_prob.append(prob[op].item())
topn_list.append(topn)
topn_prob_list.append(topn_prob)
if settings.printing.predicted_operations:
print('====')
topn_operations = list(product(topn_list[0], topn_list[1], topn_list[2]))
topn_confidences = list(product(topn_prob_list[0], topn_prob_list[1], topn_prob_list[2]))
topn_confidences = [c[0]*c[1]*c[2] for c in topn_confidences]
num_gt_threshold = min(sum(c > threshold for c in topn_confidences), top_n)
topn_operations = [operation for _, operation in sorted(zip(topn_confidences, topn_operations), reverse=True, key=lambda pair: pair[0])]
topn_confidences = sorted(topn_confidences, reverse=True)
return topn_operations[:num_gt_threshold], topn_confidences[:num_gt_threshold]
def get_operation_multipliers(
self, benchmark: benchmark_module.Benchmark, settings: settings_module.Settings
) -> Dict[Text, float]:
"""See base class."""
if settings.model.use_multi_model:
predicted_operations, confidence = self.predict_sequence(benchmark.examples[0], settings.model.multiplier_top_n, settings.model.threshold, True, settings)
else:
predicted_operations, confidence = self.predict_operation(benchmark.examples[0], settings.model.multiplier_top_n, settings.model.threshold, True, settings)
if settings.printing.prioritized_operations:
if settings.model.use_multi_model:
print("Predicted operations: {}".format(", ".join(["{} ({:.2f})".format(op, c) for op, c in zip(predicted_operations, confidence)])))
predicted_operations = [{item for seq in predicted_operations for item in seq}]
else:
print("Predicted operations: {}".format(", ".join(["{} ({:.2f})".format(op, c) for op, c in zip(predicted_operations, confidence)])))
multipliers = {}
for name in self.all_names:
if name.startswith("torch.") and "(" in name:
function_name = name[len("torch.") : name.index("(")].lower()
if function_name in predicted_operations:
if settings.printing.prioritized_operations:
print(
"Classification Model prioritized {}".format(name)
)
multipliers[name] = settings.model.multiplier
return multipliers
def get_predicted_sequence(
self, example_sequence, settings: settings_module.Settings
) -> List[operation_base.Operation]:
"""See base class."""
predicted_operations, confidence = self.predict_sequence(example_sequence, settings.model.iterative_top_n, settings.model.beam_n, settings.model.threshold, False, settings)
if settings.printing.predicted_operations:
print()
for example in example_sequence:
print("With example, inputs: [{}],".format(", ".join([i.reconstruct_expression() if isinstance(i, value_module.Value) else str(i) for i in example['inputs']])))
# print("Predicted operations: {}".format(", ".join(["{} ({:.2f})".format(op, c) for op, c in zip(predicted_operations, confidence)])))
print(predicted_operations)
print(confidence)
operation_list = []
for sequence in predicted_operations:
sequence_list = []
for op in sequence:
# sequence_list: [[op1_1, op1_2, ...], [op2_1, op2_2, ...], ]
if op == '<eol>':
break
sequence_list.append(all_operations.find_operation_with_partial_name(op))
operation_list.extend(product(*sequence_list))
return operation_list
def get_predicted_operations(
self, example, settings: settings_module.Settings
) -> List[operation_base.Operation]:
"""See base class."""
predicted_operations, confidence = self.predict_operation(example, settings.model.iterative_top_n, settings.model.threshold, False, settings)
if settings.printing.predicted_operations:
print()
print("With example, inputs: ({}),".format(", ".join([i.reconstruct_expression() for i in example['inputs']])))
print("Predicted operations: {}".format(", ".join(["{} ({:.2f})".format(op, c) for op, c in zip(predicted_operations, confidence)])))
operation_list = []
for op in predicted_operations:
operation_list.extend(all_operations.find_operation_with_partial_name(op))
return operation_list
def __repr__(self) -> Text:
"""See base class."""
return "{}".format(self.__class__.__name__)
def predict_first_in_sequence(self, example_sequence, top_n, threshold, is_example, settings):
if is_example:
domain_embedding = self.embed_benchmark_example(example_sequence)
else:
embeddings = []
embeddings.append(self.embed_benchmark_value(example_sequence))
# for _ in range(1, 3):
for _ in range(len(embeddings), 3):
embeddings.append(torch.zeros(embeddings[0].shape))
domain_embedding = torch.stack((embeddings[0], embeddings[1], embeddings[2]))
with torch.no_grad():
predicts, z3, z2, z1 = self.multi_ffn_model(domain_embedding)
temp_z3 = torch.unsqueeze(z3, 0)
model_output, hidden, int_output = self.multi_model(temp_z3)
topn_operations = []
topn_confidences = []
topn = []
topn_prob = []
prob = torch.nn.functional.softmax(model_output[0], dim=0).data
topn_ops = torch.topk(prob, top_n, dim=0)[1]
for op in topn_ops.cpu().numpy():
if settings.printing.predicted_operations:
print(self.indx2api[op])
topn.append(self.indx2api[op])
topn_prob.append(prob[op].item())
# topn_operations.append(topn)
topn_operations = topn
# topn_confidences.append(topn_prob)
topn_confidences = topn_prob
num_gt_threshold = sum(c > threshold for c in topn_confidences)
topn_operations = [operation for _, operation in sorted(zip(topn_confidences, topn_operations), reverse=True, key=lambda pair: pair[0])]
topn_confidences = sorted(topn_confidences, reverse=True)
return topn_operations[:num_gt_threshold], topn_confidences[:num_gt_threshold]
def get_first_in_sequence(
self, example, settings: settings_module.Settings
) -> List[operation_base.Operation]:
if settings.printing.predicted_operations:
print()
print("With example, inputs: ({}),".format(", ".join([i.reconstruct_expression() for i in example['inputs']])))
predicted_operations, confidence = self.predict_first_in_sequence(example, settings.model.iterative_top_n, settings.model.threshold, False, settings)
if settings.printing.predicted_operations:
print("Predicted operations: {}".format(", ".join(["{} ({:.2f})".format(op, c) for op, c in zip(predicted_operations, confidence)])))
operation_list = []
for op in predicted_operations:
if op != '<eol>':
operation_list.extend(all_operations.find_operation_with_partial_name(op))
return operation_list
PREDICTION_TO_NAME_MAP = {
'abs': "torch.abs",
'add': "torch.add",
'all': "torch.all",
'any': "torch.any",
'arange': "torch.arange",
'argmax': "torch.argmax",
'argsort': "torch.argsort",
'bincount': "torch.bincount",
'cat': "torch.cat",
'cdist': "torch.cdist",
'cumsum': "torch.cumsum",
'div': "torch.div",
'eq': "torch.eq",
'expand': "ExpandOperation",
'eye': "torch.eye",
'flatten': "torch.flatten",
'gather': "torch.gather",
'ge': "torch.ge",
'gt': "torch.gt",
'index_select': "torch.index_select",
'le': "torch.le",
'lt': "torch.lt",
'logical_and': "torch.logical_and",
'masked_select': "torch.masked_select",
'matmul': "torch.matmul",
'max': "torch.max",
'maximum': "torch.maximum",
'mean': "torch.mean",
'min': "torch.min",
'minimum': "torch.minimum",
'mul': "torch.mul",
'ne': "torch.ne",
'nonzero': "torch.nonzero",
'normalize': "torch.nn.functional.normalize",
'one_hot': "torch.nn.functional.one_hot",
'pad': "torch.nn.functional.pad",
'prod': "torch.prod",
'repeat_interleave': "torch.repeat_interleave",
'reshape': "torch.reshape",
'roll': "torch.roll",
'searchsorted': "torch.searchsorted",
'sort': "torch.sort",
'squeeze': "torch.squeeze",
'sqrt': "torch.sqrt",
'square': "torch.square",
'stack': "torch.stack",
'sub': "torch.sub",
'sum': "torch.sum",
'tensordot': "torch.tensordot",
'tile': "torch.tile",
'transpose': "torch.transpose",
'where': "torch.where",
'unique': "torch.unique",
'unsqueeze': "torch.unsqueeze",
'zeros': "torch.zeros",
"masked": "torch.masked_select",
"index": "torch.index_select",
"logical": "torch.logical_and",
"onehot": "nn.functional.one_hot",
"float": "FloatOperation",
"bool": "BoolOperation",
"int": "IntOperation"
}
| APIsynth-master | Synthesis_incorporation/models/prediction_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Lint as: python3
"""Creates prediction model from strings."""
import collections
from typing import Callable, Dict, List, Text
from tf_coder.models import prediction_model
from tf_coder.value_search import value_search_settings as settings_module
# Map handler strings to constructors so models are not instantiated until requested.
PREDICTION_MODEL_FNS = collections.OrderedDict(
[
("classification", prediction_model.ClassificationModel),
]
) # type: Dict[Text, Callable[[], prediction_model.PredictionModel]]
def handler_string_list() -> List[Text]:
"""Returns a list of available handler strings."""
return list(PREDICTION_MODEL_FNS.keys())
def load_model(handler_string: Text, settings: settings_module.Settings) -> prediction_model.PredictionModel:
"""Returns a PredictionModel corresponding to the given handler string."""
if handler_string not in PREDICTION_MODEL_FNS:
raise ValueError("Unknown snippet handler: {}".format(handler_string))
# Call the constructor to build the handler.
return PREDICTION_MODEL_FNS[handler_string](settings)
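# Illustrative sketch: building the classification model from its handler
# string. `settings` is an assumed, fully configured Settings instance (it must
# provide the checkpoint and api-map paths used by ClassificationModel).
#
#   model = load_model("classification", settings)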
| APIsynth-master | Synthesis_incorporation/models/prediction_model_factory.py |
# Copyright 2021 The TF-Coder Authors.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Benchmarks collected/inspired from StackOverflow."""
# Avoid wrapping URLs and target programs to ease clicking and copying.
# pylint: disable=line-too-long
# Every function in this module takes no arguments and creates a benchmark.
# pylint: disable=missing-docstring
import math
import torch
from tf_coder.benchmarks import benchmark
def stackoverflow_01():
examples = [
benchmark.Example(
inputs=[
# [[5.0, 2.0], [1.0, 3.0], [0.0, -1.0]],
[[5, 2], [1, 3], [0, 2]]
],
output=[
# [[5.0, 5.0], [1.0, 1.0], [0.0, 0.0]],
# [[2.0, 2.0], [3.0, 3.0], [-1.0, -1.0]],
[[5, 5], [1, 1], [0, 0]],
[[2, 2], [3, 3], [2, 2]]
],
),
]
constants = []
description = "reshape by separating and duplicating columns"
target_program = "torch.transpose(torch.stack((in1, in1)), 0, 2)"
source = "https://stackoverflow.com/questions/40441503/tensorflow-tensor-reshape"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_01",
)
def stackoverflow_02():
examples = [
benchmark.Example(
inputs=[
# [5, 1, 0, 3, 0, -1, 2, -10, 2],
[5, 1, 0, 3, 0, 0, 2, 0, 2],
1,
],
output=[1, 1, 0, 1, 0, 0, 1, 0, 1]
# [1, 1, 0, 1, 0, -1, 1, -10, 1],
),
]
constants = [1]
description = "clip values that are greater than 1"
target_program = "torch.minimum(in1, torch.tensor(1))"
source = (
"https://stackoverflow.com/questions/46408839/tensorflow-trim-values-in-tensor"
)
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_02",
)
@benchmark.ignore('Out of scope')
def stackoverflow_03():
examples = [
benchmark.Example(
inputs=[
[[11, 22, 33, 44, 55, 66, 77], [70, 60, 50, 40, 30, 20, 10]],
[[-9, -8, -7, -6, -5, -4, -3], [11, 12, 13, 14, 15, 16, 17]],
],
output=[[11, 22, 33, -6, -5, 66, 77], [70, 60, 50, 14, 15, 20, 10]],
),
]
constants = [3, 4, 5]
description = "replace certain columns with columns from the other tensor"
target_program = """
mask = torch.sum(torch.nn.functional.one_hot(torch.tensor(range(3,5)), in1.size(1)), 0)
solution = torch.add(torch.mul(mask, in2), torch.mul(torch.sub(torch.ones(mask.size(), dtype=torch.int), mask), in1))
"""
source = "https://stackoverflow.com/questions/44657388/how-to-replace-certain-values-in-tensorflow-tensor-with-the-values-of-the-other"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_03",
)
@benchmark.ignore('Out of scope')
def stackoverflow_04():
examples = [
benchmark.Example(
inputs=[
[[12, 23, 34, 45], [66, 77, 88, 99]],
[[0, 1], [0, 1], [1, 0], [0, 0]],
[[2, 1], [1, 2], [0, 2], [0, 0]],
],
output=[[34, 77], [23, 88], [66, 34], [12, 12]],
),
]
constants = []
description = "index into the tensor"
target_program = """
idxs = torch.stack((in2, in3), dim=1)
solution = in1[idxs[:, 0], idxs[:, 1]]
"""
source = "https://stackoverflow.com/questions/33736795/tensorflow-numpy-like-tensor-indexing"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_04",
)
def stackoverflow_05():
examples = [
benchmark.Example(
inputs=[
[[4, 3, 1], [6, 5, 2]],
[[[5, 5]], [[1, 5]], [[6, 0]]],
],
output=[[[29, 35]], [[47, 55]]],
),
]
constants = []
description = "tensor multiplication like np.tensordot"
target_program = "torch.tensordot(in1, in2, dims=1)"
source = "https://stackoverflow.com/questions/43067338/tensor-multiplication-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_05",
)
def stackoverflow_06():
examples = [
benchmark.Example(
inputs=[
[3, 5, 0, 2, 3, 3, 0],
],
output=[
[1, 0, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 0, 0, 0, 1],
],
# [
# [1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0],
# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
# [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
# [1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0],
# [1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0],
# [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
# ],
),
]
constants = []
description = "binary tensor from vector indicating if elements are equal"
target_program = "torch.eq(in1, torch.unsqueeze(in1, dim=1)).float()"
source = "https://stackoverflow.com/questions/47816231/create-binary-tensor-from-vector-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_06",
)
def stackoverflow_07():
examples = [
benchmark.Example(
inputs=[
[
[[8, 4, 6], [2, 12, 3]],
[[11, 12, 5], [9, 12, 12]],
[[9, 2, 13], [7, 0, 7]],
[[2, 10, 5], [7, 1, 2]],
],
],
output=[
[[8, 4, 6], [11, 12, 5], [9, 2, 13], [2, 10, 5]],
[[2, 12, 3], [9, 12, 12], [7, 0, 7], [7, 1, 2]],
],
),
]
constants = []
description = "swap the first two dimensions of the tensor"
target_program = "torch.transpose(in1, 0, 1)"
source = (
"https://stackoverflow.com/questions/38212205/swap-tensor-axes-in-tensorflow"
)
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_07",
)
def stackoverflow_08():
examples = [
benchmark.Example(
inputs=[
# [-1, 0, -3, 2, 1, 3, 5, -1, -9, 2, 10],
[1, 0, 0, 2, 1, 3, 5, 0, 1, 2, 10],
[12, 3, 45, 6, 7, 8, 9, 87, 65, 4, 32],
1
],
output=[6, 8, 9, 4, 32],
),
]
constants = [1]
description = (
"select the values in the second tensor where the first "
"tensor is greater than 1"
)
target_program = "torch.masked_select(in2, torch.gt(in1, 1))"
source = "https://stackoverflow.com/questions/33769041/tensorflow-indexing-with-boolean-tensor"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_08",
)
@benchmark.ignore('Out of scope')
def stackoverflow_09():
examples = [
benchmark.Example(
inputs=[
[37, 42, 42, 37, 28, 15, 42, 15],
],
output=[0, 1, 1, 0, 2, 3, 1, 3],
),
]
constants = []
description = "group items by value and get the group indices"
target_program = """
original_unique = torch.masked_select(values, torch.tensor([values[i] not in values[:i] for i in range(values.size(0))]))
solution = torch.argsort(original_unique)[torch.unique(values, return_inverse=True)[1]]
"""
source = "https://stackoverflow.com/questions/53054668/assign-values-between-0-and-n-1-for-a-vector-of-length-l-with-n-different-eleme"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_09",
)
@benchmark.ignore('Out of scope - api3(api1, api2)')
def stackoverflow_10():
examples = [
benchmark.Example(
inputs=[
[[15, 10], [20, -5]],
[[2, 3, 1], [-2, 5, 0]],
],
output=[[[30, 45, 15], [20, 30, 10]], [[-40, 100, 0], [10, -25, 0]]],
),
]
constants = []
description = "perform matrix multiplication"
target_program = "torch.matmul(torch.unsqueeze(in1, -1), torch.unsqueeze(in2, 1))"
source = "https://stackoverflow.com/questions/53094212/tensorflow-sxn-matrix-multiply-with-sxd-matrix-to-output-sxnxd-array"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_10",
)
def stackoverflow_11():
examples = [
benchmark.Example(
inputs=[
[4, 0, 1, 1, 0, 4, 0, 0, 3, 4, 1],
],
output=[4, 3, 0, 1, 3],
),
]
constants = []
description = "count the number of occurences of each distinct number"
target_program = "torch.bincount(in1)"
source = "https://stackoverflow.com/questions/45194672/how-to-count-elements-in-tensorflow-tensor"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_11",
)
@benchmark.ignore('Out of scope - api3(api1, api2)')
def stackoverflow_12():
examples = [
benchmark.Example(
inputs=[[[12, 34, 56], [33, 22, 11]]], output=[[12, 56], [33, 11]]
),
]
constants = [0, 1, 2]
description = "remove a column from the tensor"
target_program = "torch.stack((in1[:,0], in1[:,2]), dim=1)"
source = "https://stackoverflow.com/questions/47447183/remove-a-set-of-tensors-from-a-tensor-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_12",
)
def stackoverflow_13():
examples = [
benchmark.Example(
inputs=[
[[3, 5], [10, 2]],
# [[[1, 0], [5, 4]], [[3, 10], [2, -2]]],
[[[1, 0], [5, 4]], [[3, 10], [2, 0]]]
],
output=[[[28, 20], [19, 30]], [[20, 8], [34, 100]]]
# [[[28, 20], [19, 20]], [[20, 8], [34, 96]]],
),
]
constants = []
description = "multiply vectors by tensor"
target_program = "torch.transpose(torch.matmul(in1, in2), 0, 1)"
source = "https://stackoverflow.com/questions/50777704/n-d-tensor-matrix-multiplication-with-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_13",
)
def stackoverflow_14():
examples = [
benchmark.Example(
inputs=[
[
[
[0, 0, 1],
[0, 0, 0],
[1, 0, 1],
[0, 1, 0],
[0, 0, 0],
[1, 1, 1],
[1, 1, 0],
# [False, False, True],
# [False, False, False],
# [True, False, True],
# [False, True, False],
# [False, False, False],
# [True, True, True],
# [True, True, False],
]
],
],
output=[[1, 0, 1, 1, 0, 1, 1]]
# [[True, False, True, True, False, True, True]],
),
]
constants = []
target_program = "torch.sum(in1, -1).bool()"
description = "choose True if any value in a row is True, False otherwise"
source = "https://stackoverflow.com/questions/35657003/aggregate-each-element-of-tensor-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_14",
)
def stackoverflow_15():
examples = [
benchmark.Example(
inputs=[
# [3, 1, 2, 0, 1, -1, 10, 1, -10],
[3, 1, 2, 0, 1, 0, 10, 1, 0],
1,
],
output=[3, 0, 2, 0, 0, 0, 10, 0, 0]
# [3, 0, 2, 0, 0, -1, 10, 0, -10],
),
]
constants = [0, 1]
description = "set all instances of 1 to 0"
target_program = "torch.where(torch.ne(in1,1), in1, 0)"
source = "https://stackoverflow.com/questions/39045797/conditional-assignment-of-tensor-values-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_15",
)
def stackoverflow_16():
examples = [
benchmark.Example(
inputs=[
# [[2, 5], [3, 0], [8, -7]],
[[2, 5], [3, 0], [8, 7]],
# [4, 10, -6],
[4, 10, 6]
],
output=[[8, 20], [30, 0], [48, 42]]
# [[8, 20], [30, 0], [-48, 42]],
),
]
constants = []
description = "multiply tensors across the first axis"
target_program = "torch.mul(in1, torch.unsqueeze(in2, 1))"
source = "https://stackoverflow.com/questions/46240646/tensor-multiply-along-axis-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_16",
)
def stackoverflow_17():
examples = [
benchmark.Example(
inputs=[
# [17, -32, 99],
[17, 32, 99]
],
output=[[17, 17], [32, 32], [99, 99]]
# [[17, 17], [-32, -32], [99, 99]],
),
]
constants = []
description = "duplicate each element of a tensor"
# StackOverflow answer doesn't work.
target_program = "torch.stack((in1, in1),1)"
source = "https://stackoverflow.com/questions/51761353/about-tensor-of-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_17",
)
def stackoverflow_18():
examples = [
benchmark.Example(
inputs=[
# shape=[2, 2, 3].
[[[1, 1, 1], [1, 0, 1]], [[1, 2, 3], [4, 5, 6]]],
# shape=[3, 4].
[[1, 1, 1, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
# shape=[4].
[100, 200, 300, 400],
],
# Shape=[sequence_length, batch_size, 4]=[2, 2, 4].
output=[
[[107, 209, 311, 413], [106, 207, 308, 409]],
[[118, 223, 328, 433], [139, 250, 361, 472]],
],
),
]
constants = []
description = "multiply 3D tensor and 2D tensor and add another tensor"
target_program = "torch.add(in3, torch.matmul(in1, in2))"
source = "https://stackoverflow.com/questions/38222126/tensorflow-efficient-way-for-tensor-multiplication"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_18",
)
@benchmark.ignore('Out of scope')
def stackoverflow_19():
examples = [
benchmark.Example(
inputs=[
[
[3, 1, 2],
[1, 0, 4],
[1, 2, 3],
[0, 5, 1],
[1, 1, 2],
[2, 3, 1],
[2, 1, 0],
],
],
output=[
[0, 5, 1],
[1, 0, 4],
[1, 1, 2],
[1, 2, 3],
[2, 1, 0],
[2, 3, 1],
[3, 1, 2],
],
),
]
constants = []
description = (
"sort a tensor considering the first column, breaking ties "
"using the second column"
)
target_program = """
second_sorted = in1[torch.sort(in1[:, 1])[1]]
solution = second_sorted[torch.sort(second_sorted[:, 0])[1]]
"""
source = "https://stackoverflow.com/questions/49399198/sort-a-tensor-based-on-two-columns-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_19",
)
def stackoverflow_20():
examples = [
benchmark.Example(
inputs=[
[
[7, 2, 1],
[4, 5, 1],
[4, 4, 2],
[3, 4, 3],
[0, 0, 1],
# [0.7, 0.2, 0.1],
# [0.4, 0.5, 0.1],
# [0.4, 0.4, 0.2],
# [0.3, 0.4, 0.3],
# [0.0, 0.0, 1.0],
],
],
output=[[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]],
),
]
constants = []
description = "compute argmax in each tensor and set it to 1"
target_program = "torch.nn.functional.one_hot(torch.argmax(in1, 1), in1.size(1))"
source = "https://stackoverflow.com/questions/44834739/argmax-on-a-tensor-and-ceiling-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_20",
)
def stackoverflow_21():
examples = [
benchmark.Example(
inputs=[
[[2], [0], [1], [0]],
# [[2], [0], [1], [0]],
[[2, 5, 3], [1, 3, 6], [1, 6, 3], [7, 0, 3]]
# [[0.2, 0.5, 0.3], [0.1, 0.3, 0.6], [0.1, 0.6, 0.3], [0.7, 0.0, 0.3]],
],
output=[[3], [1], [6], [7]],
# [[0.3], [0.1], [0.6], [0.7]],
),
]
constants = []
description = "gather elements in a tensor along axis 1"
target_program = "torch.gather(in2, 1, in1)"
source = "https://stackoverflow.com/questions/51690095/how-to-gather-element-with-index-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_21",
)
def stackoverflow_22():
examples = [
benchmark.Example(
inputs=[
[3, 1, 10],
[[6, 4], [5, 1], [3, 4]]
# [[0.6, 0.4], [0.5, 1.0], [3.0, 4.0]],
],
output=[53, 53]
# [32.3, 42.2],
),
]
constants = []
description = "multiply a vector with a matrix without reshaping the vector"
target_program = "torch.squeeze(torch.matmul(torch.unsqueeze(in1, 0).float(), in2))"
source = "https://stackoverflow.com/questions/43284897/how-can-i-multiply-a-vector-and-a-matrix-in-tensorflow-without-reshaping"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_22",
)
def stackoverflow_23():
# Simplified slightly because the user already knows how to do the mod part.
examples = [
benchmark.Example(
inputs=[
[[0, 5, 2], [3, 1, 4], [5, 1, 5]],
],
output=[
[1, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0],
],
),
]
constants = []
description = "place 1 at the indices in the input tensor"
target_program = (
"torch.max(torch.nn.functional.one_hot(in1, 9), 1, keepdim=False, out=None)[0]"
)
source = (
"https://stackoverflow.com/questions/53414433/tensorflow-tensor-binarization"
)
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_23",
)
def stackoverflow_24():
examples = [
benchmark.Example(
inputs=[
# [3.0, 1.0, 4.0, 5.0, 2.0, 8.0, -6.0, -7.0],
[3, 1, 4, 5, 2, 8, 6, 7],
# [0.5, 0.0, -2.0, 0.0, 1.0, -1.0, 0.0, 2.0],
[1, 0, 2, 0, 1, 1, 0, 2],
0
],
output=[3, 1, 2, 5, 2, 8, 6, 3.5]
# [6.0, 1.0, -2.0, 5.0, 2.0, -8.0, -6.0, -3.5],
),
]
constants = [0]
description = "like tf.divide(), but when dividing by 0, return the " "numerator"
target_program = "torch.where(torch.ne(in2, 0), torch.divide(in1, in2), in1)"
source = "https://stackoverflow.com/questions/53643339/tensorflow-overriding-tf-divide-to-return-the-numerator-when-dividing-by-0"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_24",
)
def stackoverflow_25():
examples = [
benchmark.Example(
inputs=[
3,
4,
],
output=[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
# [1.0, 0.0, 0.0],
# [0.0, 1.0, 0.0],
# [0.0, 0.0, 1.0],
# [1.0, 0.0, 0.0],
# [0.0, 1.0, 0.0],
# [0.0, 0.0, 1.0],
# [1.0, 0.0, 0.0],
# [0.0, 1.0, 0.0],
# [0.0, 0.0, 1.0],
# [1.0, 0.0, 0.0],
# [0.0, 1.0, 0.0],
# [0.0, 0.0, 1.0],
],
),
]
constants = []
description = "copy the tensor torch.eye(3), 4 times"
target_program = "torch.tile(torch.eye(in1), (in2, 1))"
source = "https://stackoverflow.com/questions/53602691/duplicate-a-tensor-n-times"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_25",
)
def stackoverflow_26():
examples = [
benchmark.Example(
inputs=[[[[3, 4], [1, 2]], [[5, 2], [10, 3]], [[10, 20], [4, 7]]]],
# [[[3, 4], [1, 2]], [[5, -2], [-10, 3]], [[10, 20], [-4, 7]]]],
output=[10, 20, 41]
# [10, -4, 33],
),
]
constants = []
description = "reduction operation for multiple dimensions simultaneously"
target_program = "torch.sum(torch.sum(in1, 1), 1)"
source = "https://stackoverflow.com/questions/54294780/how-to-perform-reduce-op-on-multiple-dimensions-at-once"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_26",
)
def stackoverflow_27():
examples = [
benchmark.Example(
inputs=[
[0, 3, 5, 6],
8,
],
output=[1, 0, 0, 1, 0, 1, 1, 0],
),
]
constants = []
description = "boolean tensor with 1 at the indices in the input tensor"
target_program = "torch.sum(torch.nn.functional.one_hot(in1, in2), 0)"
source = "https://stackoverflow.com/questions/54225704/how-do-i-get-a-tensor-representing-the-on-positions-in-the-original-tensor"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_27",
)
@benchmark.ignore('Out of scope')
def stackoverflow_28():
examples = [
benchmark.Example(
inputs=[
[
[[5, 3], [0, 2]],
[[7, 4], [5, 1]],
[[10, 20], [15, 30]],
[[11, 16], [14, 12]],
[[-2, -7], [-4, 6]],
],
[1, 0, 1, 1, 0],
],
output=[[3, 2], [7, 5], [20, 30], [16, 12], [-2, -4]],
),
]
constants = []
description = "extract columns from a 3D tensor given column indices"
target_program = "torch.transpose(in1, 1, 2)[torch.arange(in1.size(0)), in2, :]"
source = "https://stackoverflow.com/questions/54274074/selecting-columns-from-3d-tensor-according-to-a-1d-tensor-of-indices-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_28",
)
def stackoverflow_29():
examples = [
benchmark.Example(
inputs=[
[1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21],
# [-1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0],
[12, 0, 10, 23, 16],
# [0.1, -10, -0.1, 1.1, 0.41],
],
output=[6, 0, 5, 11, 8],
),
]
constants = []
description = "place continuous values into buckets given bucket boundaries"
target_program = "torch.searchsorted(in1, in2)"
source = "https://stackoverflow.com/questions/54155085/bucketing-continous-value-tensors-in-tensorflow" # lint: NOTYPO
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_29",
)
def stackoverflow_30():
examples = [
benchmark.Example(
inputs=[
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[9.0, 4.0], [8.0, 5.0], [7.0, 6.0]],
],
output=[
[math.sqrt(68), math.sqrt(58), math.sqrt(52)],
[math.sqrt(36), math.sqrt(26), math.sqrt(20)],
[math.sqrt(20), math.sqrt(10), math.sqrt(4)],
],
),
]
constants = []
description = "compute Euclidean distance between two tensors"
# StackOverflow answer is incorrect.
target_program = "torch.cdist(in1, in2)"
source = "https://stackoverflow.com/questions/54147780/tensorflow-how-to-calculate-the-euclidean-distance-between-two-tensor"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_30",
)
@benchmark.ignore('Input contains sparse tensor')
def stackoverflow_31():
examples = [
benchmark.Example(
inputs=[
torch.sparse_coo_tensor(
indices=torch.tensor([[0, 0, 1], [0, 1, 1]]),
values=[1.0, 1.5, -2.0],
size=[2, 2],
),
[[3.0, 1.0], [0.2, -1.0]],
],
output=5.29,
),
]
constants = []
description = "squared error between two tensors, one being a sparse tensor"
target_program = "torch.sum(torch.square(torch.sub(in1.to_dense(), in2)))"
source = "https://stackoverflow.com/questions/45032668/tensorflow-how-to-compute-the-square-error-between-a-tensor-and-a-sparse-tensor"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_31",
)
def stackoverflow_32():
examples = [
benchmark.Example(
inputs=[
[[1, 6, 2, 1], [3, 1, 4, 2], [2, 1, 2, 5]]
# [[0.1, 0.6, 0.2, 0.1], [0.3, 0.1, 0.4, 0.2], [0.2, 0.1, 0.2, 0.5]],
],
output=[13, 15, 20]
# [1.3, 1.5, 2.0],
),
]
constants = []
description = "weighted sum across rows, where the column index is the weight"
target_program = "torch.sum(torch.mul(in1, torch.unsqueeze(torch.arange(in1.size(1)),0).expand(in1.size(0),-1)), 1)"
source = "https://stackoverflow.com/questions/48659449/how-to-compute-the-weighted-sum-of-a-tensor-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_32",
)
@benchmark.ignore('Out of scope')
def stackoverflow_33():
examples = [
benchmark.Example(
inputs=[
[
[0.3, 0.1, 0.4],
[0.1, 0.5, 0.9],
[0.2, 0.6, 0.5],
[0.3, 0.5, 0.8],
[0.9, 0.7, 0.9],
],
[[0.3, 0.2, 0.3], [0.8, 0.4, 0.6], [0.2, 0.6, 0.4], [0.3, 0.3, 0.8]],
],
output=[0.02, 0.19, 0.01, 0.04],
),
]
constants = []
description = "find the minimum distance between two sets of points"
target_program = "torch.min(torch.sum(torch.square(torch.sub(torch.unsqueeze(in1, 0), torch.unsqueeze(in2, 1))), 2), 1)[0]"
source = "https://stackoverflow.com/questions/40558251/computing-minimum-distance-for-each-element-in-a-tensor-relative-to-another-tens"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_33",
)
def stackoverflow_34():
examples = [
benchmark.Example(
inputs=[
[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[10, 20], [30, 40]]],
[3, 5, 10],
],
output=[[128, 236], [344, 452]],
),
]
constants = []
description = "compute a weighted sum of tensors"
target_program = "torch.tensordot(in2, in1, 1)"
source = "https://stackoverflow.com/questions/49532371/compute-a-linear-combination-of-tensors-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_34",
)
@benchmark.ignore('Out of scope')
def stackoverflow_35():
examples = [
benchmark.Example(
inputs=[
[
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]],
],
[
[[9.0, 8.0], [7.0, 6.0], [5.0, 4.0]],
[[90.0, 80.0], [70.0, 60.0], [50.0, 40.0]],
],
[0.1, 0.4, 0.8],
],
output=[
[[8.2, 7.4], [5.4, 5.2], [5.0, 5.6]],
[[82.0, 74.0], [54.0, 52.0], [50.0, 56.0]],
],
),
]
constants = []
description = "linear interpolation between two tensors"
target_program = (
"torch.add(in2, torch.mul(torch.unsqueeze(in3, 1), torch.sub(in1, in2)))"
)
source = "https://stackoverflow.com/questions/49643371/keras-compute-convex-combination-of-two-tensors"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_35",
)
def stackoverflow_36():
examples = [
benchmark.Example(
inputs=[
[1, 0, 1, 1, 0, 1, 0, 1],
],
output=[1.0, 0.0, 0.333333, 0.25, 0.0, 0.166667, 0.0, 0.125],
),
]
constants = []
description = "divide each element by the column index"
target_program = "torch.div(in1, torch.arange(1,in1.size(0)+1))"
source = "https://stackoverflow.com/questions/43306788/divide-elements-of-1-d-tensor-by-the-corrispondent-index"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_36",
)
def stackoverflow_37():
examples = [
benchmark.Example(
inputs=[
[
[
[[10, 20, 30], [40, 50, 60]],
[[12, 34, 56], [78, 98, 76]],
# [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
# [[1.2, 3.4, 5.6], [7.8, 9.8, 7.6]],
]
],
# [0.5, 1.0, 2.0],
[5, 10, 20]
],
output=[[[850, 1900], [1520, 2890]]]
# [[[8.5, 19.0], [15.2, 28.9]]],
),
]
constants = []
description = "dot product a vector with last dimension of a tensor"
target_program = "torch.tensordot(in1, in2, 1)"
source = "https://stackoverflow.com/questions/49206051/multiply-4-d-tensor-with-1-d-tensor"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_37",
)
@benchmark.ignore('Out of scope')
def stackoverflow_38():
# To simplify the problem, and to get more than one number as output, this
# doesn't include the final reduce_sum step.
examples = [
benchmark.Example(
inputs=[
[9, 2, 5, 3, 7, 4],
[[0, 0, 1, 0, 1, 0], [1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 1]],
],
output=[35, 9, 120],
),
]
constants = []
description = "compute the product of marked elements"
target_program = "torch.prod(torch.maximum(torch.max(in2), torch.mul(in1, in2)), 1)"
source = "https://stackoverflow.com/questions/49511529/tensorflow-compute-multiplication-by-binary-matrix"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_38",
)
def stackoverflow_39():
examples = [
benchmark.Example(
inputs=[
# [[-1.5, 1.0, 0.9, 2.0], [1.1, 0.0, -0.1, -0.9], [-1.0, 0.1, -1.1, 2.5]],
[[15, 10, 9, 20], [11, 0, 1, 9], [10, 1, 11, 25]]
],
output=[
[225, 100, 81, 400],
[121, 0, 1, 81],
[100, 1, 121, 625],
# [2.25, 1.0, 0.0, 4.0],
# [1.21, 0.0, 0.0, 0.0],
# [1.0, 0.0, 1.21, 6.25],
],
),
]
constants = []
description = (
"set to 0 the elements with absolute value less than 1, and "
"square the other elements"
)
target_program = (
"torch.square(torch.where(torch.lt(torch.abs(in1), 1), torch.tensor(0.), in1))"
)
source = "https://stackoverflow.com/questions/37912161/how-can-i-compute-element-wise-conditionals-on-batches-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_39",
)
@benchmark.ignore('Target program contains sparse tensor.')
def stackoverflow_40():
examples = [
benchmark.Example(
inputs=[
[4, 5, 2, 7, 8, 6],
[[0, 2], [0, 4], [1, 1], [1, 3], [2, 0], [2, 3]],
],
output=[[0, 0, 4, 0, 5], [0, 2, 0, 7, 0], [8, 0, 0, 6, 0]],
),
]
constants = []
description = "use the output of tf.nn.top_k to make a sparse tensor"
target_program = (
"torch.sparse_coo_tensor(torch.transpose(in2, 0, 1), in1, (3,5)).to_dense()"
)
source = "https://stackoverflow.com/questions/43996831/make-a-sparse-tensor-based-on-the-output-of-tf-nn-top-k"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_40",
)
def stackoverflow_41():
examples = [
benchmark.Example(
inputs=[
[5, 2, 8, 2, 4, 1, 1, 0, 2, 1],
3,
],
output=[5, 2, 8, 4, 1, 1, 0, 2, 1],
),
]
constants = []
description = "copy all elements except at the given index"
target_program = "torch.masked_select(in1, torch.ne(torch.arange(in1.size(0)), 3))"
source = "https://stackoverflow.com/questions/54499051/elegant-way-to-access-python-list-and-tensor-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_41",
)
def stackoverflow_42():
examples = [
benchmark.Example(
inputs=[
# [4, 6, 2, 6, 7, 3, -3],
[4, 6, 2, 6, 7, 3, 3],
7
],
output=[0, 0, 0, 0, 1, 0, 0],
),
]
constants = []
description = "create a binary vector where the max element is 1"
target_program = "torch.where(torch.eq(torch.max(in1),in1), 1, 0)"
source = "https://stackoverflow.com/questions/54493814/binary-vector-of-max"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_42",
)
@benchmark.ignore('Out of scope')
def stackoverflow_43():
examples = [
benchmark.Example(
inputs=[
[
[12, 34, 56, 78, 90, 10],
[99, 88, 77, 55, 44, 33],
[-1, -2, -3, -4, -5, -6],
],
[0, 1, 1, 0, 2, 0],
],
output=[12, 88, 77, 78, -5, 10],
),
]
constants = []
description = "extract elements of a tensor given row indices"
target_program = "torch.squeeze(torch.gather(torch.transpose(in1, 0, 1), 1, torch.unsqueeze(in2, 1)))"
source = "https://stackoverflow.com/questions/54455169/better-way-to-access-individual-elements-in-a-tensor"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_43",
)
def stackoverflow_44():
examples = [
benchmark.Example(
inputs=[
[
# [3, 5, 2],
# [6, 2, 3],
# [8, 7, 1],
# [0, -3, 5],
# [-4, 7, 3],
# [2, 1, 6],
# [10, 20, 30],
# [4, 5, 6],
[3, 5, 2],
[6, 2, 3],
[8, 7, 1],
[0, 3, 5],
[4, 7, 3],
[2, 1, 6],
[10, 20, 30],
[4, 5, 6],
],
],
            output=[[9, 7, 5], [8, 10, 6], [6, 8, 9], [14, 25, 36]]
# [[9, 7, 5], [8, 4, 6], [-2, 8, 9], [14, 25, 36]],
),
]
constants = [2]
description = "sum across columns for pairs of consecutive rows"
target_program = "torch.sum(torch.reshape(in1, (-1, 2, in1.size(1))), 1)"
source = "https://stackoverflow.com/questions/54402389/sum-the-columns-for-each-two-consecutive-rows-of-a-tensor-of-3-dimensions"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_44",
)
def stackoverflow_45():
examples = [
benchmark.Example(
inputs=[
[1, 0, 1, 0, 1],
[[[12, 34], [56, 78], [23, 54], [76, 78], [42, 24]]],
],
output=[[[34, 12], [56, 78], [54, 23], [76, 78], [24, 42]]],
),
]
constants = []
description = "reverse the order in the marked rows"
    target_program = (
        "torch.where(torch.unsqueeze(in1,1).bool(), torch.roll(in2, 1, -1), in2)"
    )
source = "https://stackoverflow.com/questions/54337925/reverse-order-of-some-elements-in-tensorflow"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_45",
)
def stackoverflow_46():
examples = [
benchmark.Example(
inputs=[
[3, 4, 1],
],
output=[0, 0, 0, 1, 1, 1, 1, 2],
),
]
constants = []
description = "convert segment lengths to segment ids"
target_program = """
mask = torch.arange(torch.max(in1)).expand(in1.size(0), torch.max(in1)) < torch.unsqueeze(in1, dim=1)
solution = torch.mul(torch.unsqueeze(torch.arange(mask.size(0)), 1), mask)
solution = torch.masked_select(solution, mask)
"""
source = "https://stackoverflow.com/questions/58652161/how-to-convert-2-3-4-to-0-0-1-1-1-2-2-2-2-to-utilize-tf-math-segment-sum"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_46",
)
@benchmark.ignore('Out of scope')
def stackoverflow_47():
examples = [
benchmark.Example(
inputs=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[
[True, True, True, False, False],
[True, True, False, False, False],
[True, True, True, True, True],
[True, True, True, True, False],
[True, False, False, False, False],
[True, True, False, False, False],
],
],
output=[
[0, 1, 2, 0, 0],
[3, 4, 0, 0, 0],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 0],
[14, 0, 0, 0, 0],
[15, 16, 0, 0, 0],
],
),
]
constants = []
description = "put given values into a sequence mask"
target_program = """
mask = torch.reshape(in2, [-1])
solution = torch.reshape(torch.where(mask, torch.sub(torch.cumsum(mask, 0), 1), torch.tensor(0)), in2.size())
"""
source = "https://stackoverflow.com/questions/58641546/how-can-i-put-the-sequential-values-to-the-sequence-mask"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_47",
)
def stackoverflow_48():
examples = [
benchmark.Example(
inputs=[
[32, 53, 45, 38, 29, 89, 64, 23],
[38, 53, 89, 38, 32, 64],
],
output=[3, 1, 5, 3, 0, 6],
),
]
constants = []
description = "find the indices of all elements"
target_program = "torch.argmax(torch.eq(in1, torch.unsqueeze(in2, 1)).int(), 1)"
source = "https://stackoverflow.com/questions/58481332/getting-the-indices-of-several-elements-in-a-tensorflow-at-once"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_48",
)
def stackoverflow_49():
examples = [
benchmark.Example(
inputs=[
# Shape = [3, 1, 2, 3].
# [
# [[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],
# [[[0.8, 1.0, 0.0], [0.6, 0.4, 0.2]]],
# [[[0.9, 0.8, 0.7], [0.1, 0.2, 0.3]]],
# ],
[
[[[1, 2, 3], [4, 5, 6]]],
[[[8, 10, 0], [6, 4, 2]]],
[[[9, 8, 7], [1, 2, 3]]],
],
# [2.0, 0.5, 1.0],
[20, 5, 10]
],
output=[
[[[20, 40, 60], [80, 100, 120]]],
[[[40, 50, 0], [30, 20, 10]]],
[[[90, 80, 70], [10, 20, 30]]],
# [[[0.2, 0.4, 0.6], [0.8, 1.0, 1.2]]],
# [[[0.4, 0.5, 0.0], [0.3, 0.2, 0.1]]],
# [[[0.9, 0.8, 0.7], [0.1, 0.2, 0.3]]],
],
),
]
constants = []
description = "multiply tensors by scalars in a batched way"
target_program = "torch.transpose(torch.mul(in2, torch.transpose(in1, 0, 3)), 0, 3)"
source = "https://stackoverflow.com/questions/58466562/given-a-batch-of-n-images-how-to-scalar-multiply-each-image-by-a-different-scal"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_49",
)
def stackoverflow_50():
examples = [
benchmark.Example(
inputs=[
# 5, # Rows.
# 6, # Columns.
3, # Index of nonzero column.
],
output=[
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
],
),
]
constants = []
description = "create a binary matrix where a specified column is set to one"
target_program = "torch.zeros((in1, in2), dtype=torch.int)"
source = "https://stackoverflow.com/questions/58537495/tensorflow-initialize-a-sparse-tensor-with-only-one-line-column-not-zero"
return benchmark.Benchmark(
examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name="stackoverflow_50",
)
# # A template for easy copy/pasting. Copying an existing benchmark and replacing
# # parts of it will lead to a state where the benchmark is half-correct, but not
# # obviously so. Copy this template instead when creating new benchmarks.
# """
# def stackoverflow_NUMBER():
# examples = [
# benchmark.Example(
# inputs=[
# INPUT_1,
# INPUT_2,
# ],
# output=OUTPUT
# ),
# ]
# constants = [CONSTANTS]
# description = 'DESCRIPTION'
# target_program = 'SOLUTION_PROGRAM'
# source = 'PROBLEM_SOURCE'
# return benchmark.Benchmark(examples=examples,
# constants=constants,
# description=description,
# target_program=target_program,
# source=source,
# name='stackoverflow_NUMBER')
# """ # pylint: disable=pointless-string-statement
|
APIsynth-master
|
Synthesis_incorporation/benchmarks/stackoverflow_benchmarks.py
|
# Copyright 2021 The TF-Coder Authors.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Exhaustive value search (enumerating by weight of expression)."""
import collections
import keyword
import re
import sys
import timeit
import tokenize
from typing import Any, Dict, List, NamedTuple, Optional, Set, Text, Tuple, Union
import random
from itertools import product
import numpy as np
import six
import torch
from absl import logging
from tf_coder import torch_functions
from tf_coder.benchmarks import benchmark as benchmark_module
from tf_coder.natural_language import description_handler as description_handler_module
from tf_coder.models import prediction_model
from tf_coder.repair import snippet_handler as snippet_handler_module
from tf_coder.value_search import all_operations
from tf_coder.value_search import filtered_values_cache
from tf_coder.value_search import operation_base
from tf_coder.value_search import operation_filtering
from tf_coder.value_search import operation_statistics
from tf_coder.value_search import value as value_module
from tf_coder.value_search import value_search_settings as settings_module
from tf_coder.natural_language import description_handler_factory
from tf_coder.models import prediction_model_factory
from tf_coder.repair import snippet_handler_factory
ValuesByWeight = operation_base.ValuesByWeightDict
DescriptionHandler = description_handler_module.DescriptionHandler
PredictionModel = prediction_model.ClassificationModel
SnippetHandler = snippet_handler_module.SnippetHandler
Solution = NamedTuple(
"Solution",
[
("value", value_module.Value),
("expression", Text),
("weight", int),
("time", float),
],
)
ValueSearchResults = NamedTuple(
"ValueSearchResults",
[
("solutions", List[Solution]),
("total_time", float),
("value_set", Set[value_module.Value]),
("values_by_weight", ValuesByWeight),
("benchmark", benchmark_module.Benchmark),
("settings", settings_module.Settings),
("statistics", Optional[operation_statistics.OperationStatistics]),
],
)
def _suppress_warnings() -> None:
"""Suppress TensorFlow and Numpy warnings."""
# TensorFlow will produce tons of error logging because we often apply
# TensorFlow operations with bad arguments. Suppressing logging noticeably
# improves performance.
logging.set_verbosity(logging.ERROR)
# Numpy sometimes produces warnings for overflow, etc., which can be
# distracting.
np.seterr(all="ignore")
def _user_inputs(inputs: Union[Dict[Text, Any], List[Any]]) -> List[Any]:
"""Takes the inputs dict or list and extracts the input tensors."""
if isinstance(inputs, list):
return inputs
elif isinstance(inputs, dict):
return list(inputs.values())
elif isinstance(inputs, tuple):
return list(inputs)
else:
raise ValueError(
"inputs must be a list or dict, but is {}".format(type(inputs))
)
def _contains_sparse(benchmark: benchmark_module.Benchmark) -> bool:
"""Returns whether the benchmark involves SparseTensors."""
# TODO(kshi): These heuristics are okay, but we should let the user choose if
# they want to.
for example in benchmark.examples:
if isinstance(example.output, torch.Tensor):
if example.output.is_sparse:
return True
for input_object in _user_inputs(example.inputs):
if isinstance(input_object, torch.Tensor):
if input_object.is_sparse:
return True
return "sparse" in benchmark.description.lower()
def _add_value_by_weight(
values_by_weight: ValuesByWeight, value: value_module.Value, weight: int
) -> None:
"""Adds a value of a given weight to values_by_weight."""
if weight < len(values_by_weight):
values_by_weight[weight][value] = value
def _constant_exists(constant: Any, constants_so_far: Set[Any]) -> bool:
"""Checks whether a constant exists already."""
# We can't use the `in` keyword because `True in [1, 2, 3]` evaluates to True!
# (`True == 1` evaluates to True.)
return any(
constant == existing and type(constant) is type(existing)
for existing in constants_so_far
)
def _is_valid_name(name: Text) -> bool:
"""Returns whether name is an acceptable Python identifier."""
# Behavior is slightly different between Python versions, e.g., `await` is a
# keyword only in PY3, and `print` is keyword only in PY2.
if name in ["torch", "np"] or keyword.iskeyword(name):
return False
if six.PY3:
return name.isidentifier()
else:
return bool(re.match(tokenize.Name + "$", name)) and name not in [
"True",
"False",
"None",
]
def _input_names_to_objects(
inputs_collection: Union[List[Any], Dict[Text, Any]]
) -> Dict[Text, Any]:
"""Returns a mapping from input names to objects, also validating names."""
if isinstance(inputs_collection, (list, tuple)):
input_names_to_objects = collections.OrderedDict(
("in" + str(i + 1), input_object)
for i, input_object in enumerate(inputs_collection)
)
elif isinstance(inputs_collection, dict):
for name in inputs_collection:
if not isinstance(name, six.string_types):
raise ValueError("The input name {!r} must be a string.".format(name))
if not _is_valid_name(name):
raise ValueError(
"The input name {!r} is not a valid Python identifier.".format(name)
)
input_names_to_objects = inputs_collection
else:
raise ValueError(
"The collection of inputs has the wrong format. It can be "
"a list of input objects, or a dict mapping string names "
"to input objects."
)
return input_names_to_objects
def _add_constants_and_inputs_and_print(
values_by_weight: ValuesByWeight,
benchmark: benchmark_module.Benchmark,
output_value: value_module.OutputValue,
constant_operation: operation_base.Operation,
settings: settings_module.Settings,
multipliers: Optional[Dict[Text, float]] = None
) -> None:
"""Adds constant/input Values to values_by_weight, and prints to stdout."""
    # constants_so_far is a set for fast membership checks, while
    # constants_to_print is a list so that constants can be printed in the same
    # order they are chosen by the heuristics.
constants_so_far = set()
constants_to_print = []
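    # When given, `multipliers` rescales the default weight of a constant or
    # input, keyed by the input name (e.g. "in1") or by the constant's printed
    # value; multipliers below 1 make a value cheaper for the search to use,
    # and the resulting weight is clamped to be at least 1.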
# User-provided constants.
for c in benchmark.constants:
if not _constant_exists(c, constants_so_far):
constant_value = value_module.ConstantValue(c)
weight = torch_functions.PROVIDED_CONSTANT_WEIGHT
if multipliers:
weight = max(1, int(round(weight * multipliers.get(str(constant_value.value), 1))))
_add_value_by_weight(values_by_weight, constant_value, weight)
constants_so_far.add(c)
constants_to_print.append(c)
# Add inputs, while computing some info for extra constants later.
max_input_tensor_rank = 0
dimension_lengths = set()
input_names_to_objects = _input_names_to_objects(benchmark.examples[0].inputs)
for name, input_object in input_names_to_objects.items():
input_value = value_module.InputValue(input_object, name)
if input_value.is_tensor:
max_input_tensor_rank = max(max_input_tensor_rank, len(input_value.shape))
dimension_lengths.update(input_value.shape)
if input_value.is_primitive and constant_operation is not None:
scalar_tensor_value = constant_operation.apply([input_value], settings)
weight = torch_functions.PRIMITIVE_INPUT_AS_TENSOR_WEIGHT
if multipliers:
weight = max(1, int(round(weight * multipliers.get(name, 1))))
_add_value_by_weight(
values_by_weight,
scalar_tensor_value,
weight,
)
weight = torch_functions.INPUT_VARIABLE_WEIGHT
if multipliers:
weight = max(1, int(round(weight * multipliers.get(name, 1))))
_add_value_by_weight(
values_by_weight, input_value, weight
)
if input_value.is_primitive:
constants_so_far.add(input_value.value)
constants_to_print.append(input_value.value)
if settings.printing.print_examples:
print(
"Input '{}'-{}:\n{!s}\n".format(name, input_value.type, input_value.value)
)
if output_value.shape is not None:
dimension_lengths.update(output_value.shape)
# Always include these as constants.
common_constants = [0, 1, -1]
# common_constants = [0, 1, -1, True, False]
# Also include 2, 3, ..., max_example_input_tensor_rank - 1 when applicable.
axis_constants = list(range(2, max_input_tensor_rank))
# Also include dimension lengths of input and output tensors.
shape_constants = sorted(dimension_lengths)
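    # Illustrative (hypothetical) example: for inputs of shapes (2, 3) and (4,)
    # and an output of shape (2, 4), max_input_tensor_rank is 2, so
    # axis_constants is empty and shape_constants is [2, 3, 4].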
constant_weight_pairs = (
[(c, torch_functions.COMMON_CONSTANT_WEIGHT) for c in common_constants]
+ [(c, torch_functions.AXIS_CONSTANT_WEIGHT) for c in axis_constants]
+ [(c, torch_functions.SHAPE_CONSTANT_WEIGHT) for c in shape_constants]
)
for constant, weight in constant_weight_pairs:
if not _constant_exists(constant, constants_so_far):
constant_value = value_module.ConstantValue(constant)
if multipliers:
weight = max(1, int(round(weight * multipliers.get(str(constant_value.value), 1))))
_add_value_by_weight(values_by_weight, constant_value, weight)
constants_so_far.add(constant)
constants_to_print.append(constant)
if output_value.shape:
# Add the output shape as a constant.
shape_tuple = tuple(output_value.shape)
shape_tuple_value = value_module.ConstantValue(shape_tuple)
weight = torch_functions.OUTPUT_SHAPE_TUPLE_WEIGHT
if multipliers:
weight = max(1, int(round(weight * multipliers.get(str(shape_tuple_value.value), 1))))
_add_value_by_weight(values_by_weight, shape_tuple_value, weight)
# Don't add shape_tuple to constants_to_print, because printing it out could
# be confusing to users.
# Only for experiments in the PLDI paper.
if settings.paper_experiments.uniform_weights:
# Count the number of values.
num_values = sum(
len(values_with_weight) for values_with_weight in values_by_weight
)
# Take all values and put them in the collection for weight 1.
for weight in range(2, len(values_by_weight)):
for heavy_value in values_by_weight[weight]:
values_by_weight[1][heavy_value] = heavy_value
values_by_weight[weight].clear()
# Make sure we did it right.
for weight, values_with_weight in enumerate(values_by_weight):
assert len(values_with_weight) == (num_values if weight == 1 else 0)
if settings.printing.print_examples:
print("Output-{}:\n{!s}\n".format(output_value.type, output_value.value))
print("Constants: {!r}\n".format(constants_to_print))
if benchmark.snippet:
print("Original snippet: {!r}\n".format(benchmark.snippet))
if benchmark.target_program:
print("Target snippet: {!r}\n".format(benchmark.target_program))
if benchmark.description:
print("Description: {}\n".format(benchmark.description))
print("Searching...\n")
sys.stdout.flush() # Flush so the inputs/output appear in Colab immediately.
def _check_solution(
expression: Text,
used_input_names: Set[Text],
benchmark: benchmark_module.Benchmark,
settings: settings_module.Settings,
) -> bool:
"""Checks that the solution is good."""
del expression # Unused for now.
if settings.require_all_inputs_used:
if len(used_input_names) < len(benchmark.examples[0].inputs):
return False
elif settings.require_one_input_used:
if not used_input_names:
return False
# TODO(kshi): Check that the solution works (floating-point errors may
# accumulate beyond an acceptable threshold).
return True
def _record_solutions(
value: value_module.Value,
weight: int,
start_time: float,
solutions: List[Solution],
solution_expression_set: Set[Text],
benchmark: benchmark_module.Benchmark,
settings: settings_module.Settings,
) -> None:
"""Records new solutions in the `solutions` list."""
reconstructions = value.reconstruct_all_expressions_with_input_names()
this_solution_time = timeit.default_timer() - start_time
for expression, used_input_names in reconstructions:
if expression in solution_expression_set:
continue
if not _check_solution(expression, used_input_names, benchmark, settings):
if settings.printing.bad_solutions:
print("Bad solution: {}".format(expression))
continue
solution_expression_set.add(expression)
solutions.append(
Solution(
value=value,
expression=expression,
weight=weight,
time=this_solution_time,
)
)
if settings.printing.print_solutions:
print("Found solution: {}".format(expression))
# Flush so the solutions appear in Colab immediately.
sys.stdout.flush()
if len(solutions) >= settings.max_solutions:
break
def _check_solution_found(value, output_value, benchmark,
weight, start_time, end_time,
solutions, solution_expression_set, settings, is_prediction=False):
possible_first_solution = not solutions
if settings.printing.print_solutions:
if is_prediction:
print("Found Solution using prediction")
else:
print("Found Solution from enumerative search")
# Found solution(s), but some may be bad.
_record_solutions(
value,
weight,
start_time,
solutions,
solution_expression_set,
benchmark,
settings,
)
if possible_first_solution and solutions:
end_time = min(
end_time,
timeit.default_timer()
+ settings.max_extra_solutions_time,
)
return end_time
def _find_solutions_multi_model(
benchmark: benchmark_module.Benchmark,
operations: List[operation_base.Operation],
start_time: float,
settings: settings_module.Settings,
prediction_model: Optional[PredictionModel] = None,
snippet_constant_multipliers: Optional[Dict[Text, float]] = None
) -> Tuple[
List[Solution],
Set[value_module.Value],
ValuesByWeight,
Optional[operation_statistics.OperationStatistics],
]:
"""Helper, returning (solutions, value_set, values_by_weight, statistics)."""
timeout_reached = False
end_time = start_time + settings.timeout
only_minimal_solutions = settings.only_minimal_solutions
if settings.max_solutions == 1:
# If we only want one solution, it will be minimal.
only_minimal_solutions = True
# An object to track statistics, if requested.
statistics = (
operation_statistics.OperationStatistics()
if settings.printing.statistics
else None
)
# A list of Solution namedtuples.
solutions = []
# A set of string solution expressions (don't return duplicate solutions).
solution_expression_set = set()
# The output value to search for.
output_value = value_module.OutputValue(benchmark.examples[0].output)
# A list of OrderedDicts mapping Value objects to themselves. The i-th
# OrderedDict contains all Value objects of weight i.
values_by_weight = [
collections.OrderedDict() for _ in range(settings.max_weight + 1)
]
# Find and cache the constant and casting operations for use later.
constant_operation = None
int_operation = None
float_operation = None
bool_operation = None
for operation in operations:
if operation.name == torch_functions.CONSTANT_OPERATION_NAME:
constant_operation = operation
elif operation.name == torch_functions.INT_OPERATION_NAME:
int_operation = operation
elif operation.name == torch_functions.FLOAT_OPERATION_NAME:
float_operation = operation
elif operation.name == torch_functions.BOOL_OPERATION_NAME:
bool_operation = operation
# Create the output dtype value for use later.
dtype_value = value_module.ConstantValue(output_value.dtype)
# Populate values_by_weight with inputs and constants. This also prints
# inputs/output/constants to stdout.
_add_constants_and_inputs_and_print(
values_by_weight, benchmark, output_value, constant_operation, settings, snippet_constant_multipliers
)
# A set storing all values found so far.
value_set = set().union(*values_by_weight)
constants_values = [value for value in value_set if not value.is_tensor]
input_values = [value for value in value_set if isinstance(value, value_module.InputValue)]
value_trial_list = []
value_trial_list.extend([[value] for value in input_values])
double_products = product(list(input_values), list(input_values))
double_products = [list(p) for p in double_products]
value_trial_list.extend(double_products)
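    # Note: the enumeration below iterates example_trial_list; the loop over
    # value_trial_list is commented out further down.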
# TODO(daye): update this to cover every combination, with smarter prioritization
# Current version covers all the benchmark cases.
# It might be better to ignore some combinations, give up some examples that will
# take long time either way (i.e., complicated ones)
example_trial_list = []
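    # Each entry of example_trial_list is a list of per-API-call input specs,
    # one spec per call in a predicted sequence. Within a spec's "inputs", the
    # integer 0 stands for the output of the previous call (see the placeholder
    # handling in the search loop below).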
# single input tensor, 1 api call - [in1]
single_1 = [[{"inputs": [value], "output": output_value}] for value in input_values]
example_trial_list.extend(single_1)
# double input tensor, 1 api call - [in1, in2]
double_products = product(list(input_values), list(input_values))
double_products = [list(p) for p in double_products]
double_1 = [[{"inputs": values, "output": output_value}] for values in double_products]
example_trial_list.extend(double_1)
# double input tensor, 2 api calls - [in1], [in2, 0]
double_2 = [[{"inputs": [values[0]], "output": output_value},{"inputs": [values[1], 0], "output": output_value}] for values in double_products]
example_trial_list.extend(double_2)
# double input tensor, 2 api calls, output1 being the only input to api2. - [in1, in2], [0]
double_2_1 = [[{"inputs": [values[0], values[1]], "output": output_value},{"inputs": [0], "output": output_value}] for values in double_products]
example_trial_list.extend(double_2_1)
triple_products = product(list(input_values), list(input_values), list(input_values))
triple_products = [list(p) for p in triple_products]
# [in1, in2], [in3, 0]
triple_2 = [[{"inputs": [values[0], values[1]], "output": output_value},{"inputs": [values[2], 0], "output": output_value}] for values in triple_products]
example_trial_list.extend(triple_2)
# single input tensor, 2 api calls - [in1], [0]
single_2 = [[{"inputs": [value], "output": output_value},{"inputs": [0], "output": output_value}] for value in input_values]
example_trial_list.extend(single_2)
# # double input tensor, 2 api calls, output1 being the first input to api2. - [in1], [0, in1]
# double_2_1 = [[{"inputs": [values[0]], "output": output_value},{"inputs": [0, values[1]], "output": output_value}] for values in double_products]
# example_trial_list.extend(double_2_1)
# # double input tensor, 2 api calls - [in1], [in2], [0, 0, in2]
double_2 = [[{"inputs": [values[0]], "output": output_value},{"inputs": [values[1]], "output": output_value},{"inputs": [0, 0, values[2]], "output": output_value}] for values in triple_products]
example_trial_list.extend(double_2)
# # single input tensor, 2 api calls - [], [in1]
# single_2 = [[{"inputs": [], "output": output_value},{"inputs": [value], "output": output_value}] for value in input_values]
# example_trial_list.extend(single_2)
# # double input tensor, 3 api calls, first api input to be none. - [], [in1], [0, in2]
# double_3 = [[{"inputs": [], "output": output_value},{"inputs": [values[0]], "output": output_value},{"inputs": [0, values[1]], "output": output_value}] for values in double_products]
# example_trial_list.extend(double_3)
# for values in value_trial_list:
for example_sequence in example_trial_list:
result_values = set()
predicted_sequences = prediction_model.get_predicted_sequence(example_sequence=example_sequence, settings=settings)
# predicted_sequences: [sequence, sequence, ...]
# : [[operation, operation, ...], [operation, operation ...]]
for sequence in predicted_sequences:
if settings.printing.predicted_operations:
print("sequence: {}".format([op.name for op in sequence]))
# intermediate_values = set(value_set)
intermediate_values = []
prev_intermediate_values = []
# sequence: [operation, operation, ...]
for i_op, operation in enumerate(sequence):
intermediate_values = []
new_intermediate_values = set()
if i_op < len(example_sequence):
cur_api_inputs = example_sequence[i_op]["inputs"]
# print("Cur API Input")
# print("With example, inputs: [{}],".format(", ".join([i.reconstruct_expression() if isinstance(i, value_module.Value) else str(i) for i in example_sequence[i_op]['inputs']])))
# 0 is a placeholder for the previous api's output.
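                    # For example, if cur_api_inputs is [in2, 0] and
                    # prev_intermediate_values holds values v1 and v2, the
                    # candidate input lists become [in2, v1] and [in2, v2].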
if 0 not in cur_api_inputs:
intermediate_values.append(cur_api_inputs)
# cur_api_inputs = [cur_api_inputs+[i_value] for i_value in intermediate_values]
# intermediate_values.extend(cur_api_inputs)
elif cur_api_inputs.count(0) == 1:
# for this version, there will be at most one 0 in each api input
cur_intermediate_values = []
for in_value in prev_intermediate_values:
intermediate_value = []
for iv in cur_api_inputs:
if iv == 0:
intermediate_value.append(in_value)
else:
intermediate_value.append(iv)
cur_intermediate_values.append(intermediate_value)
# intermediate_values.append([[in_value] if cur_api_inputs[iv] == 0 else cur_api_inputs[iv] for iv in range(len(cur_api_inputs))])
# print(intermediate_value)
# intermediate_values.append(intermediate_value)
# cur_api_inputs = [[i_value]+cur_api_inputs[1:] for i_value in intermediate_values]
intermediate_values.extend(cur_intermediate_values)
elif cur_api_inputs.count(0) == 2:
# for this version, there will be at most one 0 in each api input
cur_intermediate_values = []
for in_values in product(prev_intermediate_values, prev_intermediate_values):
intermediate_value = []
in_value_idx = 0
for iv in cur_api_inputs:
if iv == 0:
intermediate_value.append(in_values[in_value_idx])
in_value_idx += 1
else:
intermediate_value.append(iv)
cur_intermediate_values.append(intermediate_value)
# intermediate_values.append([[in_value] if cur_api_inputs[iv] == 0 else cur_api_inputs[iv] for iv in range(len(cur_api_inputs))])
# print(intermediate_value)
# intermediate_values.append(intermediate_value)
# cur_api_inputs = [[i_value]+cur_api_inputs[1:] for i_value in intermediate_values]
intermediate_values.extend(cur_intermediate_values)
if settings.printing.verbose:
print("availalbe input for API-{}".format(i_op))
print([i.reconstruct_expression() if isinstance(i, value_module.Value) else i for i in intermediate_values])
for intermediate_value in intermediate_values:
if len(intermediate_value) == 2 and operation.name in ['torch.mul(input, other)']:
new_values = []
for value in intermediate_value:
if value.is_tensor and value.value.dtype == torch.bool:
new_values.append(all_operations.find_operation_with_name('IntOperation').apply([value], settings))
else:
new_values.append(value)
intermediate_value = new_values
elif len(intermediate_value) == 3 and operation.name in ['torch.where(condition, input, other)', 'torch.where(condition, self, other)']:
if intermediate_value[0].is_tensor and intermediate_value[0].value.dtype != torch.bool:
intermediate_value[0] = all_operations.find_operation_with_name('BoolOperation').apply([intermediate_value[0]], settings)
if not isinstance(intermediate_value, list):
intermediate_value = [[intermediate_value]]
else:
intermediate_value = [[v] for v in intermediate_value]
predicted_values = operation.enumerate_values_with_values(
given_values=intermediate_value,
potential_value_list=constants_values,
end_time=end_time,
settings=settings,
statistics=statistics
)
for predicted_value in predicted_values:
if predicted_value not in value_set:
if settings.printing.verbose:
expression = predicted_value.reconstruct_expression()
print("{} produces:\n{}".format(expression, predicted_value))
if predicted_value == output_value:
end_time = _check_solution_found(predicted_value, output_value, benchmark,
0, start_time, end_time,
solutions, solution_expression_set, settings, True)
if len(solutions) >= settings.max_solutions:
return (
solutions,
value_set,
values_by_weight,
statistics,
)
elif all_operations.find_operation_with_name('IntOperation').apply([predicted_value], settings) == output_value:
end_time = _check_solution_found(predicted_value, output_value, benchmark,
0, start_time, end_time,
solutions, solution_expression_set, settings, True)
if len(solutions) >= settings.max_solutions:
return (
solutions,
value_set,
values_by_weight,
statistics
)
else:
new_intermediate_values.add(predicted_value)
# do casting to new values
if i_op == len(sequence)-1:
result_values.add(predicted_value)
prev_intermediate_values += list(new_intermediate_values)
if timeit.default_timer() > end_time:
timeout_reached = True
# Don't return immediately; still try to cast new values because this is
# relatively quick.
break
# Try casting new values to the output dtype if this has a chance of being
# a correct solution.
for new_value in result_values:
if (new_value.shape == output_value.shape
and new_value.dtype != output_value.dtype
and operation_filtering.is_castable(new_value, dtype_value)
):
casted_value = None
if output_value.dtype == torch.int:
casted_value = int_operation.apply([new_value], settings)
elif output_value.dtype == torch.bool:
casted_value = bool_operation.apply([new_value], settings)
elif output_value.dtype == torch.float:
casted_value = float_operation.apply([new_value], settings)
if casted_value == output_value:
possible_first_solution = not solutions
# Found solution(s), but some may be bad.
_record_solutions(
casted_value,
0,
start_time,
solutions,
solution_expression_set,
benchmark,
settings,
)
if possible_first_solution and solutions:
end_time = min(
end_time,
timeit.default_timer() + settings.max_extra_solutions_time,
)
if len(solutions) >= settings.max_solutions:
return solutions, value_set, values_by_weight, statistics
if settings.printing.progress:
print(
"Found {} distinct values of weight {}, or {} total.".format(
len(result_values), 0, len(value_set)
)
)
if only_minimal_solutions and solutions:
return solutions, value_set, values_by_weight, statistics
if timeout_reached:
break
return solutions, value_set, values_by_weight, statistics
def _get_predicted_values(values, predicted_operation, constants_values, end_time, settings, statistics):
if len(values) > 1 and predicted_operation.name in ['torch.cat(tensors, dim)', 'torch.stack(tensors)', 'torch.stack(tensors, dim)']:
stacked_value = all_operations.find_operation_with_name('PairCreationOperation').apply(values, settings)
if stacked_value is None:
predicted_values = []
else:
predicted_values = predicted_operation.enumerate_values_with_values(
given_values=[[stacked_value]],
potential_value_list=constants_values,
end_time=end_time,
settings=settings,
statistics=statistics
)
    elif len(values) == 2 and predicted_operation.name in ['torch.mul(input, other)']:
new_values = []
for value in values:
if value.is_tensor and value.value.dtype == torch.bool:
new_values.append(all_operations.find_operation_with_name('IntOperation').apply([value], settings))
else:
new_values.append(value)
# values = [all_operations.find_operation_with_name('IntOperation').apply(value, settings) if value.is value.value.dtype == torch.bool and value is not None else value for value in values]
predicted_values = predicted_operation.enumerate_values_with_values(
given_values=[[value] for value in new_values],
potential_value_list=constants_values,
end_time=end_time,
settings=settings,
statistics=statistics
)
elif len(values) == 3 and predicted_operation.name in ['torch.where(condition, input, other)', 'torch.where(condition, self, other)']:
if values[0].value.dtype != torch.bool:
values[0] = all_operations.find_operation_with_name('BoolOperation').apply([values[0]], settings)
predicted_values = predicted_operation.enumerate_values_with_values(
            given_values=[[value] for value in values],
potential_value_list=constants_values,
end_time=end_time,
settings=settings,
statistics=statistics
)
elif len(values) == 1 and predicted_operation.name in ['torch.argmax(input)', 'torch.argmax(input, dim)']:
if values[0].value.dtype != torch.int:
values[0] = all_operations.find_operation_with_name('IntOperation').apply([values[0]], settings)
predicted_values = predicted_operation.enumerate_values_with_values(
given_values=[[values[0]]],
potential_value_list=constants_values,
end_time=end_time,
settings=settings,
statistics=statistics
)
else:
predicted_values = predicted_operation.enumerate_values_with_values(
given_values=[[value] for value in values],
potential_value_list=constants_values,
end_time=end_time,
settings=settings,
statistics=statistics
)
return predicted_values
# TODO: DFS will speed up the search further
def _find_solutions_first_sequence(
benchmark: benchmark_module.Benchmark,
operations: List[operation_base.Operation],
start_time: float,
settings: settings_module.Settings,
prediction_model: Optional[PredictionModel] = None,
snippet_constant_multipliers: Optional[Dict[Text, float]] = None
) -> Tuple[
List[Solution],
Set[value_module.Value],
ValuesByWeight,
Optional[operation_statistics.OperationStatistics],
]:
"""Helper, returning (solutions, value_set, values_by_weight, statistics)."""
timeout_reached = False
end_time = start_time + settings.timeout
only_minimal_solutions = settings.only_minimal_solutions
if settings.max_solutions == 1:
# If we only want one solution, it will be minimal.
only_minimal_solutions = True
# An object to track statistics, if requested.
statistics = (
operation_statistics.OperationStatistics()
if settings.printing.statistics
else None
)
# A list of Solution namedtuples.
solutions = []
# A set of string solution expressions (don't return duplicate solutions).
solution_expression_set = set()
# The output value to search for.
output_value = value_module.OutputValue(benchmark.examples[0].output)
# A list of OrderedDicts mapping Value objects to themselves. The i-th
# OrderedDict contains all Value objects of weight i.
values_by_weight = [
collections.OrderedDict() for _ in range(settings.max_weight + 1)
]
# Find and cache the constant and casting operations for use later.
constant_operation = None
int_operation = None
float_operation = None
bool_operation = None
for operation in operations:
if operation.name == torch_functions.CONSTANT_OPERATION_NAME:
constant_operation = operation
elif operation.name == torch_functions.INT_OPERATION_NAME:
int_operation = operation
elif operation.name == torch_functions.FLOAT_OPERATION_NAME:
float_operation = operation
elif operation.name == torch_functions.BOOL_OPERATION_NAME:
bool_operation = operation
# Create the output dtype value for use later.
dtype_value = value_module.ConstantValue(output_value.dtype)
# Populate values_by_weight with inputs and constants. This also prints
# inputs/output/constants to stdout.
_add_constants_and_inputs_and_print(
values_by_weight, benchmark, output_value, constant_operation, settings, snippet_constant_multipliers
)
# A set storing all values found so far.
value_set = set().union(*values_by_weight)
constants_values = [value for value in value_set if not value.is_tensor]
# non_primitive_values = [value for value in value_set if value.is_tensor or value.is_sequence]
non_primitive_values = [value for value in value_set if isinstance(value, value_module.InputValue)]
filter_cache = filtered_values_cache.FilteredValuesCache()
if settings.model.do_first_in_seq:
value_set = []
for _ in range(3):
value_set = list(set(value_set).union(set(non_primitive_values)))
value_trial_list = [[]]
value_trial_list.extend([value] for value in value_set)
value_trial_list.extend(product(value_set, value_set))
for values in value_trial_list:
example = {"inputs": values, "output": output_value}
predicted_operations = prediction_model.get_first_in_sequence(example=example, settings=settings)
for predicted_operation in predicted_operations:
predicted_values = _get_predicted_values(values, predicted_operation, constants_values, end_time, settings, statistics)
for predicted_value in predicted_values:
if predicted_value not in value_set:
if settings.printing.verbose:
expression = predicted_value.reconstruct_expression()
print("[prediction] {} produces:\n{}".format(expression, predicted_value))
if predicted_value == output_value:
end_time = _check_solution_found(predicted_value, output_value, benchmark,
0, start_time, end_time,
solutions, solution_expression_set, settings, True)
if len(solutions) >= settings.max_solutions:
return (
solutions,
value_set,
values_by_weight,
statistics
)
elif all_operations.find_operation_with_name('IntOperation').apply([predicted_value], settings) == output_value:
end_time = _check_solution_found(predicted_value, output_value, benchmark,
0, start_time, end_time,
solutions, solution_expression_set, settings, True)
if len(solutions) >= settings.max_solutions:
return (
solutions,
value_set,
values_by_weight,
statistics
)
else:
value_set.append(predicted_value)
if timeit.default_timer() > end_time:
timeout_reached = True
# Don't return immediately; still try to cast new values because this is
# relatively quick.
break
# Try casting new values to the output dtype if this has a chance of being
# a correct solution.
for new_value in value_set:
if (new_value.shape == output_value.shape
and new_value.dtype != output_value.dtype
and operation_filtering.is_castable(new_value, dtype_value)
):
casted_value = None
if output_value.dtype == torch.int:
casted_value = int_operation.apply([new_value], settings)
elif output_value.dtype == torch.bool:
casted_value = bool_operation.apply([new_value], settings)
elif output_value.dtype == torch.float:
casted_value = float_operation.apply([new_value], settings)
if casted_value == output_value:
possible_first_solution = not solutions
# Found solution(s), but some may be bad.
_record_solutions(
casted_value,
0,
start_time,
solutions,
solution_expression_set,
benchmark,
settings,
)
if possible_first_solution and solutions:
end_time = min(
end_time,
timeit.default_timer() + settings.max_extra_solutions_time,
)
if len(solutions) >= settings.max_solutions:
return solutions, value_set, values_by_weight, statistics
if only_minimal_solutions and solutions:
return solutions, value_set, values_by_weight, statistics
if timeout_reached:
break
return solutions, value_set, values_by_weight, statistics
def _find_solutions(
benchmark: benchmark_module.Benchmark,
operations: List[operation_base.Operation],
start_time: float,
settings: settings_module.Settings,
prediction_model: Optional[PredictionModel] = None,
snippet_constant_multipliers: Optional[Dict[Text, float]] = None
) -> Tuple[
List[Solution],
Set[value_module.Value],
ValuesByWeight,
Optional[operation_statistics.OperationStatistics],
]:
"""Helper, returning (solutions, value_set, values_by_weight, statistics)."""
timeout_reached = False
end_time = start_time + settings.timeout
only_minimal_solutions = settings.only_minimal_solutions
if settings.max_solutions == 1:
# If we only want one solution, it will be minimal.
only_minimal_solutions = True
# An object to track statistics, if requested.
statistics = (
operation_statistics.OperationStatistics()
if settings.printing.statistics
else None
)
# A list of Solution namedtuples.
solutions = []
# A set of string solution expressions (don't return duplicate solutions).
solution_expression_set = set()
# The output value to search for.
output_value = value_module.OutputValue(benchmark.examples[0].output)
# A list of OrderedDicts mapping Value objects to themselves. The i-th
# OrderedDict contains all Value objects of weight i.
values_by_weight = [
collections.OrderedDict() for _ in range(settings.max_weight + 1)
]
# Find and cache the constant and casting operations for use later.
constant_operation = None
int_operation = None
float_operation = None
bool_operation = None
for operation in operations:
if operation.name == torch_functions.CONSTANT_OPERATION_NAME:
constant_operation = operation
elif operation.name == torch_functions.INT_OPERATION_NAME:
int_operation = operation
elif operation.name == torch_functions.FLOAT_OPERATION_NAME:
float_operation = operation
elif operation.name == torch_functions.BOOL_OPERATION_NAME:
bool_operation = operation
# Create the output dtype value for use later.
dtype_value = value_module.ConstantValue(output_value.dtype)
# Populate values_by_weight with inputs and constants. This also prints
# inputs/output/constants to stdout.
_add_constants_and_inputs_and_print(
values_by_weight, benchmark, output_value, constant_operation, settings, snippet_constant_multipliers
)
# A set storing all values found so far.
value_set = set().union(*values_by_weight)
constants_values = [value for value in value_set if not value.is_tensor]
non_primitive_values = [value for value in value_set if value.is_tensor or value.is_sequence]
filter_cache = filtered_values_cache.FilteredValuesCache()
if settings.model.do_iterative_prediction:
# try with values in value_set and run prediction
value_trial_list = []
value_trial_list.extend([[value] for value in non_primitive_values])
value_trial_list.extend(product(non_primitive_values, non_primitive_values))
for values in value_trial_list:
example = {"inputs": values, "output": output_value}
predicted_operations = prediction_model.get_predicted_operations(example=example, settings=settings)
for predicted_operation in predicted_operations:
if len(values) > 1 and predicted_operation.name in ['torch.cat(tensors, dim)', 'torch.stack(tensors)', 'torch.stack(tensors, dim)']:
stacked_value = all_operations.find_operation_with_name('PairCreationOperation').apply(values, settings)
if stacked_value is None:
predicted_values = []
else:
predicted_values = predicted_operation.enumerate_values_with_values(
given_values=[[stacked_value]],
potential_value_list=constants_values,
end_time=end_time,
settings=settings,
statistics=statistics
)
else:
predicted_values = predicted_operation.enumerate_values_with_values(
given_values=[[value] for value in values],
potential_value_list=constants_values,
end_time=end_time,
settings=settings,
statistics=statistics
)
for predicted_value in predicted_values:
if predicted_value not in value_set:
if settings.printing.verbose:
expression = predicted_value.reconstruct_expression()
print("[prediction] {} produces:\n{}".format(expression, predicted_value))
if predicted_value == output_value:
end_time = _check_solution_found(predicted_value, output_value, benchmark,
0, start_time, end_time,
solutions, solution_expression_set, settings, True)
if len(solutions) >= settings.max_solutions:
return (
solutions,
value_set,
values_by_weight,
statistics,
)
else:
if settings.model.do_first_in_seq:
value_set.add(predicted_value)
# Value search by weight.
for weight in range(1, settings.max_weight + 1):
if settings.printing.progress:
print("Searching weight {}...".format(weight))
# Values with the current weight. This might already include leaf values.
new_values = values_by_weight[weight]
# # Random iteration of operations
for operation in random.sample(operations, len(operations)):
for value in operation.enumerate_values_with_weight(
target_weight=weight,
values_by_weight=values_by_weight,
filter_cache=filter_cache,
end_time=end_time,
settings=settings,
statistics=statistics,
):
if value not in value_set:
# This value has never been seen before, or it's the desired output.
if settings.printing.verbose:
expression = value.reconstruct_expression()
print("{} produces:\n{}".format(expression, value))
if value == output_value:
end_time = _check_solution_found(value, output_value, benchmark,
weight, start_time, end_time,
solutions, solution_expression_set, settings)
if len(solutions) >= settings.max_solutions:
return (
solutions,
value_set,
values_by_weight,
statistics,
)
else:
# Only store the value if it isn't a solution. Otherwise, we'll get
# lots of "almost duplicate" solutions, e.g., by adding 0.
new_values[value] = value
# We should never add output_value (or anything equal) to value_set
# so that we can continue finding other solutions.
value_set.add(value)
if settings.model.do_iterative_prediction:
if not value.is_tensor:
continue
value_trial_list = [[value]]
value_trial_list.extend(product([value], non_primitive_values))
value_trial_list.extend(product(non_primitive_values, [value]))
for values in value_trial_list:
example = {"inputs": values, "output": output_value}
predicted_operations = prediction_model.get_predicted_operations(example=example, settings=settings)
for predicted_operation in predicted_operations:
if len(values) > 1 and predicted_operation.name in ['torch.cat(tensors, dim)', 'torch.stack(tensors)', 'torch.stack(tensors, dim)']:
stacked_value = all_operations.find_operation_with_name('PairCreationOperation').apply(values, settings)
if stacked_value is None:
predicted_values = []
else:
predicted_values = predicted_operation.enumerate_values_with_values(
given_values=[[stacked_value]],
potential_value_list=constants_values,
end_time=end_time,
settings=settings,
statistics=statistics
)
else:
predicted_values = predicted_operation.enumerate_values_with_values(
given_values=[[value] for value in values],
potential_value_list=constants_values,
end_time=end_time,
settings=settings,
statistics=statistics
)
for predicted_value in predicted_values:
if predicted_value not in value_set:
if settings.printing.verbose:
expression = predicted_value.reconstruct_expression()
print("[prediction] {} produces:\n{}".format(expression, predicted_value))
if predicted_value == output_value:
end_time = _check_solution_found(predicted_value, output_value, benchmark,
0, start_time, end_time,
solutions, solution_expression_set, settings, True)
if len(solutions) >= settings.max_solutions:
return (
solutions,
value_set,
values_by_weight,
statistics,
)
else:
if settings.model.do_first_in_seq:
value_set.add(predicted_value)
else: # This value has been seen before.
if value in new_values:
# The value was already computed differently with this weight.
original_value = new_values[value]
if isinstance(original_value, value_module.OperationValue):
# Only merge reconstructions if this was originally an
# OperationValue. (It could be a ConstantValue instead.)
operation_value = (
original_value
) # type: value_module.OperationValue
operation_value.merge_reconstructions(value)
elif not only_minimal_solutions:
# If we want non-minimal solutions, we need to store the value even
# if we have already seen that value with a smaller weight.
new_values[value] = value
if timeit.default_timer() > end_time:
timeout_reached = True
# Don't return immediately; still try to cast new values because this is
# relatively quick.
break
# Try casting new values to the output dtype if this has a chance of being
# a correct solution.
for new_value in new_values:
if (new_value.shape == output_value.shape
and new_value.dtype != output_value.dtype
and operation_filtering.is_castable(new_value, dtype_value)
):
casted_value = None
if output_value.dtype == torch.int:
casted_value = int_operation.apply([new_value], settings)
elif output_value.dtype == torch.bool:
casted_value = bool_operation.apply([new_value], settings)
elif output_value.dtype == torch.float:
casted_value = float_operation.apply([new_value], settings)
if casted_value == output_value:
possible_first_solution = not solutions
# Found solution(s), but some may be bad.
_record_solutions(
casted_value,
weight,
start_time,
solutions,
solution_expression_set,
benchmark,
settings,
)
if possible_first_solution and solutions:
end_time = min(
end_time,
timeit.default_timer() + settings.max_extra_solutions_time,
)
if len(solutions) >= settings.max_solutions:
return solutions, value_set, values_by_weight, statistics
if settings.printing.progress:
print(
"Found {} distinct values of weight {}, or {} total.".format(
len(new_values), weight, len(value_set)
)
)
if only_minimal_solutions and solutions:
return solutions, value_set, values_by_weight, statistics
if timeout_reached:
break
return solutions, value_set, values_by_weight, statistics
def _combine_multipliers(
first: Dict[Text, float], second: Dict[Text, float]
) -> Dict[Text, float]:
"""Combines operation weight multiplier dicts. Modifies the first dict."""
for name in second:
first[name] = first.get(name, 1.0) * second[name]
return first
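# Illustrative sketch (not part of the original code): _combine_multipliers takes
# the elementwise product of per-operation multipliers, treating entries missing
# from the first dict as 1.0. For example:
#   _combine_multipliers({"torch.add(input, other)": 0.5},
#                        {"torch.add(input, other)": 2.0, "torch.mul(input, other)": 0.75})
#   -> {"torch.add(input, other)": 1.0, "torch.mul(input, other)": 0.75}
# Note that the first dict is modified in place and also returned.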
def get_reweighted_operations(
benchmark: benchmark_module.Benchmark,
settings: settings_module.Settings,
description_handler: Optional[DescriptionHandler] = None,
prediction_model: Optional[PredictionModel] = None,
snippet_operation_multipliers: Optional[Dict[Text, float]] = None
) -> List[operation_base.Operation]:
"""Returns a list of operations with correct weights for the problem."""
include_sparse_operations = (
not settings.operations.limit_sparse_operations or _contains_sparse(benchmark)
)
operations = all_operations.get_operations(
include_sparse_operations=include_sparse_operations
)
operation_names = [op.name for op in operations]
if len(operation_names) != len(set(operation_names)):
raise ValueError("Operation names were not unique.")
if settings.paper_experiments.uniform_weights:
# Only for experiments in the PLDI paper.
for operation in operations:
operation.weight = 1
return operations
multipliers = {}
if description_handler and benchmark.description:
multipliers = _combine_multipliers(
multipliers,
description_handler.get_operation_multipliers(benchmark, settings),
)
if prediction_model and settings.model.use_multiplier:
multipliers = _combine_multipliers(
multipliers,
prediction_model.get_operation_multipliers(benchmark, settings),
)
if snippet_operation_multipliers:
multipliers = _combine_multipliers(
multipliers,
snippet_operation_multipliers
)
for operation in operations:
operation.weight = max(
1, int(round(operation.weight * multipliers.get(operation.name, 1)))
)
return operations
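# Illustrative sketch (not part of the original code): with the reweighting rule
# above, each operation's weight is scaled by its combined multiplier, rounded to
# the nearest int, and clamped to at least 1. For example:
#   weight 4 with multiplier 0.75 -> max(1, int(round(4 * 0.75))) = 3
#   weight 1 with multiplier 0.5  -> max(1, int(round(1 * 0.5)))  = 1
# so prioritized operations become cheaper to use in candidate expressions, but
# never free.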
def run_value_search(
benchmark: benchmark_module.Benchmark,
settings: settings_module.Settings,
description_handler: Optional[DescriptionHandler] = None,
prediction_model: Optional[PredictionModel] = None,
snippet_handler: Optional[SnippetHandler] = None,
) -> ValueSearchResults:
"""Performs value search, iterating by the expression weight.
Starts with the constants and user-provided inputs, and applies the given
operations, for a given number of iterations. An expression's "weight" is the
number of nodes in the expression tree.
Args:
benchmark: The Benchmark containing input-output examples and constants.
settings: A Settings object containing settings for this search.
description_handler: A DescriptionHandler that scores operations based on
the benchmark's description.
prediction_model: A PredictionModel that scores operations based on
the pre-trained prediction model.
snippet_handler: A SnippetHandler that scores operations based on
            the original snippet.
Returns:
A ValueSearchResults namedtuple.
Raises:
ValueError: If max_weight is too large to be reasonable.
"""
_suppress_warnings()
if len(benchmark.examples) > 1:
print("Warning: for now, value search only uses a single example.")
start_time = timeit.default_timer()
snippet_operation_multipliers = None
snippet_constant_multipliers = None
if benchmark.snippet:
snippet_operation_multipliers, snippet_constant_multipliers = snippet_handler.get_multipliers(benchmark, settings)
operations = get_reweighted_operations(
benchmark,
settings,
description_handler=description_handler,
prediction_model=prediction_model,
snippet_operation_multipliers=snippet_operation_multipliers
)
if settings.model.use_multi_model:
solutions, value_set, values_by_weight, statistics = _find_solutions_multi_model(
benchmark=benchmark,
operations=operations,
start_time=start_time,
settings=settings,
prediction_model=prediction_model,
snippet_constant_multipliers=snippet_constant_multipliers
)
elif settings.model.do_first_in_seq:
solutions, value_set, values_by_weight, statistics = _find_solutions_first_sequence(
benchmark=benchmark,
operations=operations,
start_time=start_time,
settings=settings,
prediction_model=prediction_model,
snippet_constant_multipliers=snippet_constant_multipliers
)
else:
solutions, value_set, values_by_weight, statistics = _find_solutions(
benchmark=benchmark,
operations=operations,
start_time=start_time,
settings=settings,
prediction_model=prediction_model,
snippet_constant_multipliers=snippet_constant_multipliers
)
total_time = timeit.default_timer() - start_time
if solutions:
if settings.printing.print_solutions:
print()
print(
"Solution was found in {:.1f} seconds:\n{}".format(
solutions[0].time, solutions[0].expression
)
)
if settings.max_solutions != 1:
print(
"Found {} solution(s) in {:.1f} seconds total.".format(
len(solutions), total_time
)
)
else:
if settings.printing.print_solutions:
print(
"Could not find solution within {} seconds.".format(
min(settings.timeout, total_time)
)
)
sys.stdout.flush()
return ValueSearchResults(
solutions=solutions,
total_time=total_time,
value_set=value_set,
values_by_weight=values_by_weight,
benchmark=benchmark,
settings=settings,
statistics=statistics,
)
def run_value_search_from_example(
inputs: Union[List[Any], Dict[Text, Any]],
output: Any,
settings: Optional[settings_module.Settings] = None,
**kwargs
) -> ValueSearchResults:
"""Performs value search for a single user-provided input-output example.
Args:
inputs: A list of inputs, or a dict mapping input names to inputs.
output: The corresponding desired output.
settings: An optional Settings object to use, or None to use defaults.
**kwargs: The kwarg 'constants' can be used to specify a list of constants,
and 'description' can be used to provide a natural language description of
the task. Other arguments are passed directly to run_value_search().
Returns:
A ValueSearchResults namedtuple.
"""
if settings is None:
settings = settings_module.default_settings()
constants = kwargs.pop("constants", None)
description = kwargs.pop("description", None)
snippet = kwargs.pop("snippet", None)
source = kwargs.pop("source", "From user-provided example.")
benchmark = benchmark_module.Benchmark(
examples=[benchmark_module.Example(inputs, output)],
constants=constants, # Will turn into empty list if constants=None.
description=description, # Will turn into '' if description=None.
snippet=snippet,
source=source,
)
description_handler = description_handler_factory.create_handler(
settings.description_handler_name
)
if settings.printing.print_init:
print("Description handler: {!r}\n".format(description_handler))
prediction_model = prediction_model_factory.load_model(
"classification"
)
if settings.printing.print_init:
print("Prediction model: {!r}\n".format(prediction_model))
snippet_handler = snippet_handler_factory.create_handler(
"function_constant"
)
if settings.printing.print_init:
print("Snippet handler: {!r}\n".format(snippet_handler))
    kwargs.setdefault("description_handler", description_handler)
    kwargs.setdefault("prediction_model", prediction_model)
    kwargs.setdefault("snippet_handler", snippet_handler)
    return run_value_search(benchmark, settings, **kwargs)
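# Illustrative usage sketch (assumptions: the prediction-model checkpoints
# referenced in the default settings are available, and the exact solution found
# depends on the search configuration). Not part of the original file:
#   import torch
#   results = run_value_search_from_example(
#       inputs=[torch.tensor([10, 20, 30])],
#       output=torch.tensor([30, 20, 10]),
#   )
#   if results.solutions:
#       print(results.solutions[0].expression)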
|
APIsynth-master
|
Synthesis_incorporation/value_search/value_search.py
|
# Copyright 2021 The TF-Coder Authors.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Defines the base Operation class for value search."""
import abc
import itertools
import sys
import timeit
import typing
from typing import Callable, Dict, List, Optional, Sequence, Set, Text, Tuple, Union
import six
from tf_coder import tf_coder_utils
from tf_coder import torch_functions
from tf_coder.value_search import filtered_values_cache
from tf_coder.value_search import operation_statistics
from tf_coder.value_search import value
from tf_coder.value_search import value_search_settings as settings_module
################################################################################
# Type aliases.
# The i-th element contains all Value objects of weight i, mapped to themselves.
ValuesByWeightDict = List[Dict[value.Value, value.Value]]
# The i-th element is an iterable of all Value objects of weight i.
ValuesByWeightIterable = List[Union[List[value.Value], Dict[value.Value, value.Value]]]
# The i-th inner list contains Value objects that are candidates for argument i.
ArgOptionsType = List[List[value.Value]]
# The i-th Value is used as the i-th argument for an Operation application.
ArgValuesType = Sequence[value.Value]
# An optional filter function that is applied to single Value objects. If None,
# it is treated as a function that always returns True (all Value objects are
# allowed).
ValueFilterType = Optional[Callable[[value.Value], bool]]
# An optional filter function that is applied to a list of argument values. If
# None, it is treated as a function that always returns True (all argument lists
# are allowed).
ApplyFilterType = Optional[Callable[[ArgValuesType], bool]]
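# Illustrative sketch (not part of the original code): a ValueFilterType inspects a
# single candidate Value, while an ApplyFilterType inspects a full argument list.
# For a hypothetical two-argument operation one might write:
#   value_filters = [lambda v: v.is_tensor, lambda v: v.type is int]
#   apply_filter = lambda args: 0 <= args[1].value < len(args[0].shape)
# requiring a tensor first argument, an int second argument, and that the int is a
# valid dimension index for that tensor.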
################################################################################
OperationMetadata = typing.NamedTuple("OperationMetadata", [("docstring", Text)])
@six.add_metaclass(abc.ABCMeta)
class Operation(object):
"""An operation that can be applied to a constant number of arguments.
Arguments are always ordered, and subclasses can choose their own conventions
for this ordering. The operation must be deterministic, must not have
side-effects, and must not modify its arguments.
Attributes:
name: A unique name for this operation.
num_args: The number of arguments required by this Operation.
weight: The weight of this node in the AST.
metadata: Metadata for this Operation.
_value_filters_list: A list of lists of filter functions.
Each inner list has length num_args and contains a filter function for
each argument, where the i-th filter function takes a Value and returns
whether that Value should be an option for the i-th argument. Any filter
function can be None, which means all values should be options for that
argument.
The outer list can have multiple lists of filter functions, where each
inner list describes one class of valid argument values.
The value_filters_list attribute can also be None, in which case all
values should be options for all arguments.
_apply_filter: A filter function that takes a list of Value objects of
length num_args (the arguments to a potential application of this
Operation), and returns whether those Value objects are compatible (i.e.,
whether the operation should be applied). If None, the operation is always
applied.
_name_cache: A cached copy of this Operation's name.
"""
def __init__(self, num_args: int, weight: int, metadata: OperationMetadata) -> None:
"""Initializes an Operation."""
self.num_args = num_args
self.weight = weight
self.metadata = metadata
self._value_filters_list = None # type: Optional[List[List[ValueFilterType]]]
self._apply_filter = None # type: ApplyFilterType
self._name_cache = None
@property
def name(self) -> Text:
"""The (cached) name of the operation."""
if self._name_cache is not None:
return self._name_cache
self._name_cache = self._compute_name()
return self._name_cache
def _compute_name(self) -> Text:
"""Computes a name for this operation."""
return self.__class__.__name__
def add_value_filters(self, value_filters: List[ValueFilterType]) -> None:
"""Adds the given value filters to the value_filters_list attribute.
Args:
value_filters: A list of filter functions, one per argument, where the
i-th filter function takes a Value and returns whether it should be
an option for argument i.
Raises:
ValueError: If the list of filter functions has the wrong length.
"""
if len(value_filters) != self.num_args:
raise ValueError("value_filters must contain one filter per argument.")
if self._value_filters_list is None:
self._value_filters_list = []
self._value_filters_list.append(value_filters)
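    # Illustrative sketch (not part of the original code): a binary operation could
    # register two alternative argument classes, e.g.
    #   op.add_value_filters([TENSOR_FILTER, TENSOR_FILTER])
    #   op.add_value_filters([TENSOR_FILTER, PRIMITIVE_FILTER])
    # so that both tensor/tensor and tensor/primitive argument combinations are
    # enumerated (the filter constants are assumed to come from the
    # operation_filtering module).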
def set_apply_filter(self, apply_filter: ApplyFilterType) -> None:
"""Sets the given apply_filter."""
self._apply_filter = apply_filter
@abc.abstractmethod
def apply(
self, arg_values: ArgValuesType, settings: settings_module.Settings
) -> Optional[value.Value]:
"""Applies this Operation to a list of arguments (Value objects).
Args:
arg_values: A list of Value objects representing the arguments.
settings: A Settings object storing settings for this search.
Returns:
A Value object representing the result if successful, or None if the
operation raises an exception.
"""
def _enumerate_values(
self,
arg_options: ArgOptionsType,
end_time: float,
settings: settings_module.Settings,
statistics: Optional[operation_statistics.OperationStatistics] = None,
) -> List[value.Value]:
"""Enumerates values that are created from multiple choices of arguments.
Args:
arg_options: A list of lists of Value objects, where the i-th list
contains the possible Value objects for the i-th argument.
end_time: A timeit.default_timer() cutoff where this should timeout.
settings: A Settings object storing settings for this search.
statistics: An optional OperationStatistics object to track statistics
during this function's execution.
Returns:
A list of Value objects, one for every successful application of the
operation.
"""
results = [] # type: List[value.Value]
apply_count = 0
apply_successes = 0
start_time = timeit.default_timer()
for i, arg_values in enumerate(itertools.product(*arg_options)):
# Check for timeout periodically.
if i % 1000 == 0 and timeit.default_timer() > end_time:
break
# Skipping filtering is only used for experiments in the PLDI paper.
if not (
settings.paper_experiments.skip_filtering
and self.name not in torch_functions.REQUIRES_FILTERING
):
# _apply_filter is either None or callable.
if self._apply_filter is not None and not self._apply_filter(
arg_values
): # pylint: disable=not-callable
continue
if settings.printing.all_apply:
print(
"Applying {} on arguments: {}".format(
self.name,
[
arg_value.reconstruct_expression()
for arg_value in arg_values
],
)
)
# Print the output immediately so it isn't swallowed by a stacktrace.
sys.stdout.flush()
maybe_value = self.apply(arg_values, settings)
apply_count += 1
if maybe_value is not None:
yes_value = maybe_value # type: value.Value
apply_successes += 1
results.append(yes_value)
elapsed_time = timeit.default_timer() - start_time
if statistics:
statistics.update(
operation_name=self.name,
count=apply_count,
successes=apply_successes,
time=elapsed_time,
)
return results
def enumerate_values_with_values(
self,
given_values: List[List[value.Value]],
potential_value_list: List[value.Value],
end_time: float,
settings: settings_module.Settings,
statistics: Optional[operation_statistics.OperationStatistics] = None,
) -> List[value.Value]:
"""Enumerates values with given fixed argument values.
Args:
given_values: A list of lists of Value objects, where the i-th list
contains the Value objects for the i-th argument.
potential_value_list: A list of Value objects that can be used for
arguments that are not already given.
end_time: A timeit.default_timer() cutoff where this should timeout.
settings: A Settings object storing settings for this search.
statistics: An optional OperationStatistics object to track statistics
during this function's execution.
Returns:
A list of Value objects that are output of the given input arguments.
"""
num_args = self.num_args
if num_args == 0:
return [] # An operation with no arguments can't have variable weight.
if num_args < len(given_values):
return [] # It got more values than the operation needs.
results = [] # type: List[value.Value]
for value_filters in self._value_filters_list:
assert len(value_filters) == num_args
arg_options = []
is_valid_option = True
if len(given_values) == 0:
for arg in range(num_args):
arg_option = list(filter(value_filters[arg], potential_value_list))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
else:
# no need to enumerate
if self.name in ['torch.add(input, other)',
'torch.any(input)',
'torch.argmax(input)',
'torch.bincount(input)',
'torch.cdist(x1, x2)',
'torch.div(input, other)',
'torch.eq(input, other)',
'torch.gt(input, other)',
'torch.lt(input, other)',
'torch.masked_select(input, mask)',
'torch.matmul(input, other)',
'torch.max(input)',
'torch.minimum(input, other)',
'torch.mul(input, other)',
'torch.ne(input, other)',
'torch.searchsorted(sorted_sequence, input)',
'torch.squeeze(input)',
'torch.square(input)',
'torch.stack(tensors)',
'torch.sum(input)',
'torch.where(condition, input, other)',
'torch.where(condition, self, other)',
]:
if num_args != len(given_values):
return results
for arg in range(num_args):
arg_option = list(filter(value_filters[arg], given_values[arg]))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
# enumerate second arg
elif self.name in ['torch.any(input, dim)',
'torch.argmax(input, dim)',
'torch.max(input, dim)',
'torch.nn.functional.one_hot(input, num_classes)',
'torch.reshape(input, shape)',
'torch.stack(tensors, dim)',
'torch.sum(input, dim)',
'torch.tile(input, dims)',
'torch.squeeze(input, dim)',
'torch.unsqueeze(input, dim)',
'ExpandOperation'
]:
if num_args != len(given_values) + 1:
return results
arg_option = list(filter(value_filters[0], given_values[0]))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
arg_option = list(filter(value_filters[1], potential_value_list))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
# enumerate second arg, third fixed
elif self.name in ['torch.gather(input, dim, index)']:
if num_args != len(given_values)+1:
return results
arg_option = list(filter(value_filters[0], given_values[0]))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
arg_option = list(filter(value_filters[1], potential_value_list))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
arg_option = list(filter(value_filters[2], given_values[1]))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
# enumerate the third arg
elif self.name in ['torch.repeat_interleave(input, repeats, dim)',
'torch.tensordot(a, b, dims)']:
if num_args != len(given_values)+1:
return results
for arg in range(2):
arg_option = list(filter(value_filters[arg], given_values[arg]))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
arg_option = list(filter(value_filters[2], potential_value_list))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
# enumerate the second and the third
elif self.name in ['torch.roll(input, shifts, dims)',
'torch.transpose(input, dim0, dim1)']:
if num_args != len(given_values)+2:
return results
arg_option = list(filter(value_filters[0], given_values[0]))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
for arg in range(1, 3):
arg_option = list(filter(value_filters[arg], potential_value_list))
if len(arg_option) == 0:
is_valid_option = False
arg_options.append(arg_option)
if is_valid_option:
results.extend(
self._enumerate_values(arg_options, end_time, settings, statistics)
)
return results
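    # Illustrative sketch (not part of the original code): for an operation such as
    # torch.unsqueeze(input, dim), calling
    #   op.enumerate_values_with_values(given_values=[[t]], potential_value_list=constants, ...)
    # pins the first argument to the given value t and enumerates only the dim
    # argument from the constant pool, rather than enumerating both arguments.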
def enumerate_values_with_weight(
self,
target_weight: int,
values_by_weight: ValuesByWeightDict,
filter_cache: filtered_values_cache.FilteredValuesCache,
end_time: float,
settings: settings_module.Settings,
statistics: Optional[operation_statistics.OperationStatistics] = None,
) -> List[value.Value]:
"""Enumerates values with a given target weight.
Args:
target_weight: The desired weight of resulting values.
values_by_weight: A collection of Values organized by their weight.
filter_cache: The FilteredValuesCache object used during this search.
end_time: A timeit.default_timer() cutoff where this should timeout.
settings: A Settings object storing settings for this search.
statistics: An optional OperationStatistics object to track statistics
during this function's execution.
Returns:
A list of Value objects of the specified weight.
"""
num_args = self.num_args
if num_args == 0:
return [] # An operation with no arguments can't have variable weight.
if target_weight - self.weight - num_args < 0:
return [] # Too many arguments for this weight.
results = [] # type: List[value.Value]
for value_filters in self._value_filters_list:
assert len(value_filters) == num_args
# Enumerate ways of partitioning (target_weight - self.weight) into
# (num_args) positive pieces.
# Equivalently, partition (target_weight - self.weight - num_args) into
# (num_args) nonnegative pieces.
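            # Worked example (illustrative, not part of the original code): for an
            # operation with weight 1 and 2 arguments and target_weight 5, we
            # partition 5 - 1 - 2 = 2 into 2 nonnegative pieces: (0, 2), (1, 1),
            # (2, 0), so the two arguments may have weights (1, 3), (2, 2), or
            # (3, 1).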
arg_options_list = [] # type: List[ArgOptionsType]
for partition in tf_coder_utils.generate_partitions(
target_weight - self.weight - num_args, num_args
): # type: Tuple[int, ...] # pytype: disable=annotation-type-mismatch
if (
settings.paper_experiments.skip_filtering
and self.name not in torch_functions.REQUIRES_FILTERING
):
# Only for experiments in the PLDI paper.
arg_options = [
values_by_weight[weight_minus_1 + 1]
for arg, weight_minus_1 in enumerate(partition)
] # type: ArgOptionsType # pytype: disable=annotation-type-mismatch
else:
arg_options = [
filter_cache.filter_values(
value_filters[arg],
weight_minus_1 + 1,
values_by_weight[weight_minus_1 + 1],
)
for arg, weight_minus_1 in enumerate(partition)
] # type: ArgOptionsType
arg_options_list.append(arg_options)
for arg_options in arg_options_list:
results.extend(
self._enumerate_values(arg_options, end_time, settings, statistics)
)
return results
def reconstruct_expression(self, arg_values: ArgValuesType, use_cache=True) -> Text:
"""Returns an expression for this operation applied to the given arguments.
This can be slow and should not be called in a tight loop.
Args:
arg_values: A list of Value objects representing the arguments' values.
use_cache: If True, the reconstruction may be looked up from a cache. If
False, the reconstruction will be recomputed on each call.
Returns:
A string representation of the code expression.
"""
arg_strings = [
arg_value.reconstruct_expression(use_cache=use_cache)
for arg_value in arg_values
]
return self.reconstruct_expression_from_strings(arg_strings)
def reconstruct_expression_with_input_names(
self, arg_values: ArgValuesType
) -> Tuple[Text, Set[Text]]:
"""Returns an expression for this operation and the used input names."""
arg_strings_list, input_names_list = zip(
*[
arg_value.reconstruct_expression_with_input_names()
for arg_value in arg_values
]
)
return (
self.reconstruct_expression_from_strings(arg_strings_list),
set.union(*input_names_list),
)
@abc.abstractmethod
def reconstruct_expression_from_strings(self, arg_strings: List[Text]) -> Text:
"""Returns an expression for this operation applied to the given arguments.
This can be slow and should not be called in a tight loop.
Args:
arg_strings: A list of strings representing the arguments'
reconstructions.
Returns:
A string representation of the code expression.
"""
|
APIsynth-master
|
Synthesis_incorporation/value_search/operation_base.py
|
# Copyright 2021 The TF-Coder Authors.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Settings specific to the value search approach to the TF-Coder problem."""
import ast
import os
from typing import Any, Dict, List, Text
class Settings(object):
"""Stores settings for TF-Coder's value search algorithm."""
def __init__(self):
# A string describing the current version of the search algorithm.
self.algorithm_version = (
"Value search, "
"TF-IDF (k=5, min_score=0.15), "
"tensor features model with F_1 loss and max weighting, "
"2020/08/26"
)
# Time limit in seconds.
self.timeout = 300
# Maximum number of solutions to search for.
self.max_solutions = 1
# Whether to only search for solutions with minimal weight.
self.only_minimal_solutions = True
# Maximum number of seconds to spend searching for solutions after the
# first.
self.max_extra_solutions_time = 10
# Maximum weight of an expression to search for.
self.max_weight = 300
# Whether to require solutions to use all inputs, at least one input, or no
# restriction.
self.require_all_inputs_used = True
self.require_one_input_used = True
# The description handler to use.
# self.description_handler_name = "tfidf_5_0.15"
self.description_handler_name = "no_change"
# Other settings organized into separate objects.
self.operations = OperationSettings()
self.model = ModelSettings()
self.printing = PrintSettings()
self.paper_experiments = PaperExperimentSettings()
# Used to parse setting names.
    _GROUP_NAMES = ["operations", "model", "printing", "paper_experiments"]
def set(self, name: Text, value: Any) -> None:
"""Sets the setting with the given name to the given value.
Args:
name: The name of the setting to set. For example, 'timeout' is used to
set `self.timeout`, and either 'printing.statistics' or
'printing_statistics' can be used to set `self.printing.statistics`.
value: The value to set the setting to.
"""
if hasattr(self, name):
setattr(self, name, value)
else:
for group_name in Settings._GROUP_NAMES:
if name.startswith(group_name) and name[len(group_name)] in {".", "_"}:
reduced_name = name[len(group_name) + 1 :]
group = getattr(self, group_name)
if hasattr(group, reduced_name):
setattr(group, reduced_name, value)
break
else:
raise ValueError(
"The name `{}` does not match any setting.".format(name)
)
def as_dict(self) -> Dict[Text, Any]:
"""Returns all settings as a dict."""
result = {}
for name, value in self.__dict__.items():
if name in Settings._GROUP_NAMES:
for inner_name, inner_value in value.__dict__.items():
full_name = name + "." + inner_name
result[full_name] = inner_value
else:
result[name] = value
return result
class OperationSettings(object):
"""Settings about operations to use during search."""
def __init__(self):
# Whether to limit sparse operations to benchmarks that contain
# SparseTensors in their examples.
self.limit_sparse_operations = False
# TODO(kshi): Add options to exclude specific operations, or prioritize
# user-chosen operations.
class ModelSettings(object):
"""Settings for the prediction model."""
def __init__(self):
# whether to use multiply model-predicted APIs before search
self.use_multiplier = False
# reweight constant. [0, 1)
self.multiplier = 0.75
# the number of APIs to reweight, given a ranked list from prediction model
self.multiplier_top_n = 3
# whether to use iterative search
self.do_iterative_prediction = False
        # whether to predict the first API in a sequence before the weighted search
        self.do_first_in_seq = True
        # the number of APIs to evaluate for each prediction
        self.iterative_top_n = 10
self.beam_n = 3
# softmax probability threshold
self.threshold = 0.0
# whether to use multi-api prediction model
self.use_multi_model = False
# self.checkpoint_path = "manifold://bigcode/tree/pyCoder/daye_models/Single_100000_integer_30_aug10_nocasting_model.pt"
self.checkpoint_path = "manifold://bigcode/tree/pyCoder/data/multilabel_data_corrected/multilabel_200k_10k_10k_integer_16_aug29_shape_type_value_model.pt"
# self.api_map_path = "manifold://bigcode/tree/pyCoder/daye_models/Single_100000_integer_30_aug10_nocasting_api2indx.pt"
self.api_map_path = "manifold://bigcode/tree/pyCoder/data/multilabel_data_corrected/multilabel_200k_10k_10k_integer_16_aug29_shape_type_value_api2indx.pt"
# self.multi_ffn_path = "manifold://bigcode/tree/pyCoder/data/Composite_100000/ffn_model.pt"
# 16-exhaustive
# self.multi_ffn_path = "manifold://bigcode/tree/pyCoder/data/exhaustive_16api/2_train_net_model.pt"
# 33
self.multi_ffn_path = "manifold://bigcode/tree/pyCoder/data/gen_model/10_train_net_model.pt"
# self.multi_rnn_path = "manifold://bigcode/tree/pyCoder/data/Composite_100000/rnn_model.pt"
# 16-exhaustive
# self.multi_rnn_path = "manifold://bigcode/tree/pyCoder/data/exhaustive_16api/2_train_rnn_model.pt"
# 33
self.multi_rnn_path = "manifold://bigcode/tree/pyCoder/data/gen_model/10_train_rnn_model.pt"
# self.multi_api_map_path = "manifold://bigcode/tree/pyCoder/data/Composite_100000/api2indx17api.pt"
# 16-exhaustive
# self.multi_api_map_path = "manifold://bigcode/tree/pyCoder/data/exhaustive_16api/api2indx.pt"
self.multi_api_map_path = "manifold://bigcode/tree/pyCoder/data/gen_model/api2indx.pt"
self.embedding_size = 150
self.shape_embedding_size = 6
self.rnn_hidden_dims = 128
self.rnn_num_layers = 1
self.use_shape_encoding = True
self.use_type_encoding = True
self.use_value_encoding = True
class PrintSettings(object):
"""Settings that affect printing to stdout."""
def __init__(self):
# Whether to print initialization settings
self.print_init = True
# Whether to print examples
self.print_examples = True
# Whether to print solutions
self.print_solutions = True
# Whether to print intermediate results and progress. Setting this to True
# will cause significant slowdown from computing and printing many
# expressions.
self.verbose = False
# Whether to print every FunctionOperation application before it occurs.
# Setting this to True will cause a huge amount of output and significant
# slowdown.
self.all_apply = False
# Whether to print warnings about too-large tensors.
self.tensor_size_warnings = False
# Whether to print progress at each iteration of target expression weight.
self.progress = False
# Whether to print bad solutions.
self.bad_solutions = False
# Whether to print statistics about operations and executions.
self.statistics = False
# Whether to print statistics sorted by time (versus by name). Ignored if
# `statistics` is False.
self.statistics_sort_by_time = False
# Whether to print the operations that are prioritized or deprioritized.
self.prioritized_operations = False
self.deprioritized_operations = False
# Whether to print the predicted operations during the iterative predictions.
self.predicted_operations = False
class PaperExperimentSettings(object):
"""Settings for experiments in the PLDI 2020 paper."""
def __init__(self):
self.skip_filtering = False
self.uniform_weights = False
def default_settings() -> Settings:
"""Returns a Settings object with default settings."""
return Settings()
def from_dict(overrides: Dict[Text, Any]) -> Settings:
"""Sets settings using a dict to override defaults."""
settings = default_settings()
for name, value in overrides.items():
settings.set(name, value)
return settings
def from_list(overrides: List[Text]) -> Settings:
"""Sets settings using a list to override defaults.
Args:
overrides: A list of strings like 'timeout=120' or
'printing.statistics=True'. Each string should contain exactly one '='
character. The portion before the '=' character names a setting to
override. The portion after the '=' character describes the value of the
setting, in a form parseable by ast.literal_eval().
Raises:
ValueError: If any element of `overrides` cannot be processed
successfully.
Returns:
A Settings object.
"""
settings = default_settings()
for override_string in overrides:
if override_string.count("=") != 1:
raise ValueError(
"The override string {!r} does not contain exactly "
"one '=' character.".format(override_string)
)
equals_index = override_string.index("=")
name = override_string[:equals_index]
value_string = override_string[equals_index + 1 :]
try:
value = ast.literal_eval(value_string)
settings.set(name, value)
except Exception as e:
raise ValueError(
"Exception raised in ast.literal_eval on {!r}: {}".format(
value_string, e
)
)
return settings
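# Illustrative usage sketch (not part of the original file): overrides can be given
# as "name=value" strings, where group prefixes accept '.' or '_' as a separator:
#   s = from_list(["timeout=60", "printing.verbose=True"])
#   assert s.timeout == 60 and s.printing.verbose is True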
|
APIsynth-master
|
Synthesis_incorporation/value_search/value_search_settings.py
|
# Copyright 2021 The TF-Coder Authors.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for operation filtering."""
import functools
import math
import operator
from typing import Any, Tuple, Type
import torch
from tf_coder import filter_group
from tf_coder import tensor_limits as limits
from tf_coder import tf_coder_utils
from tf_coder.value_search import value as value_module
@functools.lru_cache(maxsize=None)
def get_type_filter(desired_type):
"""Returns a value filter that only keeps values of the given type."""
return lambda arg_value: arg_value.type is desired_type
@functools.lru_cache(maxsize=None)
def get_types_filter(desired_types: Tuple[Type[Any], ...]):
"""Returns a value filter that only keeps values with the given types."""
return lambda arg_value: arg_value.type in desired_types
@functools.lru_cache(maxsize=None)
def get_dtype_filter(dtype):
"""Returns a value filter that only keeps tensor values of the given dtype."""
if not isinstance(dtype, torch.dtype):
raise TypeError("dtype must be a torch.dtype.")
return lambda arg_value: arg_value.dtype is dtype
@functools.lru_cache(maxsize=None)
def get_tensor_min_rank_filter(rank):
"""Returns a value filter that only keeps tensors of high enough rank."""
return lambda arg_value: arg_value.is_tensor and len(arg_value.shape) >= rank
def _check_tensor_finite(tensor):
"""Returns whether the float tensor contains all finite entries.
Args:
tensor: A float tensor. This cannot be an int tensor, or else
torch.math.is_finite() will fail!
"""
return bool(torch.all(torch.isfinite(tensor)))
def is_castable(to_cast, dtype):
"""Returns whether `to_cast` (a Value) can be safely casted to the dtype.
This filtering strategy is a workaround for undefined behavior in TensorFlow
(b/119633897).
Args:
to_cast: A Value object that would be casted.
dtype: A Value containing a torch.dtype that `to_cast` would be casted to.
"""
if not dtype.is_int_dtype():
return True # We can always cast to a non-int dtype.
to_cast_value = to_cast.value
if to_cast.is_sparse_tensor:
to_cast_value = to_cast.value.values
if to_cast.is_tensor or to_cast.is_sparse_tensor:
if not to_cast.has_float_dtype():
return True # Only float -> int is potentially unsafe.
if not _check_tensor_finite(to_cast_value):
            return False  # Non-finite floats cannot be cast to int dtypes.
elif to_cast.is_sequence:
if to_cast.elem_type is float:
if float("nan") in to_cast_value:
return False # inf and -inf will be caught by the min/max logic.
elif to_cast.elem_type_is_tensor:
return all(
element.size()
                and is_castable(value_module.InputValue(element, "dummy"), dtype)
for element in to_cast_value
)
elif to_cast.elem_type_is_sparse_tensor:
return all(
                element.values().size()
                and is_castable(value_module.InputValue(element, "dummy"), dtype)
for element in to_cast_value
)
else:
return True # Only lists of floats or float tensors can be unsafe.
elif to_cast.type is float:
if math.isnan(to_cast_value):
return False
else:
return True
min_int, max_int = tf_coder_utils.INT_DTYPE_MIN_MAX[dtype.value]
    # Floats are truncated when cast to int (rounded toward zero).
# Assuming min_int <= 0, the minimum safe float is (min_int - 1 + epsilon),
# and the maximum safe float is (max_int + 1 - epsilon).
return to_cast.min() > min_int - 1 and to_cast.max() < max_int + 1
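# Worked example (illustrative, assuming INT_DTYPE_MIN_MAX maps torch.int32 to its
# representable range): min_int = -2147483648 and max_int = 2147483647, so any
# float strictly inside (-2147483649.0, 2147483648.0) is safe. 2147483647.9
# truncates toward zero to 2147483647, while 2147483648.0 would overflow, which is
# why the check above uses strict inequalities against min_int - 1 and max_int + 1.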
def broadcastable(shape_1, shape_2):
"""Returns whether the two shapes are broadcastable."""
return (
not shape_1
or not shape_2
or all(x == y or x == 1 or y == 1 for x, y in zip(shape_1[::-1], shape_2[::-1]))
)
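# Illustrative examples (not part of the original module) of the trailing-dimension
# rule implemented above:
#
#   broadcastable((3, 1, 5), (4, 5))  # True: pairs (5, 5) and (1, 4); a 1 matches anything
#   broadcastable((2, 3), (3,))       # True: pair (3, 3); the extra leading dim is ignored
#   broadcastable((2, 3), (2, 4))     # False: pair (3, 4) with neither side equal to 1
#   broadcastable((), (7, 7))         # True: an empty shape is always broadcastable here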
# Constants for common filters. These are named with uppercase to reinforce the
# fact that these are constants and should be used as such, even though they are
# also technically functions.
# pylint: disable=invalid-name
# A filter that only keeps primitives.
PRIMITIVE_FILTER = operator.attrgetter("is_primitive")
# A filter that only keeps torch.DType objects.
DTYPE_FILTER = operator.attrgetter("is_dtype")
# A filter that only keeps sequences.
SEQUENCE_FILTER = operator.attrgetter("is_sequence")
# A filter that only keeps tensors.
TENSOR_FILTER = operator.attrgetter("is_tensor")
def FLOAT_TENSOR_FILTER(arg_value):
"""Only keeps float tensors."""
return arg_value.is_tensor and not arg_value.is_sparse_tensor and arg_value.has_float_dtype()
def NUMERIC_TENSOR_FILTER(arg_value):
"""Only keeps int and float tensors."""
return arg_value.is_tensor and not arg_value.is_sparse_tensor and (
arg_value.has_int_dtype() or arg_value.has_float_dtype()
)
def NUMERIC_PRIMITIVE_FILTER(arg_value):
"""Only keeps int and float primitives."""
return arg_value.is_primitive and arg_value.type is not bool
def NONSCALAR_NUMERIC_TENSOR_FILTER(arg_value):
"""Only keeps non-scalar int and float tensors."""
return NUMERIC_TENSOR_FILTER(arg_value) and len(arg_value.shape)
def INDICES_FILTER(arg_value):
"""Only keeps tensors/sequences containing ints suitable for indexing."""
return (
arg_value.is_tensor
and arg_value.has_int_dtype()
and arg_value.min() >= 0
and len(arg_value.shape) == 1
)
def GATHER_INDICES_FILTER(arg_value):
"""Only keeps tensors/sequences containing ints suitable for indexing."""
return (
arg_value.is_tensor
and arg_value.has_int_dtype()
and arg_value.min() >= 0
)
def AXIS_FILTER(arg_value):
"""Only keeps ints in the range [-1, limits.MAX_NUM_DIMENSIONS)."""
return arg_value.type is int and -1 <= arg_value.value < limits.MAX_NUM_DIMENSIONS
def AXIS_SEQUENCE_FILTER(arg_value):
"""Only keeps sequences of axis-like ints."""
return (
INTS_SEQUENCE_FILTER(arg_value)
and len(arg_value.value) <= limits.MAX_NUM_DIMENSIONS
and -1 <= arg_value.min()
and arg_value.max() < limits.MAX_NUM_DIMENSIONS
)
def PRIMITIVE_OR_SCALAR_TENSOR_FILTER(arg_value):
"""Only keeps primitives or scalar tensors."""
    return arg_value.is_primitive or (arg_value.is_tensor and not arg_value.shape)
def NON_SCALAR_TENSOR_FILTER(arg_value):
"""Only keeps tensors that are not scalars."""
return arg_value.is_tensor and arg_value.shape
def NOT_TENSOR_FILTER(arg_value):
"""Only keeps a value if it is not a Tensor or SparseTensor."""
return not arg_value.is_tensor and not arg_value.is_sparse_tensor and not arg_value.is_dtype
def PRIMITIVE_OR_TENSOR_FILTER(arg_value):
"""Only keeps primitives and tensors."""
return arg_value.is_primitive or arg_value.is_tensor
def NUMERIC_PRIMITIVE_OR_TENSOR_FILTER(arg_value):
"""Only keeps numeric primitives and tensors."""
return (NUMERIC_TENSOR_FILTER(arg_value)
or NUMERIC_PRIMITIVE_FILTER(arg_value))
def NONZERO_PRIMITIVE_OR_TENSOR_FILTER(arg_value):
"""Only keeps non-zero primitives and tensors"""
if NUMERIC_TENSOR_FILTER(arg_value):
return len(torch.nonzero(arg_value.value)) > 0
elif NUMERIC_PRIMITIVE_FILTER(arg_value):
return arg_value.value != 0
else:
return False
def TENSOR_1D_FILTER(arg_value):
"""Only keeps 1-D tensors."""
return arg_value.is_tensor and len(arg_value.shape) == 1
def CONTAINS_INTS_FILTER(arg_value):
"""Only keeps int sequences or int tensors."""
    return arg_value.elem_type is int or arg_value.has_int_dtype()
def INTS_SEQUENCE_FILTER(arg_value):
"""Only keeps int sequences ."""
return arg_value.elem_type is int
def TENSOR_SEQUENCE_FILTER(arg_value):
""" Only keeps a tensor sequence having same shapes and dtypes."""
if not arg_value.elem_type_is_tensor:
return False
dtype = arg_value.value[0].dtype
shape = arg_value.value[0].shape
for a in arg_value.value[1:]:
if a.dtype != dtype:
return False
if a.shape != shape:
return False
return True
def TENSOR_LIKE_SEQUENCE_FILTER(arg_value):
"""Only keeps rectangular possibly-nested sequences of primitives."""
return arg_value.is_sequence and arg_value.sequence_dtype is not None
def INT_OR_INT_TENSOR_FILTER(arg_value):
"""Only keeps int primitives or int tensors."""
return arg_value.type is int or (
arg_value.is_tensor and not arg_value.shape and arg_value.has_int_dtype()
)
def INT_LENGTH_FILTER(arg_value):
"""Only keeps int primitives or tensors representing a dimension length."""
return (
arg_value.type is int
and 0 < int(arg_value.value) <= limits.MAX_DIMENSION_LENGTH
)
def SHAPE_FILTER(arg_value):
"""Only keeps int sequences representing tensor shapes."""
return (
arg_value.is_sequence
and arg_value.elem_type is int
and 0 < len(arg_value.value) <= limits.MAX_NUM_DIMENSIONS
and arg_value.min() > 0
and arg_value.max() <= limits.MAX_DIMENSION_LENGTH
and arg_value.reduce_prod() <= limits.MAX_TENSOR_ELEMENTS
)
def TENSOR_OR_SPARSE_FILTER(arg_value):
"""Only keeps Tensors and SparseTensors."""
return arg_value.is_tensor or arg_value.is_sparse_tensor
def VECTOR_LENGTH_FILTER(arg_value):
"""Ensures that a vector of length N (N is the argument) is small enough."""
return (
INT_OR_INT_TENSOR_FILTER(arg_value)
and 0 < int(arg_value.value) <= limits.MAX_DIMENSION_LENGTH
)
def SQUARE_MATRIX_SIZE_FILTER(arg_value):
"""Ensures that an NxN matrix (N is the argument) is small enough."""
if not INT_OR_INT_TENSOR_FILTER(arg_value):
return False
num_rows = int(arg_value.value)
return (
0 < num_rows <= limits.MAX_DIMENSION_LENGTH
and num_rows ** 2 <= limits.MAX_TENSOR_ELEMENTS
)
def SEQUENCE_MASK_LENGTHS_FILTER(arg_value):
"""The value must contain few ints with a small maximum."""
# Only int tensors (not SparseTensors), or list of ints, are ok.
if not (
arg_value.is_tensor and arg_value.has_int_dtype() or arg_value.elem_type is int
):
return False
max_value = arg_value.max()
num_elements = arg_value.num_elements()
return num_elements > 0 and max_value * num_elements <= limits.MAX_TENSOR_ELEMENTS
def PADDINGS_FILTER(arg_value):
"""Must be a [N, 2] shape int32 tensor or nested sequence of ints."""
if arg_value.is_sequence:
elem_type = arg_value.elem_type
shape = arg_value.sequence_shape
else:
return False
if not (
elem_type in [int, float]
and len(shape) == 1
and shape[0] % 2 == 0
and shape[0] / 2 <= limits.MAX_NUM_DIMENSIONS
):
return False
return 0 <= arg_value.min() and arg_value.max() < limits.MAX_DIMENSION_LENGTH / 2
def BATCH_DIMS_FILTER(arg_value):
"""Must be an int representing a number of batch dimensions."""
return arg_value.type is int and 0 <= arg_value.value < limits.MAX_NUM_DIMENSIONS
def SCATTER_INDICES_FILTER(arg_value):
"""Must be an int tensor appropriate for indices in scatter operations."""
return (
arg_value.is_tensor
        and arg_value.has_int_dtype()
and len(arg_value.shape) >= 2
and arg_value.shape[-1] <= limits.MAX_NUM_DIMENSIONS
and arg_value.min() >= 0
and arg_value.max() < limits.MAX_DIMENSION_LENGTH
)
def BROADCASTABLE_APPLY_FILTER(arg_values):
"""The two args must be braodcastable."""
x, y = arg_values
return broadcastable(x.shape, y.shape)
def SAME_DTYPES_APPLY_FILTER(arg_values):
"""Ensures that the first two arguments have the same dtype."""
return arg_values[0].dtype == arg_values[1].dtype
def SAME_DTYPES_BROADCASTABLE_APPLY_FILTER(arg_values):
"""The two args must have the same dtypes and be broadcastable."""
x, y = arg_values
return x.dtype == y.dtype and broadcastable(x.shape, y.shape)
def SAME_SHAPES_APPLY_FILTER(arg_values):
"""Ensures that the first two arguments have the same shape."""
return arg_values[0].shape == arg_values[1].shape
def TENSOR_PRIMITIVE_SAME_TYPES_APPLY_FILTER(arg_values):
x, y = arg_values
if x.is_tensor:
if y.is_tensor:
return x.dtype == y.dtype
elif y.is_primitive:
if x.has_float_dtype() and y.type is float:
return True
elif x.has_int_dtype() and y.type is int:
return True
else:
return False
elif x.is_primitive:
if y.is_primitive:
return x.type == y.type
elif y.is_tensor:
if x.type is float and x.has_float_dtype():
return True
elif x.type is int and x.has_int_dtype():
return True
else:
return False
return False
def TENSOR_AXIS_IN_RANGE_APPLY_FILTER(arg_values):
"""Ensures the axis is less than the rank of the tensor."""
tensor, axis = arg_values
return axis.value < len(tensor.shape)
# End of section for filter constants. pylint: enable=invalid-name
# LINT.IfChange(add_filters_to_function_operation)
def add_filters_to_function_operation(function_operation):
"""Adds filters to the FunctionOperation depending on its FilterGroup."""
group = function_operation.function_info.filter_group
if group == filter_group.FilterGroup.NONE:
# Do nothing.
pass
elif group == filter_group.FilterGroup.SHAPE_1:
function_operation.add_value_filters([SHAPE_FILTER])
elif group == filter_group.FilterGroup.TENSOR_1:
function_operation.add_value_filters([TENSOR_FILTER])
elif group == filter_group.FilterGroup.TENSORSEQUENCE_1:
function_operation.add_value_filters([TENSOR_SEQUENCE_FILTER])
elif group == filter_group.FilterGroup.FLOATTENSOR_1:
function_operation.add_value_filters([FLOAT_TENSOR_FILTER])
elif group == filter_group.FilterGroup.NUMERICTENSOR_1:
function_operation.add_value_filters([NUMERIC_TENSOR_FILTER])
elif group == filter_group.FilterGroup.PRIMITIVE_OR_TENSOR_1:
function_operation.add_value_filters([PRIMITIVE_OR_TENSOR_FILTER])
elif group == filter_group.FilterGroup.TENSOR_AXIS_2:
function_operation.add_value_filters([TENSOR_FILTER, AXIS_FILTER])
function_operation.set_apply_filter(TENSOR_AXIS_IN_RANGE_APPLY_FILTER)
elif group == filter_group.FilterGroup.NUMERICTENSOR_AXIS_2:
function_operation.add_value_filters([NUMERIC_TENSOR_FILTER, AXIS_FILTER])
function_operation.set_apply_filter(TENSOR_AXIS_IN_RANGE_APPLY_FILTER)
elif group == filter_group.FilterGroup.TENSORSEQUENCE_AXIS_2:
function_operation.add_value_filters([TENSOR_SEQUENCE_FILTER, AXIS_FILTER])
elif group == filter_group.FilterGroup.TENSOR_BOOLTENSOR_2:
function_operation.add_value_filters(
[TENSOR_FILTER, get_dtype_filter(torch.bool)]
)
elif group == filter_group.FilterGroup.SAME_SHAPES_NUMERICTENSOR_2:
function_operation.add_value_filters([NUMERIC_TENSOR_FILTER] * 2)
function_operation.set_apply_filter(SAME_SHAPES_APPLY_FILTER)
elif group == filter_group.FilterGroup.SAME_DTYPE_NUMERIC_BROADCASTABLE_2:
function_operation.add_value_filters([NUMERIC_TENSOR_FILTER] * 2)
function_operation.set_apply_filter(SAME_DTYPES_BROADCASTABLE_APPLY_FILTER)
elif group == filter_group.FilterGroup.ELEMENTWISE_COMPARISON_2:
function_operation.add_value_filters(
[NUMERIC_TENSOR_FILTER, PRIMITIVE_OR_TENSOR_FILTER]
)
function_operation.set_apply_filter(BROADCASTABLE_APPLY_FILTER)
elif group == filter_group.FilterGroup.NE_BROADCASTABLE_2:
function_operation.add_value_filters(
[NUMERIC_TENSOR_FILTER, NONZERO_PRIMITIVE_OR_TENSOR_FILTER]
)
def _not_equal_broadcastable_filter(arg_values):
arg1, arg2 = arg_values
return (arg1 != arg2
and BROADCASTABLE_APPLY_FILTER(arg_values))
function_operation.set_apply_filter(_not_equal_broadcastable_filter)
# Operations with other special handling.
elif group == filter_group.FilterGroup.BINCOUNT_1:
def _bincount_filter(arg_value):
"""The value must contain nonnegative ints with a small maximum."""
            # Must be an int tensor.
if not (
arg_value.is_tensor
and arg_value.has_int_dtype()
):
return False
max_value = arg_value.max()
min_value = arg_value.min()
return (min_value >= 0
and max_value <= limits.MAX_DIMENSION_LENGTH
and len(arg_value.shape) == 1)
function_operation.add_value_filters([_bincount_filter])
elif group == filter_group.FilterGroup.TENSORIZABLE_1:
def _tensorizable_filter(arg_value):
if arg_value.is_primitive:
return True
elif arg_value.is_sequence:
return not arg_value.elem_type_is_tensor
else:
return False
function_operation.add_value_filters([_tensorizable_filter])
elif group == filter_group.FilterGroup.BMM_2:
def _numeric_min_rank_3_filter(arg_value):
"""Must be an int or float tensor of rank = 3."""
return arg_value.is_tensor and len(arg_value.shape) == 3
def _bmm_filter(arg_values):
"""Ensures the third dimension of the first tensor equals to
the second dimension of the second tensor, and the first dimension
of the two argumetns should be equal."""
return (SAME_DTYPES_APPLY_FILTER(arg_values)
and arg_values[0].shape[2] == arg_values[1].shape[1]
and arg_values[0].shape[0] == arg_values[1].shape[0]
)
function_operation.add_value_filters([_numeric_min_rank_3_filter] * 2)
function_operation.set_apply_filter(_bmm_filter)
elif group == filter_group.FilterGroup.CAT_TENSORSEQUENCE_AXIS_2:
function_operation.add_value_filters([TENSOR_SEQUENCE_FILTER, AXIS_FILTER])
def _axis_in_range(arg_values):
"""Ensures the axis is at most the rank of the tensor."""
tensor, axis = arg_values
return axis.value < len(tensor.value[0].shape)
function_operation.set_apply_filter(_axis_in_range)
elif group == filter_group.FilterGroup.CDIST_2:
def _cdist_filter(arg_value):
return (arg_value.is_tensor
and arg_value.has_float_dtype()
and len(arg_value.shape) > 1)
function_operation.add_value_filters([_cdist_filter] * 2)
function_operation.set_apply_filter(SAME_SHAPES_APPLY_FILTER)
elif group == filter_group.FilterGroup.EYE_1:
function_operation.add_value_filters([SQUARE_MATRIX_SIZE_FILTER])
elif group == filter_group.FilterGroup.RANGE_1:
function_operation.add_value_filters([VECTOR_LENGTH_FILTER])
elif group == filter_group.FilterGroup.EXPAND_DIMS_2:
function_operation.add_value_filters([TENSOR_FILTER, AXIS_FILTER])
def _axis_in_range(arg_values):
"""Ensures the axis is at most the rank of the tensor."""
tensor, axis = arg_values
return axis.value < len(tensor.shape)
function_operation.set_apply_filter(_axis_in_range)
elif group == filter_group.FilterGroup.EXPAND_DIMS_ADDITIONAL_2:
function_operation.add_value_filters([TENSOR_FILTER, AXIS_FILTER])
def _axis_in_range(arg_values):
"""Ensures the axis is at most the rank of the tensor."""
tensor, axis = arg_values
return axis.value <= len(tensor.shape)
function_operation.set_apply_filter(_axis_in_range)
elif group == filter_group.FilterGroup.EYE_ROWS_COLS_2:
def _eye_rows_cols_apply_filter(arg_values):
"""Checks that the result will have a small number of elements."""
num_rows, num_cols = arg_values
return (
int(num_rows.value) * int(num_cols.value) <= limits.MAX_TENSOR_ELEMENTS
)
function_operation.add_value_filters([VECTOR_LENGTH_FILTER] * 2)
function_operation.set_apply_filter(_eye_rows_cols_apply_filter)
elif group == filter_group.FilterGroup.MATMUL_2:
def _numeric_min_rank_2_filter(arg_value):
"""Must be an int or float tensor of rank >= 2."""
return arg_value.is_tensor and len(arg_value.shape) >= 2
function_operation.add_value_filters([_numeric_min_rank_2_filter] * 2)
function_operation.set_apply_filter(SAME_DTYPES_APPLY_FILTER)
elif group == filter_group.FilterGroup.MM_2:
def _numeric_min_rank_2_filter(arg_value):
"""Must be an int or float tensor of rank = 2."""
return arg_value.is_tensor and len(arg_value.shape) == 2
def _mm_filter(arg_values):
"""Ensures the second dimension of the first tensor equals to
the first dimension of the second tensor."""
return (SAME_DTYPES_APPLY_FILTER(arg_values)
and arg_values[0].shape[1] == arg_values[1].shape[0]
)
function_operation.add_value_filters([_numeric_min_rank_2_filter] * 2)
function_operation.set_apply_filter(_mm_filter)
elif group == filter_group.FilterGroup.NORMALIZE_2:
def _complex_tensor_filter(arg_value):
return (arg_value.is_tensor
and arg_value.has_float_dtype())
function_operation.add_value_filters([_complex_tensor_filter, AXIS_FILTER])
def _axis_in_range(arg_values):
"""Ensures the axis is at most the rank of the tensor."""
tensor, axis = arg_values
return axis.value < len(tensor.shape)
function_operation.set_apply_filter(_axis_in_range)
elif group == filter_group.FilterGroup.ONE_HOT_2:
def _one_hot_indices_filter(arg_value):
"""Must contain ints and less than the max number of dimensions."""
return (
arg_value.is_tensor
and arg_value.dtype == torch.int64
and arg_value.min() >= 0
and len(arg_value.shape) < limits.MAX_NUM_DIMENSIONS
)
def _one_hot_apply_filter(arg_values):
"""Checks that the result will have a small number of elements."""
indices, num_classes = arg_values
return (
indices.num_elements() * int(num_classes.value) <= limits.MAX_TENSOR_ELEMENTS
and indices.max() < num_classes.value
)
function_operation.add_value_filters(
[_one_hot_indices_filter, INT_LENGTH_FILTER]
)
function_operation.set_apply_filter(_one_hot_apply_filter)
elif group == filter_group.FilterGroup.PAD_2:
function_operation.add_value_filters([TENSOR_FILTER, PADDINGS_FILTER])
def _pad_2_apply_filter(arg_values):
tensor, paddings = arg_values
paddings_shape = paddings.sequence_shape
return (
tensor.shape
and paddings_shape[0] / 2 <= len(tensor.shape)
)
function_operation.set_apply_filter(_pad_2_apply_filter)
elif group == filter_group.FilterGroup.RESHAPE_2:
def _reshape_filter(arg_values):
"""The new size must be compatible with its original size."""
tensor, shape = arg_values
num_tensor_elements = torch.prod(torch.tensor(tensor.value.shape))
num_shape_elements = torch.prod(torch.tensor(shape.value))
return (num_tensor_elements % num_shape_elements == 0
and num_shape_elements != 1)
function_operation.add_value_filters([TENSOR_FILTER, SHAPE_FILTER])
function_operation.set_apply_filter(_reshape_filter)
elif group == filter_group.FilterGroup.SEARCHSORTED_2:
def _sorted_last_dimension(arg_value):
"""Must be a numeric tensor that is sorted in the last dimension."""
return (
NONSCALAR_NUMERIC_TENSOR_FILTER(arg_value)
and (
arg_value.has_float_dtype()
or arg_value.dtype in [torch.int32, torch.int64]
)
and bool(
torch.all(torch.eq(arg_value.value, torch.sort(arg_value.value)[0]))
)
)
function_operation.add_value_filters(
[_sorted_last_dimension, NUMERIC_PRIMITIVE_OR_TENSOR_FILTER]
)
def _searchsorted_apply_filter(arg_values):
"""DTypes must match, dimension lengths equal except the last."""
sorted_sequence, values = arg_values
return (
sorted_sequence.dtype == values.dtype
and len(sorted_sequence.shape) == len(values.shape)
and sorted_sequence.shape[:-1] == values.shape[:-1]
)
function_operation.set_apply_filter(_searchsorted_apply_filter)
elif group == filter_group.FilterGroup.TILE_2:
def _tile_apply_filter(arg_values):
"""Checks that the result will have a small number of elements."""
tensor, multiples = arg_values
return (
multiples.min() > 0
and multiples.max() > 1
and multiples.reduce_prod() * tensor.num_elements()
<= limits.MAX_TENSOR_ELEMENTS
)
function_operation.add_value_filters([TENSOR_FILTER, AXIS_SEQUENCE_FILTER])
function_operation.set_apply_filter(_tile_apply_filter)
elif group == filter_group.FilterGroup.SQUEEZE_2:
def _very_squeezable_filter(arg_value):
"""Keeps tensors with more than 1 squeezable dimension."""
# If a tensor only has 1 squeezable dimension, then this operation is
# useless because it is simpler to use the one-arg version of squeeze.
return TENSOR_FILTER(arg_value) and (arg_value.shape or []).count(1) >= 2
function_operation.add_value_filters([_very_squeezable_filter, AXIS_FILTER])
def _squeeze_2_apply_filter(arg_values):
tensor, axis = arg_values
return axis.value < len(tensor.shape) and tensor.shape[axis.value] == 1
function_operation.set_apply_filter(_squeeze_2_apply_filter)
elif group == filter_group.FilterGroup.GATHER_3:
function_operation.add_value_filters(
[
NON_SCALAR_TENSOR_FILTER,
BATCH_DIMS_FILTER,
GATHER_INDICES_FILTER,
]
)
def _gather_3_apply_filter(arg_values):
params, batch_dims, indices = arg_values
batch_dims_int = batch_dims.value
indices_shape = (
indices.shape if indices.is_tensor else indices.sequence_shape
)
return (
indices.is_tensor
and batch_dims_int < min(len(indices_shape), len(params.shape))
and params.shape[:batch_dims_int] == indices_shape[:batch_dims_int]
and indices_shape
# It is also required that index.size(d) <= input.size(d) for all dimensions d != dim
and all([(indices_shape[d] <= params.shape[d]) or d == batch_dims_int for d in range(min(len(params.shape), len(indices_shape)))])
and indices.max() < params.shape[batch_dims_int]
and
# Upper bound on resulting tensor size.
indices.num_elements() * params.num_elements()
<= limits.MAX_TENSOR_ELEMENTS
)
function_operation.set_apply_filter(_gather_3_apply_filter)
elif group == filter_group.FilterGroup.INDEX_SELECT_3:
function_operation.add_value_filters(
[
NON_SCALAR_TENSOR_FILTER,
BATCH_DIMS_FILTER,
INDICES_FILTER,
]
)
def _index_select_3_apply_filter(arg_values):
params, dim, indices = arg_values
dim_int = dim.value
indices_shape = indices.shape
return (
dim_int < len(params.shape)
and indices_shape
and indices.max() < max(params.shape)
and
# Upper bound on resulting tensor size.
indices.num_elements() * params.num_elements()
<= limits.MAX_TENSOR_ELEMENTS
)
function_operation.set_apply_filter(_index_select_3_apply_filter)
elif group == filter_group.FilterGroup.RANGE_3:
def _range_3_apply_filter(arg_values):
"""Checks that the range will end up having a small number of elements."""
start, limit, delta = arg_values
return (
delta.value != 0
and 0
< len(range(start.value, limit.value, delta.value))
<= limits.MAX_DIMENSION_LENGTH
)
function_operation.add_value_filters([get_type_filter(int)] * 3)
function_operation.set_apply_filter(_range_3_apply_filter)
elif group == filter_group.FilterGroup.REPEAT_3:
def _repeat_filter(arg_value):
return (INT_OR_INT_TENSOR_FILTER(arg_value)
and arg_value.min() > 0)
def _repeat_3_apply_filter(arg_values):
"""Checks the first two arguments are broadcastable
and the third argument is at most the rank of the tensor."""
return (BROADCASTABLE_APPLY_FILTER([arg_values[0], arg_values[1]])
and TENSOR_AXIS_IN_RANGE_APPLY_FILTER([arg_values[0], arg_values[2]]))
function_operation.add_value_filters([NUMERIC_TENSOR_FILTER, _repeat_filter, AXIS_FILTER])
function_operation.set_apply_filter(_repeat_3_apply_filter)
elif group == filter_group.FilterGroup.ROLL_3:
# The case where the shift and axis are both single integers.
function_operation.add_value_filters(
[TENSOR_FILTER, INT_OR_INT_TENSOR_FILTER, AXIS_FILTER]
)
# The case where the shift and axis are both sequences of integers.
function_operation.add_value_filters(
[TENSOR_FILTER, INTS_SEQUENCE_FILTER, AXIS_SEQUENCE_FILTER]
)
def _roll_apply_filter(arg_values):
tensor, shift, axis = arg_values
if axis.type is int:
return axis.value < len(tensor.shape)
else:
return len(axis.value) == len(shift.value) and axis.max() < len(
tensor.shape
)
function_operation.set_apply_filter(_roll_apply_filter)
elif group == filter_group.FilterGroup.TENSORDOT_3:
def _tensordot_arg_3_filter(arg_value):
"""The argument "axes" must have axis-like ints and the right shape."""
if arg_value.type is int:
# An int N means "sum over the last N axes of a and the first N axes of
# b in order", so 0 <= N <= maximum rank.
return 0 <= arg_value.value <= limits.MAX_NUM_DIMENSIONS
if arg_value.elem_type is int:
# List of length 2 is ok, elements must be valid axes.
return (
len(arg_value.value) == 2
and 0 <= arg_value.min()
and arg_value.max() < limits.MAX_NUM_DIMENSIONS
)
# Otherwise, must be an int tensor of shape [2] or [2, k].
return (
arg_value.is_tensor
and arg_value.has_int_dtype()
and 1 <= len(arg_value.shape) <= 2
and arg_value.shape[0] == 2
and 0 <= arg_value.min()
and arg_value.max() < limits.MAX_NUM_DIMENSIONS
)
function_operation.add_value_filters(
[
NONSCALAR_NUMERIC_TENSOR_FILTER,
NONSCALAR_NUMERIC_TENSOR_FILTER,
_tensordot_arg_3_filter,
]
)
def _tensordot_apply_filter(arg_value):
"""First two tensors must have same dtype, and axes must be in range."""
a, b, axes = arg_value
if (
a.dtype != b.dtype
or
# This check is overly conservative for the sake of efficiency; the
# resulting number of elements is most likely smaller but will take
# effort to compute more precisely.
a.num_elements() * b.num_elements() > limits.MAX_TENSOR_ELEMENTS
):
return False
a_rank = len(a.shape)
b_rank = len(b.shape)
min_rank = min(a_rank, b_rank)
if axes.type is int:
return axes.value <= min_rank
elif axes.is_sequence or len(axes.shape) == 1:
# axes is a list or tensor of shape [2].
return axes.max() < min_rank
else: # axes is a tensor of shape [2, k].
return (
axes.shape[1] <= min_rank
and tf_coder_utils.max_tensor_value(axes.value[0]) < a_rank
and tf_coder_utils.max_tensor_value(axes.value[1]) < b_rank
)
function_operation.set_apply_filter(_tensordot_apply_filter)
elif group == filter_group.FilterGroup.TRANSPOSE_3:
def _transpose_3_apply_filter(arg_values):
"""Checks that perm has length equal to the number of a's dimensions."""
tensor, dim0, dim1 = arg_values
return (dim0.value < len(tensor.shape)
and dim1.value < len(tensor.shape)
and dim0.value < dim1.value)
function_operation.add_value_filters(
[TENSOR_FILTER, BATCH_DIMS_FILTER, BATCH_DIMS_FILTER]
)
function_operation.set_apply_filter(_transpose_3_apply_filter)
elif group == filter_group.FilterGroup.WHERE_TENSOR_3:
def _where_apply_filter(arg_values):
"""Ensures that the last two arguments have matching shapes and dtypes."""
condition, x, y = arg_values
return (TENSOR_PRIMITIVE_SAME_TYPES_APPLY_FILTER([x, y])
and broadcastable(condition.shape, x.shape)
and broadcastable(condition.shape, y.shape)
and x != y)
function_operation.add_value_filters(
[
get_dtype_filter(torch.bool),
NUMERIC_TENSOR_FILTER,
NUMERIC_PRIMITIVE_OR_TENSOR_FILTER,
]
)
function_operation.set_apply_filter(_where_apply_filter)
elif group == filter_group.FilterGroup.WHERE_NUMERIC_3:
def _where_apply_filter(arg_values):
"""Ensures that the last two arguments have matching shapes and dtypes."""
condition, x, y = arg_values
return (TENSOR_PRIMITIVE_SAME_TYPES_APPLY_FILTER([x, y])
and broadcastable(condition.shape, x.shape)
and broadcastable(condition.shape, y.shape)
and x != y)
function_operation.add_value_filters(
[
get_dtype_filter(torch.bool),
NUMERIC_PRIMITIVE_FILTER,
NUMERIC_PRIMITIVE_OR_TENSOR_FILTER,
]
)
function_operation.set_apply_filter(_where_apply_filter)
else:
raise ValueError(
"Unknown filter group {} for FunctionOperation {}.".format(
group, function_operation.name
)
)
# LINT.ThenChange()
# It is reasonable to strengthen or relax a filtering strategy here without
# involving a change to the filter groups.
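# Illustrative sketch (not part of the original module): for an operation in the
# TENSOR_AXIS_2 group, the dispatch above installs the per-argument value filters
# [TENSOR_FILTER, AXIS_FILTER] plus the joint apply filter
# TENSOR_AXIS_IN_RANGE_APPLY_FILTER, so value search only tries (tensor, axis)
# pairs whose axis is an int in [-1, limits.MAX_NUM_DIMENSIONS) that is also
# strictly less than the tensor's rank.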
|
APIsynth-master
|
Synthesis_incorporation/value_search/operation_filtering.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Defines Operation objects for Python operators."""
import torch
from tf_coder.value_search import operation_base
from tf_coder.value_search import operation_filtering as filtering
from tf_coder.value_search import value
# Weights for Python operations.
SIZE_WEIGHT = 32
INT_WEIGHT = 16
FLOAT_WEIGHT = 16
BOOL_WEIGHT = 16
VIEW_WEIGHT = 28
EXPAND_WEIGHT = 24
# "Docstrings" for Python operations, so they can used for ranking in the same
# way as for TensorFlow operations.
SIZE_DOCSTRING = """
Returns the size of the self tensor. The returned value is a subclass of tuple.
"""
INT_DOCSTRING = """
Cast the self tensor to int.
"""
FLOAT_DOCSTRING = """
Cast the self tensor to float.
"""
BOOL_DOCSTRING = """
Cast the self tensor to bool.
"""
VIEW_DOCSTRINGS = """
Returns a new tensor with the same data as the self tensor but of a different shape.
"""
EXPAND_DOCSTRINGS = """
Returns a new view of the self tensor with singleton dimensions expanded to a larger size.
"""
class SizeOperation(operation_base.Operation):
def __init__(self):
metadata = operation_base.OperationMetadata(docstring=SIZE_DOCSTRING)
super(SizeOperation, self).__init__(
num_args=1, weight=SIZE_WEIGHT, metadata=metadata)
self.add_value_filters([filtering.NON_SCALAR_TENSOR_FILTER])
def apply(self, arg_values, settings):
"""See base class."""
try:
return value.OperationValue(arg_values[0].value.size(),
self, arg_values)
except Exception: # pylint: disable=broad-except
return None
def reconstruct_expression_from_strings(self, arg_strings):
"""See base class."""
if len(arg_strings) == 1:
return arg_strings[0] + '.size()'
else:
return arg_strings[0] + '.size(' + arg_strings[1] + ')'
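# Illustrative sketch (not part of the original module): given an argument whose
# reconstructed expression string is "in1", the operation rebuilds the call as
#   SizeOperation().reconstruct_expression_from_strings(["in1"])  # -> "in1.size()"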
class IntOperation(operation_base.Operation):
def __init__(self):
metadata = operation_base.OperationMetadata(docstring=INT_DOCSTRING)
super(IntOperation, self).__init__(
num_args=1, weight=INT_WEIGHT, metadata=metadata)
def _non_int_tensor_filter(arg_value):
"""Only keeps values that are non-int tensors."""
return arg_value.is_tensor and not arg_value.has_int_dtype()
self.add_value_filters([_non_int_tensor_filter])
def apply(self, arg_values, settings):
"""See base class."""
try:
return value.OperationValue(arg_values[0].value.long(),
self, arg_values)
except Exception: # pylint: disable=broad-except
return None
def reconstruct_expression_from_strings(self, arg_strings):
"""See base class."""
return arg_strings[0] + '.long()'
class FloatOperation(operation_base.Operation):
def __init__(self):
metadata = operation_base.OperationMetadata(docstring=FLOAT_DOCSTRING)
super(FloatOperation, self).__init__(
num_args=1, weight=FLOAT_WEIGHT, metadata=metadata)
def _non_float_tensor_filter(arg_value):
"""Only keeps values that are non-float tensors."""
return arg_value.is_tensor and not arg_value.has_float_dtype()
self.add_value_filters([_non_float_tensor_filter])
def apply(self, arg_values, settings):
"""See base class."""
try:
return value.OperationValue(arg_values[0].value.float(),
self, arg_values)
except Exception: # pylint: disable=broad-except
return None
def reconstruct_expression_from_strings(self, arg_strings):
"""See base class."""
return arg_strings[0] + '.float()'
class BoolOperation(operation_base.Operation):
def __init__(self):
metadata = operation_base.OperationMetadata(docstring=BOOL_DOCSTRING)
super(BoolOperation, self).__init__(
num_args=1, weight=BOOL_WEIGHT, metadata=metadata)
def _non_bool_tensor_filter(arg_value):
"""Only keeps values that are non-bool tensors."""
return arg_value.is_tensor and not arg_value.has_bool_dtype()
self.add_value_filters([_non_bool_tensor_filter])
def apply(self, arg_values, settings):
"""See base class."""
try:
return value.OperationValue(arg_values[0].value.bool(),
self, arg_values)
except Exception: # pylint: disable=broad-except
return None
def reconstruct_expression_from_strings(self, arg_strings):
"""See base class."""
return arg_strings[0] + '.bool()'
class ViewOperation(operation_base.Operation):
def __init__(self):
metadata = operation_base.OperationMetadata(docstring=VIEW_DOCSTRINGS)
super(ViewOperation, self).__init__(
num_args=2, weight=VIEW_WEIGHT, metadata=metadata)
        def _size_compatible_filter(arg_values):
            """The new size must be compatible with the original size."""
            in1, in2 = arg_values
            return torch.prod(torch.tensor(in1.value.shape)) % torch.prod(torch.abs(torch.tensor(in2.value))) == 0
        self.add_value_filters([filtering.TENSOR_FILTER, filtering.SHAPE_FILTER])
        self.set_apply_filter(_size_compatible_filter)
def apply(self, arg_values, settings):
"""See base class."""
try:
return value.OperationValue(arg_values[0].value.view(arg_values[1].value),
self, arg_values)
except Exception: # pylint: disable=broad-except
return None
def reconstruct_expression_from_strings(self, arg_strings):
"""See base class."""
return arg_strings[0] + '.view(' + arg_strings[1] + ')'
class ExpandOperation(operation_base.Operation):
def __init__(self):
metadata = operation_base.OperationMetadata(docstring=EXPAND_DOCSTRINGS)
super(ExpandOperation, self).__init__(
num_args=2, weight=EXPAND_WEIGHT, metadata=metadata)
        def _size_compatible_filter(arg_values):
            """The new size must be compatible with the original size."""
            in1, in2 = arg_values
            in1_dims_len = len(in1.value.shape)
            in2_dims_len = len(in2.value)
            if in1_dims_len > in2_dims_len:
                return False
            # expand() prepends new dimensions at the front; those sizes may not be -1.
            for i in range(in2_dims_len - in1_dims_len):
                if in2.value[i] == -1:
                    return False
            return True
        self.add_value_filters([filtering.TENSOR_FILTER, filtering.SHAPE_FILTER])
        self.set_apply_filter(_size_compatible_filter)
def apply(self, arg_values, settings):
"""See base class."""
try:
return value.OperationValue(arg_values[0].value.expand(arg_values[1].value),
self, arg_values)
except Exception: # pylint: disable=broad-except
return None
def reconstruct_expression_from_strings(self, arg_strings):
"""See base class."""
return arg_strings[0] + '.expand(' + arg_strings[1] + ')'
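# Illustrative sketch (not part of the original module): both member operations
# rebuild method-call syntax from their argument expression strings, e.g.
#   ViewOperation().reconstruct_expression_from_strings(["in1", "(2, 3)"])    # -> "in1.view((2, 3))"
#   ExpandOperation().reconstruct_expression_from_strings(["in1", "(2, 3)"])  # -> "in1.expand((2, 3))"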
|
APIsynth-master
|
Synthesis_incorporation/value_search/tensor_member_operations.py
|
# Copyright 2021 The TF-Coder Authors.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Manages all Operation objects used by value search."""
import inspect
from typing import List, Optional, Text
from tf_coder import torch_functions
from tf_coder.models import prediction_model
from tf_coder.value_search import function_operation
from tf_coder.value_search import operation_base
from tf_coder.value_search import python_operations
from tf_coder.value_search import tensor_member_operations
def get_python_operations() -> List[operation_base.Operation]:
"""Returns a list of Operation objects from the python_operations module."""
operation_classes = inspect.getmembers(
python_operations, lambda x: inspect.isclass(x) and not inspect.isabstract(x)
)
return [operation_class() for unused_name, operation_class in operation_classes]
def get_member_operations() -> List[operation_base.Operation]:
"""Returns a list of Operation objects from the torch_member_operations module."""
operation_classes = inspect.getmembers(
tensor_member_operations,
lambda x: inspect.isclass(x) and not inspect.isabstract(x),
)
return [operation_class() for unused_name, operation_class in operation_classes]
def get_torch_operations() -> List[operation_base.Operation]:
"""Returns a list of Operation objects for dense PyTorch operations."""
return [
function_operation.FunctionOperation(function_info)
for function_info in torch_functions.TORCH_FUNCTIONS
]
def get_sparse_operations() -> List[operation_base.Operation]:
"""Returns a list of Operation objects for sparse operations."""
return [function_operation.FunctionOperation(function_info)
for function_info in torch_functions.SPARSE_FUNCTIONS]
def get_operations(
include_sparse_operations: bool = False,
) -> List[operation_base.Operation]:
"""Returns a list of Operation objects that value search should use."""
operations = []
operations.extend(get_torch_operations())
if include_sparse_operations:
operations.extend(get_sparse_operations())
operations.extend(get_python_operations())
operations.extend(get_member_operations())
return operations
def find_operation_with_name(
operation_name: Text,
operation_list: Optional[List[operation_base.Operation]] = None,
) -> operation_base.Operation:
"""Finds an operation with the given name, optionally within a given list."""
if operation_list is None:
operation_list = get_operations(include_sparse_operations=True)
matching_operations = [op for op in operation_list if op.name == operation_name]
if len(matching_operations) == 1:
return matching_operations[0]
raise ValueError(
"Found {} operations matching the name {}".format(
len(matching_operations), operation_name
)
)
def find_operation_with_partial_name(
operation_name: Text,
operation_list: Optional[List[operation_base.Operation]] = None,
) -> List[operation_base.Operation]:
    """Finds all operations whose names contain the mapped prediction name."""
if operation_list is None:
operation_list = get_operations(include_sparse_operations=True)
mapped_name = prediction_model.PREDICTION_TO_NAME_MAP[operation_name]
matching_operations = [op for op in operation_list if mapped_name in op.name]
return matching_operations
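# Illustrative usage sketch (not part of the original module); the lookup strings
# below are hypothetical and must match entries in the registered operation names
# and in prediction_model.PREDICTION_TO_NAME_MAP respectively:
#
#   ops = get_operations(include_sparse_operations=True)
#   add_op = find_operation_with_name("torch.add(input, other)", operation_list=ops)
#   candidates = find_operation_with_partial_name("add", operation_list=ops)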
|
APIsynth-master
|
Synthesis_incorporation/value_search/all_operations.py
|
# Copyright 2021 The TF-Coder Authors.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Defines the Operation objects for functions."""
import re
import torch
from tf_coder import tensor_limits as limits
from tf_coder import tf_coder_utils
from tf_coder import torch_functions
from tf_coder.value_search import operation_base
from tf_coder.value_search import operation_filtering
from tf_coder.value_search import value
class FunctionOperation(operation_base.Operation):
"""An operation that applies a function to some arguments.
The arguments must be given in the same order as they appear in the function's
signature.
Arguments with default values in the function signature are optional at the
time of FunctionOperation creation. However, once created, a FunctionOperation
must be used with all of the arguments provided to its constructor.
"""
def __init__(self, function_info):
"""Creates a FunctionOperation.
Args:
            function_info: A torch_functions.FunctionInfo.
"""
(
function_name,
arg_names,
constant_kwargs,
) = torch_functions.parse_function_info_name(function_info)
self._function_obj = tf_coder_utils.get_torch_function(function_name)
docstring = self._function_obj.__doc__
if not docstring:
print(
"Warning: could not get docstring for function {}".format(function_name)
)
docstring = ""
# Make sure the function and argument names appear in the docstring. (Args
# should already appear in the docstring "Args" section though.)
docstring += "\n" + function_info.name
# If 'reduce_max' is the function name, make sure 'reduce' and 'max' also
# appear as separate words. Ditto for argument names as well.
docstring += "\n" + function_info.name.replace("_", " ")
# Upweight the function name (moreso than the argument names).
function_name_without_torch = re.sub(r"^torch\.", "", function_name)
docstring += ("\n" + function_name_without_torch) * 4
if "_" in function_name_without_torch:
docstring += ("\n" + function_name_without_torch.replace("_", " ")) * 2
metadata = operation_base.OperationMetadata(docstring=docstring)
super(FunctionOperation, self).__init__(
num_args=len(arg_names), weight=function_info.weight, metadata=metadata
)
self.function_info = function_info
self.function_name = function_name
self.arg_names = arg_names
self.constant_kwargs = constant_kwargs
operation_filtering.add_filters_to_function_operation(self)
def _compute_name(self):
return self.function_info.name
def _print_warnings(self, arg_values, result_value):
if isinstance(result_value, torch.Tensor):
num_elements = tf_coder_utils.num_tensor_elements(result_value)
else:
return
if num_elements > 10 * limits.MAX_TENSOR_ELEMENTS:
print(
"Warning: {} produced much-too-large tensor of shape {} and {} "
"elements.".format(
                    self.name, list(result_value.shape), num_elements
)
)
for i, arg_value in enumerate(arg_values):
if isinstance(arg_value.value, torch.Tensor):
print(
" argument {} has shape {} and {} elements".format(
i, arg_value.shape, arg_value.num_elements()
)
)
if arg_value.num_elements() <= 20:
print(" argument {} is: {}".format(i, arg_value.value))
elif arg_value.is_primitive:
print(" argument {} is: {}".format(i, arg_value.value))
else:
print(" argument {} has type {}".format(i, type(arg_value.value)))
print(
" argument {} has reconstruction: {}".format(
i, arg_value.reconstruct_expression()
)
)
def apply(self, arg_values, settings):
"""See base class."""
value_objects = [arg_value.value for arg_value in arg_values]
arg_dict = dict(zip(self.arg_names, value_objects))
arg_dict.update(self.constant_kwargs)
try:
result_value = self._function_obj(**arg_dict)
except Exception as e:
if settings.printing.verbose:
expression = self.reconstruct_expression(arg_values)
print("[Error] {}: {}".format(expression, e))
return None
try:
return value.OperationValue(result_value, self, arg_values)
except ValueError:
if settings.printing.tensor_size_warnings:
self._print_warnings(arg_values, result_value)
return None
def reconstruct_expression_from_strings(self, arg_strings):
"""See base class."""
arg_strings = list(arg_strings)
for kwarg_name, kwarg_value in self.constant_kwargs.items():
arg_strings.append("{}={!r}".format(kwarg_name, kwarg_value))
return self.function_name + "(" + ", ".join(arg_strings) + ")"
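# Illustrative sketch (not part of the original module): for a hypothetical
# FunctionInfo whose parsed function name is "torch.argmax" with
# constant_kwargs {"keepdim": False}, reconstruction from the argument expression
# strings ["in1", "1"] yields "torch.argmax(in1, 1, keepdim=False)".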
|
APIsynth-master
|
Synthesis_incorporation/value_search/function_operation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from logging import getLogger
import math
import os
import shutil
import time
import torch
import torch.nn as nn
from src.data.loader import load_data, get_data_transformations
from src.model.model_factory import model_factory, to_cuda, sgd_optimizer, sobel2RGB
from src.slurm import init_signal_handler, trigger_job_requeue
from src.trainer import validate_network, accuracy
from src.utils import (bool_flag, init_distributed_mode, initialize_exp, AverageMeter,
restart_from_checkpoint, fix_random_seeds,)
from src.model.pretrain import load_pretrained
logger = getLogger()
def get_parser():
"""
Generate a parameters parser.
"""
# parse parameters
parser = argparse.ArgumentParser(description="Train classification")
# main parameters
parser.add_argument("--dump_path", type=str, default=".",
help="Experiment dump path")
parser.add_argument('--epoch', type=int, default=0,
help='Current epoch to run')
parser.add_argument('--start_iter', type=int, default=0,
help='First iter to run in the current epoch')
parser.add_argument("--checkpoint_freq", type=int, default=20,
help="Save the model periodically ")
parser.add_argument("--evaluate", type=bool_flag, default=False,
help="Evaluate the model only")
parser.add_argument('--seed', type=int, default=35, help='random seed')
# model params
parser.add_argument('--sobel', type=bool_flag, default=0)
parser.add_argument('--sobel2RGB', type=bool_flag, default=False,
help='Incorporate sobel filter in first conv')
parser.add_argument('--pretrained', type=str, default='',
help='Use this instead of random weights.')
# datasets params
parser.add_argument('--data_path', type=str, default='',
help='Where to find ImageNet dataset')
parser.add_argument('--workers', type=int, default=8,
help='Number of data loading workers')
# optim params
parser.add_argument('--lr', type=float, default=0.05, help='Learning rate')
parser.add_argument('--wd', type=float, default=1e-5, help='Weight decay')
parser.add_argument('--nepochs', type=int, default=100,
help='Max number of epochs to run')
parser.add_argument('--batch_size', default=128, type=int)
# distributed training params
parser.add_argument('--rank', default=0, type=int,
help='rank')
parser.add_argument("--local_rank", type=int, default=-1,
help="Multi-GPU - Local rank")
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='', type=str,
help='url used to set up distributed training')
# debug
parser.add_argument("--debug", type=bool_flag, default=False,
help="Load val set of ImageNet")
parser.add_argument("--debug_slurm", type=bool_flag, default=False,
help="Debug within a SLURM job")
return parser.parse_args()
def main(args):
# initialize the multi-GPU / multi-node training
init_distributed_mode(args, make_communication_groups=False)
# initialize the experiment
logger, training_stats = initialize_exp(args, 'epoch', 'iter', 'prec',
'loss', 'prec_val', 'loss_val')
# initialize SLURM signal handler for time limit / pre-emption
init_signal_handler()
main_data_path = args.data_path
if args.debug:
args.data_path = os.path.join(main_data_path, 'val')
else:
args.data_path = os.path.join(main_data_path, 'train')
train_dataset = load_data(args)
args.data_path = os.path.join(main_data_path, 'val')
val_dataset = load_data(args)
# prepare the different data transformations
tr_val, tr_train = get_data_transformations()
train_dataset.transform = tr_train
val_dataset.transform = tr_val
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size,
num_workers=args.workers,
pin_memory=True,
)
# build model skeleton
fix_random_seeds(args.seed)
nmb_classes = 205 if 'places' in args.data_path else 1000
model = model_factory(args, relu=True, num_classes=nmb_classes)
# load pretrained weights
load_pretrained(model, args)
# merge sobel layers with first convolution layer
if args.sobel2RGB:
sobel2RGB(model)
    # re-initialize the classifier
if hasattr(model.body, 'classifier'):
for m in model.body.classifier.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.fill_(0.1)
# distributed training wrapper
model = to_cuda(model, [args.gpu_to_work_on], apex=True)
logger.info('model to cuda')
# set optimizer
optimizer = sgd_optimizer(model, args.lr, args.wd)
    # variables to fetch from the checkpoint
to_restore = {'epoch': 0, 'start_iter': 0}
    # restart from checkpoint
restart_from_checkpoint(
args,
run_variables=to_restore,
state_dict=model,
optimizer=optimizer,
)
args.epoch = to_restore['epoch']
args.start_iter = to_restore['start_iter']
if args.evaluate:
validate_network(val_loader, [model], args)
return
# Supervised training
for _ in range(args.epoch, args.nepochs):
logger.info("============ Starting epoch %i ... ============" % args.epoch)
fix_random_seeds(args.seed + args.epoch)
# train the network for one epoch
adjust_learning_rate(optimizer, args)
scores = train_network(args, model, optimizer, train_dataset)
scores_val = validate_network(val_loader, [model], args)
# save training statistics
logger.info(scores + scores_val)
training_stats.update(scores + scores_val)
def adjust_learning_rate(optimizer, args):
lr = args.lr * (0.1 ** (args.epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
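    # Worked example (illustrative): with the default --lr 0.05, this step schedule
    # yields 0.05 for epochs 0-29, 0.005 for epochs 30-59, 0.0005 for epochs 60-89,
    # and so on (a 10x decay every 30 epochs).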
def train_network(args, model, optimizer, dataset):
"""
Train the models on the dataset.
"""
    # switch to train mode
model.train()
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
batch_size=args.batch_size,
num_workers=args.workers,
pin_memory=True,
)
# running statistics
batch_time = AverageMeter()
data_time = AverageMeter()
# training statistics
log_top1 = AverageMeter()
log_loss = AverageMeter()
end = time.perf_counter()
cel = nn.CrossEntropyLoss().cuda()
for iter_epoch, (inp, target) in enumerate(loader):
# measure data loading time
data_time.update(time.perf_counter() - end)
# start at iter start_iter
if iter_epoch < args.start_iter:
continue
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# forward
output = model(inp)
# compute cross entropy loss
loss = cel(output, target)
optimizer.zero_grad()
# compute the gradients
loss.backward()
# step
optimizer.step()
# log
# signal received, relaunch experiment
if os.environ['SIGNAL_RECEIVED'] == 'True':
if not args.rank:
torch.save({
'epoch': args.epoch,
'start_iter': iter_epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, os.path.join(args.dump_path, 'checkpoint.pth.tar'))
trigger_job_requeue(os.path.join(args.dump_path, 'checkpoint.pth.tar'))
# update stats
log_loss.update(loss.item(), output.size(0))
prec1 = accuracy(args, output, target)
log_top1.update(prec1.item(), output.size(0))
batch_time.update(time.perf_counter() - end)
end = time.perf_counter()
# verbose
if iter_epoch % 100 == 0:
logger.info('Epoch[{0}] - Iter: [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec {log_top1.val:.3f} ({log_top1.avg:.3f})\t'
.format(args.epoch, iter_epoch, len(loader), batch_time=batch_time,
data_time=data_time, loss=log_loss, log_top1=log_top1))
# end of epoch
args.start_iter = 0
args.epoch += 1
# dump checkpoint
if not args.rank:
torch.save({
'epoch': args.epoch,
'start_iter': 0,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, os.path.join(args.dump_path, 'checkpoint.pth.tar'))
if not (args.epoch - 1) % args.checkpoint_freq:
shutil.copyfile(
os.path.join(args.dump_path, 'checkpoint.pth.tar'),
os.path.join(args.dump_checkpoints,
'checkpoint' + str(args.epoch - 1) + '.pth.tar'),
)
return (args.epoch - 1, args.epoch * len(loader), log_top1.avg, log_loss.avg)
if __name__ == '__main__':
# generate parser / parse parameters
args = get_parser()
# run experiment
main(args)
|
DeeperCluster-main
|
eval_pretrain.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
from sklearn import metrics
from src.utils import AverageMeter, bool_flag, fix_random_seeds
from src.trainer import accuracy
from src.data.VOC2007 import VOC2007_dataset
from src.model.model_factory import model_factory, sgd_optimizer
from src.model.pretrain import load_pretrained
parser = argparse.ArgumentParser()
# model params
parser.add_argument('--pretrained', type=str, required=False, default='',
help='evaluate this model')
# data params
parser.add_argument('--data_path', type=str, default='',
help='Where to find pascal 2007 dataset')
parser.add_argument('--split', type=str, required=False, default='train',
choices=['train', 'trainval'], help='training split')
parser.add_argument('--sobel', type=bool_flag, default=False, help='If true, sobel applies')
# transfer params
parser.add_argument('--fc6_8', type=bool_flag, default=True, help='If true, train only the final classifier')
parser.add_argument('--eval_random_crops', type=bool_flag, default=True, help='If true, eval on 10 random crops, otherwise eval on 10 fixed crops')
# optim params
parser.add_argument('--nit', type=int, default=150000, help='Number of training iterations')
parser.add_argument('--stepsize', type=int, default=10000, help='Decay step')
parser.add_argument('--lr', type=float, required=False, default=0.003, help='learning rate')
parser.add_argument('--wd', type=float, required=False, default=1e-6, help='weight decay')
parser.add_argument('--seed', type=int, default=1993, help='random seed')
def main():
args = parser.parse_args()
args.world_size = 1
print(args)
fix_random_seeds(args.seed)
# create model
model = model_factory(args, relu=True, num_classes=20)
# load pretrained weights
load_pretrained(model, args)
model = model.cuda()
print('model to cuda')
# on which split to train
if args.split == 'train':
args.test = 'val'
elif args.split == 'trainval':
args.test = 'test'
# data loader
normalize = [transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])]
dataset = VOC2007_dataset(args.data_path, split=args.split, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop(224),
transforms.ToTensor(),] + normalize
))
loader = torch.utils.data.DataLoader(dataset,
batch_size=16, shuffle=False,
num_workers=4, pin_memory=True)
print('PASCAL VOC 2007 ' + args.split + ' dataset loaded')
    # re-initialize the classifier
if hasattr(model.body, 'classifier'):
for m in model.body.classifier.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.fill_(0.1)
for m in model.pred_layer.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.fill_(0.1)
# freeze conv layers
if args.fc6_8:
if hasattr(model.body, 'features'):
for param in model.body.features.parameters():
param.requires_grad = False
# set optimizer
optimizer = torch.optim.SGD(
filter(lambda x: x.requires_grad, model.parameters()),
lr=args.lr,
momentum=0.9,
weight_decay=args.wd,
)
criterion = nn.BCEWithLogitsLoss(reduction='none')
print('Start training')
it = 0
losses = AverageMeter()
while it < args.nit:
it = train(
loader,
model,
optimizer,
criterion,
args.fc6_8,
losses,
current_iteration=it,
total_iterations=args.nit,
stepsize=args.stepsize,
)
print('Model Evaluation')
if args.eval_random_crops:
transform_eval = [
transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop(224),
transforms.ToTensor(),] + normalize
else:
transform_eval = [
transforms.Resize(256),
transforms.TenCrop(224),
transforms.Lambda(lambda crops: torch.stack([transforms.Compose(normalize)(transforms.ToTensor()(crop)) for crop in crops]))
]
print('Train set')
train_dataset = VOC2007_dataset(
args.data_path,
split=args.split,
transform=transforms.Compose(transform_eval),
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=1,
shuffle=False,
num_workers=4,
pin_memory=True,
)
evaluate(train_loader, model, args.eval_random_crops)
print('Test set')
test_dataset = VOC2007_dataset(args.data_path, split=args.test, transform=transforms.Compose(transform_eval))
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=1,
shuffle=False,
num_workers=4,
pin_memory=True,
)
evaluate(test_loader, model, args.eval_random_crops)
def evaluate(loader, model, eval_random_crops):
model.eval()
gts = []
scr = []
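    # When eval_random_crops is True this loops 10 times over independent random
    # crops; otherwise it runs once, and the TenCrop transform already packs the
    # 10 fixed crops into each batch (summed over in the crop < 1 branch below).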
for crop in range(9 * eval_random_crops + 1):
for i, (input, target) in enumerate(loader):
# move input to gpu and optionally reshape it
if len(input.size()) == 5:
bs, ncrops, c, h, w = input.size()
input = input.view(-1, c, h, w)
input = input.cuda(non_blocking=True)
# forward pass without grad computation
with torch.no_grad():
output = model(input)
if crop < 1 :
scr.append(torch.sum(output, 0, keepdim=True).cpu().numpy())
gts.append(target)
else:
scr[i] += output.cpu().numpy()
gts = np.concatenate(gts, axis=0).T
scr = np.concatenate(scr, axis=0).T
aps = []
for i in range(20):
# Subtract eps from score to make AP work for tied scores
ap = metrics.average_precision_score(gts[i][gts[i]<=1], scr[i][gts[i]<=1]-1e-5*gts[i][gts[i]<=1])
aps.append( ap )
print(np.mean(aps), ' ', ' '.join(['%0.2f'%a for a in aps]))
def train(loader, model, optimizer, criterion, fc6_8, losses, current_iteration=0, total_iterations=None, stepsize=None, verbose=True):
# to log
batch_time = AverageMeter()
data_time = AverageMeter()
top1 = AverageMeter()
end = time.time()
# use dropout for the MLP
if hasattr(model.body, 'classifier'):
model.train()
# in the batch norms always use global statistics
model.body.features.eval()
else:
model.eval()
for i, (input, target) in enumerate(loader):
# measure data loading time
data_time.update(time.time() - end)
# adjust learning rate
if current_iteration != 0 and current_iteration % stepsize == 0:
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * 0.5
print('iter {0} learning rate is {1}'.format(current_iteration, param_group['lr']))
# move input to gpu
input = input.cuda(non_blocking=True)
# forward pass with or without grad computation
output = model(input)
target = target.float().cuda()
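        # in this dataset's label encoding a target of 255 marks difficult /
        # ambiguous objects; those entries are masked out of the BCE loss below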
mask = (target == 255)
loss = torch.sum(criterion(output, target).masked_fill_(mask, 0)) / target.size(0)
# backward
optimizer.zero_grad()
loss.backward()
# clip gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
# and weights update
optimizer.step()
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
        if verbose and current_iteration % 25 == 0:
print('Iteration[{0}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
current_iteration, batch_time=batch_time,
data_time=data_time, loss=losses))
current_iteration = current_iteration + 1
if total_iterations is not None and current_iteration == total_iterations:
break
return current_iteration
if __name__ == '__main__':
main()
|
DeeperCluster-main
|
eval_voc_classif.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import apex
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from src.clustering import get_cluster_assignments, load_cluster_assignments
from src.data.loader import get_data_transformations
from src.data.YFCC100M import YFCC100M_dataset
from src.model.model_factory import (build_prediction_layer, model_factory,
sgd_optimizer, to_cuda)
from src.model.pretrain import load_pretrained
from src.slurm import init_signal_handler
from src.trainer import train_network
from src.utils import (bool_flag, check_parameters, end_of_epoch, fix_random_seeds,
init_distributed_mode, initialize_exp, restart_from_checkpoint)
def get_parser():
"""
Generate a parameters parser.
"""
# parse parameters
parser = argparse.ArgumentParser(description="Unsupervised feature learning.")
# handling experiment parameters
parser.add_argument("--checkpoint_freq", type=int, default=1,
help="Save the model every this epoch.")
parser.add_argument("--dump_path", type=str, default="./exp",
help="Experiment dump path.")
parser.add_argument('--epoch', type=int, default=0,
help='Current epoch to run.')
parser.add_argument('--start_iter', type=int, default=0,
help='First iter to run in the current epoch.')
# network params
parser.add_argument('--pretrained', type=str, default='',
help='Start from this instead of random weights.')
# datasets params
parser.add_argument('--data_path', type=str, default='',
help='Where to find training dataset.')
parser.add_argument('--size_dataset', type=int, default=10000000,
help='How many images to use.')
parser.add_argument('--workers', type=int, default=8,
help='Number of data loading workers.')
parser.add_argument('--sobel', type=bool_flag, default=0,
help='Apply Sobel filter.')
# optim params
parser.add_argument('--lr', type=float, default=0.1, help='Learning rate.')
parser.add_argument('--wd', type=float, default=1e-5, help='Weight decay.')
parser.add_argument('--nepochs', type=int, default=100,
help='Max number of epochs to run.')
parser.add_argument('--batch_size', default=48, type=int,
help='Batch-size per process.')
# Model params
parser.add_argument('--reassignment', type=int, default=3,
                        help='Reassign clusters every this many epochs.')
parser.add_argument('--dim_pca', type=int, default=4096,
help='Dimension of the pca applied to the descriptors.')
parser.add_argument('--k', type=int, default=10000,
help='Total number of clusters.')
parser.add_argument('--super_classes', type=int, default=4,
help='Total number of super-classes.')
parser.add_argument('--rotnet', type=bool_flag, default=True,
help='Network needs to classify large rotations.')
# k-means params
parser.add_argument('--warm_restart', type=bool_flag, default=False,
help='Use previous centroids as init.')
parser.add_argument('--use_faiss', type=bool_flag, default=True,
help='Use faiss for E steps in k-means.')
parser.add_argument('--niter', type=int, default=10,
help='Number of k-means iterations.')
# distributed training params
parser.add_argument('--rank', default=0, type=int,
help='Global process rank.')
parser.add_argument("--local_rank", type=int, default=-1,
help="Multi-GPU - Local rank")
parser.add_argument('--world-size', default=1, type=int,
help='Number of distributed processes.')
parser.add_argument('--dist-url', default='', type=str,
help='Url used to set up distributed training.')
# debug
parser.add_argument("--debug_slurm", type=bool_flag, default=False,
help="Debug within a SLURM job.")
return parser.parse_args()
def main(args):
"""
This code implements the paper: https://arxiv.org/abs/1905.01278
    The method consists of alternating between a hierarchical clustering of the
    features and learning the parameters of a convnet by predicting both the
    angle of the rotation applied to the input data and the cluster assignments
    in a single hierarchical loss.
"""
# initialize communication groups
training_groups, clustering_groups = init_distributed_mode(args)
# check parameters
check_parameters(args)
# initialize the experiment
logger, training_stats = initialize_exp(args, 'epoch', 'iter', 'prec', 'loss',
'prec_super_class', 'loss_super_class',
'prec_sub_class', 'loss_sub_class')
# initialize SLURM signal handler for time limit / pre-emption
init_signal_handler()
# load data
dataset = YFCC100M_dataset(args.data_path, size=args.size_dataset)
# prepare the different data transformations
tr_cluster, tr_train = get_data_transformations(args.rotation * 90)
# build model skeleton
fix_random_seeds()
model = model_factory(args.sobel)
logger.info('model created')
# load pretrained weights
load_pretrained(model, args)
# convert batch-norm layers to nvidia wrapper to enable batch stats reduction
model = apex.parallel.convert_syncbn_model(model)
# distributed training wrapper
model = to_cuda(model, args.gpu_to_work_on, apex=True)
logger.info('model to cuda')
# set optimizer
optimizer = sgd_optimizer(model, args.lr, args.wd)
# load cluster assignments
cluster_assignments = load_cluster_assignments(args, dataset)
# build prediction layer on the super_class
pred_layer, optimizer_pred_layer = build_prediction_layer(
model.module.body.dim_output_space,
args,
)
nmb_sub_classes = args.k // args.nmb_super_clusters
sub_class_pred_layer, optimizer_sub_class_pred_layer = build_prediction_layer(
model.module.body.dim_output_space,
args,
num_classes=nmb_sub_classes,
group=training_groups[args.training_local_world_id],
)
# variables to fetch in checkpoint
to_restore = {'epoch': 0, 'start_iter': 0}
    # restart from checkpoint
restart_from_checkpoint(
args,
run_variables=to_restore,
state_dict=model,
optimizer=optimizer,
pred_layer_state_dict=pred_layer,
optimizer_pred_layer=optimizer_pred_layer,
)
pred_layer_name = str(args.training_local_world_id) + '-pred_layer.pth.tar'
restart_from_checkpoint(
args,
ckp_path=os.path.join(args.dump_path, pred_layer_name),
state_dict=sub_class_pred_layer,
optimizer=optimizer_sub_class_pred_layer,
)
args.epoch = to_restore['epoch']
args.start_iter = to_restore['start_iter']
for _ in range(args.epoch, args.nepochs):
logger.info("============ Starting epoch %i ... ============" % args.epoch)
fix_random_seeds(args.epoch)
# step 1: Get the final activations for the whole dataset / Cluster them
if cluster_assignments is None and not args.epoch % args.reassignment:
logger.info("=> Start clustering step")
dataset.transform = tr_cluster
cluster_assignments = get_cluster_assignments(args, model, dataset, clustering_groups)
# reset prediction layers
if args.nmb_super_clusters > 1:
pred_layer, optimizer_pred_layer = build_prediction_layer(
model.module.body.dim_output_space,
args,
)
sub_class_pred_layer, optimizer_sub_class_pred_layer = build_prediction_layer(
model.module.body.dim_output_space,
args,
num_classes=nmb_sub_classes,
group=training_groups[args.training_local_world_id],
)
# step 2: Train the network with the cluster assignments as labels
# prepare dataset
dataset.transform = tr_train
dataset.sub_classes = cluster_assignments
# concatenate models and their corresponding optimizers
models = [model, pred_layer, sub_class_pred_layer]
optimizers = [optimizer, optimizer_pred_layer, optimizer_sub_class_pred_layer]
# train the network for one epoch
scores = train_network(args, models, optimizers, dataset)
## save training statistics
logger.info(scores)
training_stats.update(scores)
# reassign clusters at the next epoch
if not args.epoch % args.reassignment:
cluster_assignments = None
dataset.subset_indexes = None
end_of_epoch(args)
dist.barrier()
if __name__ == '__main__':
# generate parser / parse parameters
args = get_parser()
# run experiment
main(args)
|
DeeperCluster-main
|
main.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from logging import getLogger
import os
import time
import numpy as np
from sklearn import metrics
import torch
import torch.nn as nn
import torch.utils.data
from src.data.loader import load_data, get_data_transformations, KFold, per_target
from src.model.model_factory import model_factory, to_cuda, sgd_optimizer
from src.model.pretrain import load_pretrained
from src.slurm import init_signal_handler, trigger_job_requeue
from src.trainer import validate_network, accuracy
from src.data.VOC2007 import VOC2007_dataset
from src.utils import (bool_flag, init_distributed_mode, initialize_exp, AverageMeter,
restart_from_checkpoint, fix_random_seeds,)
logger = getLogger()
def get_parser():
"""
Generate a parameters parser.
"""
# parse parameters
parser = argparse.ArgumentParser(description="Train a linear classifier on conv layer")
# main parameters
parser.add_argument("--dump_path", type=str, default=".",
help="Experiment dump path")
parser.add_argument('--epoch', type=int, default=0,
help='Current epoch to run')
parser.add_argument('--start_iter', type=int, default=0,
help='First iter to run in the current epoch')
# model params
parser.add_argument('--pretrained', type=str, default='',
help='Use this instead of random weights.')
parser.add_argument('--conv', type=int, default=1, choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
help='On top of which layer train classifier.')
# datasets params
parser.add_argument('--data_path', type=str, default='',
help='Where to find supervised dataset')
parser.add_argument('--workers', type=int, default=8,
help='Number of data loading workers')
parser.add_argument('--sobel', type=bool_flag, default=False)
# optim params
parser.add_argument('--lr', type=float, default=0.05, help='Learning rate')
parser.add_argument('--wd', type=float, default=1e-5, help='Weight decay')
parser.add_argument('--nepochs', type=int, default=100,
help='Max number of epochs to run')
parser.add_argument('--batch_size', default=64, type=int)
# model selection
parser.add_argument('--split', type=str, required=False, default='train', choices=['train', 'trainval'],
help='for PASCAL dataset, train on train or train+val')
parser.add_argument('--kfold', type=int, default=None,
help="""dataset randomly partitioned into kfold equal sized subsamples.
Default None: no cross validation: train on full train set""")
parser.add_argument('--cross_valid', type=int, default=None,
help='between 0 and kfold - 1: index of the round of cross validation')
# distributed training params
parser.add_argument('--rank', default=0, type=int,
help='rank')
parser.add_argument("--local_rank", type=int, default=-1,
help="Multi-GPU - Local rank")
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='', type=str,
help='url used to set up distributed training')
# debug
parser.add_argument("--debug_slurm", type=bool_flag, default=False,
help="Debug within a SLURM job")
return parser.parse_args()
def main(args):
# initialize the multi-GPU / multi-node training
init_distributed_mode(args, make_communication_groups=False)
# initialize the experiment
logger, training_stats = initialize_exp(args, 'epoch', 'iter', 'prec',
'loss', 'prec_val', 'loss_val')
# initialize SLURM signal handler for time limit / pre-emption
init_signal_handler()
if not 'pascal' in args.data_path:
main_data_path = args.data_path
args.data_path = os.path.join(main_data_path, 'train')
train_dataset = load_data(args)
else:
train_dataset = VOC2007_dataset(args.data_path, split=args.split)
args.test = 'val' if args.split == 'train' else 'test'
if not 'pascal' in args.data_path:
if args.cross_valid is None:
args.data_path = os.path.join(main_data_path, 'val')
val_dataset = load_data(args)
else:
val_dataset = VOC2007_dataset(args.data_path, split=args.test)
if args.cross_valid is not None:
kfold = KFold(per_target(train_dataset.imgs), args.cross_valid, args.kfold)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, sampler=kfold.train,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=args.batch_size, sampler=kfold.val,
num_workers=args.workers)
else:
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
# prepare the different data transformations
tr_val, tr_train = get_data_transformations()
train_dataset.transform = tr_train
val_dataset.transform = tr_val
# build model skeleton
fix_random_seeds()
model = model_factory(args)
load_pretrained(model, args)
# keep only conv layers
model.body.classifier = None
model.conv = args.conv
if 'places' in args.data_path:
nmb_classes = 205
elif 'pascal' in args.data_path:
nmb_classes = 20
else:
nmb_classes = 1000
reglog = RegLog(nmb_classes, args.conv)
# distributed training wrapper
model = to_cuda(model, [args.gpu_to_work_on], apex=True)
reglog = to_cuda(reglog, [args.gpu_to_work_on], apex=True)
logger.info('model to cuda')
# set optimizer
optimizer = sgd_optimizer(reglog, args.lr, args.wd)
    # variables to fetch from the checkpoint
    to_restore = {'epoch': 0, 'start_iter': 0}
    # restart from checkpoint
restart_from_checkpoint(
args,
run_variables=to_restore,
state_dict=reglog,
optimizer=optimizer,
)
args.epoch = to_restore['epoch']
args.start_iter = to_restore['start_iter']
model.eval()
reglog.train()
# Linear training
for _ in range(args.epoch, args.nepochs):
logger.info("============ Starting epoch %i ... ============" % args.epoch)
# train the network for one epoch
scores = train_network(args, model, reglog, optimizer, train_loader)
if not 'pascal' in args.data_path:
scores_val = validate_network(val_loader, [model, reglog], args)
else:
scores_val = evaluate_pascal(val_dataset, [model, reglog])
scores = scores + scores_val
# save training statistics
logger.info(scores)
training_stats.update(scores)
def evaluate_pascal(val_dataset, models):
val_loader = torch.utils.data.DataLoader(
val_dataset,
sampler=torch.utils.data.distributed.DistributedSampler(val_dataset),
batch_size=1,
num_workers=args.workers,
pin_memory=True,
)
for model in models:
model.eval()
gts = []
scr = []
for i, (input, target) in enumerate(val_loader):
# move input to gpu and optionally reshape it
input = input.cuda(non_blocking=True)
# forward pass without grad computation
with torch.no_grad():
output = models[0](input)
output = models[1](output)
scr.append(torch.sum(output, 0, keepdim=True).cpu().numpy())
gts.append(target)
scr[i] += output.cpu().numpy()
gts = np.concatenate(gts, axis=0).T
scr = np.concatenate(scr, axis=0).T
aps = []
for i in range(20):
# Subtract eps from score to make AP work for tied scores
ap = metrics.average_precision_score(gts[i][gts[i]<=1], scr[i][gts[i]<=1]-1e-5*gts[i][gts[i]<=1])
aps.append(ap)
print(np.mean(aps), ' ', ' '.join(['%0.2f'%a for a in aps]))
return np.mean(aps), 0
class RegLog(nn.Module):
"""Creates logistic regression on top of frozen features"""
def __init__(self, num_labels, conv):
super(RegLog, self).__init__()
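        # av is the average-pooling kernel/stride and s the resulting flattened
        # feature dimension for the chosen conv layer of the VGG-16 body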
if conv < 3:
av = 18
s = 9216
elif conv < 5:
av = 14
s = 8192
elif conv < 8:
av = 9
s = 9216
elif conv < 11:
av = 6
s = 8192
elif conv < 14:
av = 3
s = 8192
self.av_pool = nn.AvgPool2d(av, stride=av, padding=0)
self.linear = nn.Linear(s, num_labels)
def forward(self, x):
x = self.av_pool(x)
x = x.view(x.size(0), -1)
return self.linear(x)
def train_network(args, model, reglog, optimizer, loader):
"""
Train the models on the dataset.
"""
# running statistics
batch_time = AverageMeter()
data_time = AverageMeter()
# training statistics
log_top1 = AverageMeter()
log_loss = AverageMeter()
end = time.perf_counter()
if 'pascal' in args.data_path:
criterion = nn.BCEWithLogitsLoss(reduction='none')
else:
criterion = nn.CrossEntropyLoss().cuda()
for iter_epoch, (inp, target) in enumerate(loader):
# measure data loading time
data_time.update(time.perf_counter() - end)
learning_rate_decay(optimizer, len(loader) * args.epoch + iter_epoch, args.lr)
# start at iter start_iter
if iter_epoch < args.start_iter:
continue
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
if 'pascal' in args.data_path:
target = target.float()
# forward
with torch.no_grad():
output = model(inp)
output = reglog(output)
# compute cross entropy loss
loss = criterion(output, target)
if 'pascal' in args.data_path:
mask = (target == 255)
loss = torch.sum(loss.masked_fill_(mask, 0)) / target.size(0)
optimizer.zero_grad()
# compute the gradients
loss.backward()
# step
optimizer.step()
# log
# signal received, relaunch experiment
if os.environ['SIGNAL_RECEIVED'] == 'True':
if not args.rank:
torch.save({
'epoch': args.epoch,
'start_iter': iter_epoch + 1,
'state_dict': reglog.state_dict(),
'optimizer': optimizer.state_dict(),
}, os.path.join(args.dump_path, 'checkpoint.pth.tar'))
trigger_job_requeue(os.path.join(args.dump_path, 'checkpoint.pth.tar'))
# update stats
log_loss.update(loss.item(), output.size(0))
if not 'pascal' in args.data_path:
prec1 = accuracy(args, output, target)
log_top1.update(prec1.item(), output.size(0))
batch_time.update(time.perf_counter() - end)
end = time.perf_counter()
# verbose
if iter_epoch % 100 == 0:
logger.info('Epoch[{0}] - Iter: [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec {log_top1.val:.3f} ({log_top1.avg:.3f})\t'
.format(args.epoch, iter_epoch, len(loader), batch_time=batch_time,
data_time=data_time, loss=log_loss, log_top1=log_top1))
# end of epoch
args.start_iter = 0
args.epoch += 1
# dump checkpoint
if not args.rank:
torch.save({
'epoch': args.epoch,
'start_iter': 0,
'state_dict': reglog.state_dict(),
'optimizer': optimizer.state_dict(),
}, os.path.join(args.dump_path, 'checkpoint.pth.tar'))
return (args.epoch - 1, args.epoch * len(loader), log_top1.avg, log_loss.avg)
def learning_rate_decay(optimizer, t, lr_0):
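    # inverse square-root schedule: lr(t) = lr_0 / sqrt(1 + lr_0 * wd * t),
    # where t counts optimization steps and wd is the weight decay of the group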
for param_group in optimizer.param_groups:
lr = lr_0 / np.sqrt(1 + lr_0 * param_group['weight_decay'] * t)
param_group['lr'] = lr
if __name__ == '__main__':
# generate parser / parse parameters
args = get_parser()
# run experiment
main(args)
|
DeeperCluster-main
|
eval_linear.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
import os
import pickle
import faiss
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
import numpy as np
from .utils import PCA, AverageMeter, normalize, get_indices_sparse
from .distributed_kmeans import distributed_kmeans, initialize_cache
logger = getLogger()
def get_cluster_assignments(args, model, dataset, groups):
"""
"""
# pseudo-labels are confusing
dataset.sub_classes = None
    # switch to eval mode
model.eval()
# this process deals only with a subset of the dataset
local_nmb_data = len(dataset) // args.world_size
indices = torch.arange(args.rank * local_nmb_data, (args.rank + 1) * local_nmb_data).int()
if os.path.isfile(os.path.join(args.dump_path, 'super_class_assignments.pkl')):
# super-class assignments have already been computed in a previous run
super_class_assignements = pickle.load(open(os.path.join(args.dump_path, 'super_class_assignments.pkl'), 'rb'))
logger.info('loaded super-class assignments')
# dump cache
where_helper = get_indices_sparse(super_class_assignements[indices])
nmb_data_per_super_cluster = torch.zeros(args.nmb_super_clusters).cuda()
for super_class in range(len(where_helper)):
nmb_data_per_super_cluster[super_class] = len(where_helper[super_class][0])
else:
sampler = Subset_Sampler(indices)
# we need a data loader
loader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
sampler=sampler,
num_workers=args.workers,
pin_memory=True,
)
# initialize cache, pca and centroids
cache, centroids = initialize_cache(args, loader, model)
# empty cuda cache (useful because we're about to use faiss on gpu)
torch.cuda.empty_cache()
## perform clustering into super_clusters
super_class_assignements, centroids_sc = distributed_kmeans(
args,
args.size_dataset,
args.nmb_super_clusters,
cache,
args.rank,
args.world_size,
centroids,
)
# dump activations in the cache
where_helper = get_indices_sparse(super_class_assignements[indices])
nmb_data_per_super_cluster = torch.zeros(args.nmb_super_clusters).cuda()
for super_class in range(len(where_helper)):
ind_sc = where_helper[super_class][0]
np.save(open(os.path.join(
args.dump_path,
'cache/',
'super_class' + str(super_class) + '-' + str(args.rank),
), 'wb'), cache[ind_sc])
nmb_data_per_super_cluster[super_class] = len(ind_sc)
dist.barrier()
# dump super_class assignment and centroids of super_class
if not args.rank:
pickle.dump(
super_class_assignements,
open(os.path.join(args.dump_path, 'super_class_assignments.pkl'), 'wb'),
)
pickle.dump(
centroids_sc,
open(os.path.join(args.dump_path, 'super_class_centroids.pkl'), 'wb'),
)
# size of the different super clusters
all_counts = [torch.zeros(args.nmb_super_clusters).cuda() for _ in range(args.world_size)]
dist.all_gather(all_counts, nmb_data_per_super_cluster)
all_counts = torch.cat(all_counts).cpu().long()
all_counts = all_counts.reshape(args.world_size, args.nmb_super_clusters)
logger.info(all_counts.sum(dim=0))
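    # second level: each clustering group keeps only the images assigned to its
    # own super-cluster, reloads their cached features from disk and runs a
    # distributed k-means within that super-cluster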
# what are the data belonging to this super class
dataset.subset_indexes = np.where(super_class_assignements == args.clustering_local_world_id)[0]
div = args.batch_size * args.clustering_local_world_size
dataset.subset_indexes = dataset.subset_indexes[:len(dataset) // div * div]
dist.barrier()
# which files this process is going to read
local_nmb_data = int(len(dataset) / args.clustering_local_world_size)
low = np.long(args.clustering_local_rank * local_nmb_data)
high = np.long(low + local_nmb_data)
curr_ind = 0
cache = torch.zeros(local_nmb_data, args.dim_pca, dtype=torch.float32)
cumsum = torch.cumsum(all_counts[:, args.clustering_local_world_id].long(), 0).long()
for r in range(args.world_size):
# data in this bucket r: [cumsum[r - 1] : cumsum[r] - 1]
low_bucket = np.long(cumsum[r - 1]) if r else 0
# this bucket is empty
if low_bucket > cumsum[r] - 1:
continue
if cumsum[r] - 1 < low:
continue
if low_bucket >= high:
break
# which are the data we are interested in inside this bucket ?
ind_low = np.long(max(low, low_bucket))
ind_high = np.long(min(high, cumsum[r]))
cache_r = np.load(open(os.path.join(args.dump_path, 'cache/', 'super_class' + str(args.clustering_local_world_id) + '-' + str(r)), 'rb'))
cache[curr_ind: curr_ind + ind_high - ind_low] = torch.FloatTensor(cache_r[ind_low - low_bucket: ind_high - low_bucket])
curr_ind += (ind_high - ind_low)
# randomly pick some centroids and dump them
centroids_path = os.path.join(args.dump_path, 'centroids' + str(args.clustering_local_world_id) + '.pkl')
if not args.clustering_local_rank:
centroids = cache[np.random.choice(
np.arange(cache.shape[0]),
replace=cache.shape[0] < args.k // args.nmb_super_clusters,
size=args.k // args.nmb_super_clusters,
)]
pickle.dump(centroids, open(centroids_path, 'wb'), -1)
dist.barrier()
# read centroids
centroids = pickle.load(open(centroids_path, 'rb')).cuda()
# distributed kmeans into sub-classes
cluster_assignments, centroids = distributed_kmeans(
args,
len(dataset),
args.k // args.nmb_super_clusters,
cache,
args.clustering_local_rank,
args.clustering_local_world_size,
centroids,
world_id=args.clustering_local_world_id,
group=groups[args.clustering_local_world_id],
)
# free RAM
del cache
# write cluster assignments and centroids
if not args.clustering_local_rank:
pickle.dump(
cluster_assignments,
open(os.path.join(args.dump_path, 'cluster_assignments' + str(args.clustering_local_world_id) + '.pkl'), 'wb'),
)
pickle.dump(
centroids,
open(centroids_path, 'wb'),
)
dist.barrier()
return cluster_assignments
class Subset_Sampler(Sampler):
"""
Sample indices.
"""
def __init__(self, indices):
self.indices = indices
def __iter__(self):
return iter(self.indices)
def __len__(self):
return len(self.indices)
def load_cluster_assignments(args, dataset):
"""
Load cluster assignments if they are present in experiment repository.
"""
super_file = os.path.join(args.dump_path, 'super_class_assignments.pkl')
sub_file = os.path.join(
args.dump_path,
        # must match the name used when dumping assignments in get_cluster_assignments
        'cluster_assignments' + str(args.clustering_local_world_id) + '.pkl',
)
if os.path.isfile(super_file) and os.path.isfile(sub_file):
super_class_assignments = pickle.load(open(super_file, 'rb'))
dataset.subset_indexes = np.where(super_class_assignments == args.clustering_local_world_id)[0]
div = args.batch_size * args.clustering_local_world_size
clustering_size_dataset = len(dataset) // div * div
dataset.subset_indexes = dataset.subset_indexes[:clustering_size_dataset]
logger.info('Found cluster assignments in experiment repository')
return pickle.load(open(sub_file, "rb"))
return None
|
DeeperCluster-main
|
src/clustering.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
import os
import signal
import time
logger = getLogger()
def trigger_job_requeue(checkpoint_filename):
''' Submit a new job to resume from checkpoint.
    Be careful: call this only from the main process.
'''
if int(os.environ['SLURM_PROCID']) == 0 and \
str(os.getpid()) == os.environ['MAIN_PID'] and os.path.isfile(checkpoint_filename):
print('time is up, back to slurm queue', flush=True)
command = 'scontrol requeue ' + os.environ['SLURM_JOB_ID']
print(command)
if os.system(command):
raise RuntimeError('requeue failed')
print('New job submitted to the queue', flush=True)
exit(0)
def SIGTERMHandler(a, b):
print('received sigterm')
pass
def signalHandler(a, b):
print('Signal received', a, time.time(), flush=True)
os.environ['SIGNAL_RECEIVED'] = 'True'
return
def init_signal_handler():
"""
Handle signals sent by SLURM for time limit / pre-emption.
"""
os.environ['SIGNAL_RECEIVED'] = 'False'
os.environ['MAIN_PID'] = str(os.getpid())
signal.signal(signal.SIGUSR1, signalHandler)
signal.signal(signal.SIGTERM, SIGTERMHandler)
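    # SIGUSR1 (sent by SLURM ahead of the time limit) only sets a flag that the
    # training loop checks before checkpointing and requeueing; SIGTERM is
    # ignored so that pre-emption cannot kill the job before it requeues itself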
print("Signal handler installed.", flush=True)
|
DeeperCluster-main
|
src/slurm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
DeeperCluster-main
|
src/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import logging
import time
from datetime import timedelta
import pandas as pd
class LogFormatter():
def __init__(self):
self.start_time = time.time()
def format(self, record):
elapsed_seconds = round(record.created - self.start_time)
prefix = "%s - %s - %s" % (
record.levelname,
time.strftime('%x %X'),
timedelta(seconds=elapsed_seconds)
)
message = record.getMessage()
message = message.replace('\n', '\n' + ' ' * (len(prefix) + 3))
return "%s - %s" % (prefix, message) if message else ''
def create_logger(filepath, rank):
"""
Create a logger.
Use a different log file for each process.
"""
# create log formatter
log_formatter = LogFormatter()
# create file handler and set level to debug
if filepath is not None:
if rank > 0:
filepath = '%s-%i' % (filepath, rank)
file_handler = logging.FileHandler(filepath, "a")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_formatter)
# create console handler and set level to info
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_formatter)
# create logger and set level to debug
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.DEBUG)
logger.propagate = False
if filepath is not None:
logger.addHandler(file_handler)
logger.addHandler(console_handler)
# reset logger elapsed time
def reset_time():
log_formatter.start_time = time.time()
logger.reset_time = reset_time
return logger
class PD_Stats(object):
"""
    Log training statistics with the pandas library.
"""
def __init__(self, path, columns):
self.path = path
# reload path stats
if os.path.isfile(self.path):
self.stats = pd.read_pickle(self.path)
# check that columns are the same
assert list(self.stats.columns) == list(columns)
else:
self.stats = pd.DataFrame(columns=columns)
def update(self, row, save=True):
self.stats.loc[len(self.stats.index)] = row
# save the statistics
if save:
self.stats.to_pickle(self.path)
|
DeeperCluster-main
|
src/logger.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
import os
import pickle
import time
import faiss
import numpy as np
import torch
import torch.distributed as dist
from .utils import fix_random_seeds, AverageMeter, PCA, normalize
logger = getLogger()
def initialize_cache(args, loader, model):
"""
Accumulate features to compute pca.
Cache the dataset.
"""
# we limit the size of the cache per process
local_cache_size = min(len(loader), 3150000 // args.batch_size) * args.batch_size
# total batch_size
batch_size = args.batch_size * args.world_size
# how many batches do we need to approximate the covariance matrix
N = model.module.body.dim_output_space
nmb_batches_for_pca = int(N * (N - 1) / 2 / args.batch_size / args.world_size)
logger.info("Require {} images ({} iterations) for pca".format(
nmb_batches_for_pca * args.batch_size * args.world_size, nmb_batches_for_pca))
if nmb_batches_for_pca > len(loader):
nmb_batches_for_pca = len(loader)
logger.warning("Compute the PCA on {} images (entire dataset)".format(args.size_dataset))
# statistics
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
with torch.no_grad():
for i, (input_tensor, _) in enumerate(loader):
# time spent to load data
data_time.update(time.time() - end)
# move to gpu
input_tensor = input_tensor.type(torch.FloatTensor).cuda()
# forward
feat = model(input_tensor)
# before the pca has been computed
if i < nmb_batches_for_pca:
# gather the features computed by all processes
all_feat = [torch.cuda.FloatTensor(feat.size()) for src in range(args.world_size)]
dist.all_gather(all_feat, feat)
# only main process computes the PCA
if not args.rank:
all_feat = torch.cat(all_feat).cpu().numpy()
# initialize storage arrays
if i == 0:
if not args.rank:
for_pca = np.zeros(
(nmb_batches_for_pca * batch_size, all_feat.shape[1]),
dtype=np.float32,
)
for_cache = torch.zeros(
nmb_batches_for_pca * args.batch_size,
feat.size(1),
dtype=torch.float32,
)
# fill in arrays
if not args.rank:
for_pca[i * batch_size: (i + 1) * batch_size] = all_feat
for_cache[i * args.batch_size: (i + 1) * args.batch_size] = feat.cpu()
# train the pca
if i == nmb_batches_for_pca - 1:
pca_path = os.path.join(args.dump_path, 'pca.pkl')
centroids_path = os.path.join(args.dump_path, 'centroids.pkl')
# compute the PCA
if not args.rank:
# init PCA object
pca = PCA(dim=args.dim_pca, whit=0.5)
# center data
mean = np.mean(for_pca, axis=0).astype('float32')
for_pca -= mean
# compute covariance
cov = np.dot(for_pca.T, for_pca) / for_pca.shape[0]
# calculate the pca
pca.train_pca(cov)
# randomly pick some centroids
centroids = pca.apply(for_pca[np.random.choice(
np.arange(for_pca.shape[0]),
replace=False,
size=args.nmb_super_clusters,
)])
centroids = normalize(centroids)
pca.mean = mean
# free memory
del for_pca
# write PCA to disk
pickle.dump(pca, open(pca_path, 'wb'))
pickle.dump(centroids, open(centroids_path, 'wb'))
                # processes wait for the main process to compute and write the PCA and centroids
dist.barrier()
# processes read PCA and centroids from disk
pca = pickle.load(open(pca_path, "rb"))
centroids = pickle.load(open(centroids_path, "rb"))
# apply the pca to the cached features
for_cache = pca.apply(for_cache)
for_cache = normalize(for_cache)
# extend the cache
current_cache_size = for_cache.size(0)
for_cache = torch.cat((for_cache, torch.zeros(
local_cache_size - current_cache_size,
args.dim_pca,
)))
logger.info('{0} imgs cached => cache is {1:.2f} % full'
.format(current_cache_size, 100 * current_cache_size / local_cache_size))
# keep accumulating data
if i > nmb_batches_for_pca - 1:
feat = pca.apply(feat)
feat = normalize(feat)
for_cache[i * args.batch_size: (i + 1) * args.batch_size] = feat.cpu()
# verbose
batch_time.update(time.time() - end)
end = time.time()
if i % 200 == 0:
logger.info('{0} / {1}\t'
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Time: {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(i, len(loader), batch_time=batch_time, data_time=data_time))
# move centroids to GPU
centroids = torch.cuda.FloatTensor(centroids)
return for_cache, centroids
def distributed_kmeans(args, n_all, nk, cache, rank, world_size, centroids, world_id=0, group=None):
"""
Distributed mini-batch k-means.
"""
# local assignments
assignments = -1 * np.ones(n_all // world_size)
# prepare faiss index
if args.use_faiss:
res = faiss.StandardGpuResources()
cfg = faiss.GpuIndexFlatConfig()
cfg.device = args.gpu_to_work_on
index = faiss.GpuIndexFlatL2(res, args.dim_pca, cfg)
end = time.time()
for p in range(args.niter + 1):
start_pass = time.time()
# running statistics
batch_time = AverageMeter()
log_loss = AverageMeter()
# initialize arrays for update
local_counts = torch.zeros(nk).cuda()
local_feats = torch.zeros(nk, args.dim_pca).cuda()
# prepare E step
torch.cuda.empty_cache()
if args.use_faiss:
index.reset()
index.add(centroids.cpu().numpy().astype('float32'))
else:
centroids_L2_norm = centroids.norm(dim=1)**2
nmb_batches = n_all // world_size // args.batch_size
for it in range(nmb_batches):
# fetch mini-batch
feat = cache[it * args.batch_size: (it + 1) * args.batch_size]
# E-step
if args.use_faiss:
D, I = index.search(feat.numpy().astype('float32'), 1)
I = I.squeeze(1)
else:
# find current cluster assignments
l2dist = 1 - 2 * torch.mm(feat.cuda(non_blocking=True), centroids.transpose(0, 1)) + centroids_L2_norm
D, I = l2dist.min(dim=1)
I = I.cpu().numpy()
D = D.cpu().numpy()
# update assignment array
assignments[it * args.batch_size: (it + 1) * args.batch_size] = I
# log
log_loss.update(D.mean())
for k in np.unique(I):
idx_k = np.where(I == k)[0]
# number of elmt in cluster k for this batch
local_counts[k] += len(idx_k)
# sum of elmt belonging to this cluster
local_feats[k, :] += feat.cuda(non_blocking=True)[idx_k].sum(dim=0)
batch_time.update(time.time() - end)
end = time.time()
if it and it % 1000 == 0:
logger.info('Pass[{0}] - Iter: [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
.format(p, it, nmb_batches, batch_time=batch_time))
# all reduce operation
# processes share what it is needed for M-step
if group is not None:
dist.all_reduce(local_counts, group=group)
dist.all_reduce(local_feats, group=group)
else:
dist.all_reduce(local_counts)
dist.all_reduce(local_feats)
# M-step
# update centroids (for the last pass we only want the assignments)
mask = local_counts.nonzero()
if p < args.niter:
centroids[mask] = 1. / local_counts[mask].unsqueeze(1) * local_feats[mask]
# deal with empty clusters
for k in (local_counts == 0).nonzero():
# choose a random cluster from the set of non empty clusters
np.random.seed(world_id)
m = mask[np.random.randint(len(mask))]
# replace empty centroid by a non empty one with a perturbation
centroids[k] = centroids[m]
for j in range(args.dim_pca):
                    sign = (j % 2) * 2 - 1
                    centroids[k, j] += sign * 1e-7
                    centroids[m, j] -= sign * 1e-7
                # update the counts
                local_counts[k] = local_counts[m] // 2
                local_counts[m] -= local_counts[k]
# update the assignments
assignments[np.where(assignments == m.item())[0][: int(local_counts[m])]] = k.cpu()
logger.info('cluster {} empty => split cluster {}'.format(k, m))
logger.info(' # Pass[{0}]\tTime {1:.3f}\tLoss {2:.4f}'
.format(p, time.time() - start_pass, log_loss.avg))
# now each process needs to share its own set of pseudo_labels
# where to write / read the pseudo_labels
dump_labels = os.path.join(
args.dump_path,
'pseudo_labels' + str(world_id) + '-' + str(rank) + '.pkl',
)
# log the cluster assignment
pickle.dump(
assignments,
open(dump_labels, 'wb'),
-1,
)
    # processes wait for all the other processes to finish writing
if group is not None:
dist.barrier(group=group)
else:
dist.barrier()
pseudo_labels = np.zeros(n_all)
    # each process reads and reconstitutes the full set of pseudo_labels
local_nmb_data = n_all // world_size
for r in range(world_size):
pseudo_labels[torch.arange(r * local_nmb_data, (r + 1) * local_nmb_data).int()] = \
pickle.load(open(os.path.join(args.dump_path, 'pseudo_labels' + str(world_id) + '-' + str(r) + '.pkl'), "rb"))
# clean
del assignments
dist.barrier()
os.remove(dump_labels)
return pseudo_labels, centroids.cpu()
|
DeeperCluster-main
|
src/distributed_kmeans.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from logging import getLogger
import os
import pickle
import shutil
import time
import numpy as np
from scipy.sparse import csr_matrix
import torch
import torch.distributed as dist
from .logger import create_logger, PD_Stats
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
logger = getLogger()
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag")
def init_distributed_mode(args, make_communication_groups=True):
"""
Handle single and multi-GPU / multi-node / SLURM jobs.
Initialize the following variables:
- global rank
- clustering_local_rank
- clustering_local_world_size
- clustering_local_world_id
- training_local_rank
- training_local_world_size
- training_local_world_id
- rotation
"""
args.is_slurm_job = 'SLURM_JOB_ID' in os.environ and not args.debug_slurm
if args.is_slurm_job:
args.rank = int(os.environ['SLURM_PROCID'])
else:
# jobs started with torch.distributed.launch
# read environment variables
args.rank = int(os.environ['RANK'])
args.world_size = int(os.environ['WORLD_SIZE'])
# prepare distributed
dist.init_process_group(backend='nccl', init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# set cuda device
args.gpu_to_work_on = args.rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu_to_work_on)
if not make_communication_groups:
return None, None
# each super_class has the same number of processes
assert args.world_size % args.super_classes == 0
# each super-class forms a training communication group
args.training_local_world_size = args.world_size // args.super_classes
args.training_local_rank = args.rank % args.training_local_world_size
args.training_local_world_id = args.rank // args.training_local_world_size
# prepare training groups
training_groups = []
for group_id in range(args.super_classes):
ranks = [args.training_local_world_size * group_id + i \
for i in range(args.training_local_world_size)]
training_groups.append(dist.new_group(ranks=ranks))
# compute number of super-clusters
if args.rotnet:
assert args.super_classes % 4 == 0
args.nmb_super_clusters = args.super_classes // 4
else:
args.nmb_super_clusters = args.super_classes
# prepare clustering communication groups
args.clustering_local_world_size = args.training_local_world_size * \
(args.super_classes // args.nmb_super_clusters)
args.clustering_local_rank = args.rank % args.clustering_local_world_size
args.clustering_local_world_id = args.rank // args.clustering_local_world_size
clustering_groups = []
for group_id in range(args.nmb_super_clusters):
ranks = [args.clustering_local_world_size * group_id + i \
for i in range(args.clustering_local_world_size)]
clustering_groups.append(dist.new_group(ranks=ranks))
# this process deals only with a certain rotation
if args.rotnet:
args.rotation = args.clustering_local_rank // args.training_local_world_size
else:
args.rotation = 0
return training_groups, clustering_groups
def check_parameters(args):
"""
    Check that the set of arguments is consistent.
"""
args.size_dataset = min(args.size_dataset, 95920149)
# make dataset size divisible by both the batch-size and the world-size
div = args.batch_size * args.world_size
args.size_dataset = args.size_dataset // div * div
args.epoch_size = args.size_dataset // args.nmb_super_clusters // 4
args.epoch_size = args.epoch_size // div * div
assert args.super_classes
# number of super classes must be divisible by the number of rotation categories
if args.rotnet:
assert args.super_classes % 4 == 0
# feature dimension
assert args.dim_pca <= 4096
def initialize_exp(params, *args):
"""
    Initialize the experiment:
    - dump parameters
    - create checkpoint and cache directories
    - create a logger
    - create a pandas object to log the training statistics
"""
# dump parameters
pickle.dump(params, open(os.path.join(params.dump_path, 'params.pkl'), 'wb'))
# create repo to store checkpoints
params.dump_checkpoints = os.path.join(params.dump_path, 'checkpoints')
if not params.rank and not os.path.isdir(params.dump_checkpoints):
os.mkdir(params.dump_checkpoints)
# create repo to cache activations between the two stages of the hierarchical k-means
if not params.rank and not os.path.isdir(os.path.join(params.dump_path, 'cache')):
os.mkdir(os.path.join(params.dump_path, 'cache'))
    # create a pandas object to log loss and acc
training_stats = PD_Stats(
os.path.join(params.dump_path, 'stats' + str(params.rank) + '.pkl'),
args,
)
# create a logger
logger = create_logger(os.path.join(params.dump_path, 'train.log'), rank=params.rank)
logger.info("============ Initialized logger ============")
logger.info("\n".join("%s: %s" % (k, str(v))
for k, v in sorted(dict(vars(params)).items())))
logger.info("The experiment will be stored in %s\n" % params.dump_path)
logger.info("")
return logger, training_stats
def end_of_epoch(args):
"""
Remove cluster assignment from experiment repository
"""
def src_dst(what, cl=False):
src = os.path.join(
args.dump_path,
what + cl * str(args.clustering_local_world_id) + '.pkl',
)
dst = os.path.join(
args.dump_checkpoints,
what + '{}-epoch{}.pkl'.format(cl * args.clustering_local_world_id, args.epoch - 1),
)
return src, dst
# main processes only are working here
if not args.clustering_local_rank:
for what in ['cluster_assignments', 'centroids']:
src, dst = src_dst(what, cl=True)
if not (args.epoch - 1) % args.checkpoint_freq:
shutil.copy(src, dst)
if not 'centroids' in src:
os.remove(src)
if not args.rank:
for what in ['super_class_assignments', 'super_class_centroids']:
src, dst = src_dst(what)
if not (args.epoch - 1) % args.checkpoint_freq:
shutil.copy(src, dst)
os.remove(src)
def restart_from_checkpoint(args, ckp_path=None, run_variables=None, **kwargs):
"""
Re-start from checkpoint present in experiment repo
"""
if ckp_path is None:
ckp_path = os.path.join(args.dump_path, 'checkpoint.pth.tar')
# look for a checkpoint in exp repository
if not os.path.isfile(ckp_path):
return
logger.info('Found checkpoint in experiment repository')
# open checkpoint file
map_location = None
if args.world_size > 1:
map_location = "cuda:" + str(args.gpu_to_work_on)
checkpoint = torch.load(ckp_path, map_location=map_location)
# key is what to look for in the checkpoint file
# value is the object to load
# example: {'state_dict': model}
for key, value in kwargs.items():
if key in checkpoint and value is not None:
value.load_state_dict(checkpoint[key])
logger.info("=> loaded {} from checkpoint '{}'"
.format(key, ckp_path))
else:
logger.warning("=> failed to load {} from checkpoint '{}'"
.format(key, ckp_path))
    # reload variables that matter for resuming the run
if run_variables is not None:
for var_name in run_variables:
if var_name in checkpoint:
run_variables[var_name] = checkpoint[var_name]
def fix_random_seeds(seed=1993):
"""
Fix random seeds.
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
class PCA():
"""
Class to compute and apply PCA.
"""
def __init__(self, dim=256, whit=0.5):
self.dim = dim
self.whit = whit
self.mean = None
def train_pca(self, cov):
"""
Takes a covariance matrix (np.ndarray) as input.
"""
d, v = np.linalg.eigh(cov)
eps = d.max() * 1e-5
n_0 = (d < eps).sum()
if n_0 > 0:
d[d < eps] = eps
# total energy
totenergy = d.sum()
# sort eigenvectors with eigenvalues order
idx = np.argsort(d)[::-1][:self.dim]
d = d[idx]
v = v[:, idx]
logger.warning("keeping %.2f %% of the energy" % (d.sum() / totenergy * 100.0))
# for the whitening
d = np.diag(1. / d**self.whit)
# principal components
self.dvt = np.dot(d, v.T)
def apply(self, x):
# input is from numpy
if isinstance(x, np.ndarray):
if self.mean is not None:
x -= self.mean
return np.dot(self.dvt, x.T).T
# input is from torch and is on GPU
if x.is_cuda:
if self.mean is not None:
x -= torch.cuda.FloatTensor(self.mean)
return torch.mm(torch.cuda.FloatTensor(self.dvt), x.transpose(0, 1)).transpose(0, 1)
# input if from torch, on CPU
if self.mean is not None:
x -= torch.FloatTensor(self.mean)
return torch.mm(torch.FloatTensor(self.dvt), x.transpose(0, 1)).transpose(0, 1)
class AverageMeter(object):
"""computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def normalize(data):
# data in numpy array
if isinstance(data, np.ndarray):
row_sums = np.linalg.norm(data, axis=1)
data = data / row_sums[:, np.newaxis]
return data
# data is a tensor
row_sums = data.norm(dim=1, keepdim=True)
data = data / row_sums
return data
def compute_M(data):
cols = np.arange(data.size)
return csr_matrix((cols, (data.ravel(), cols)),
shape=(data.max() + 1, data.size))
def get_indices_sparse(data):
M = compute_M(data)
return [np.unravel_index(row.data, data.shape) for row in M]
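# get_indices_sparse(labels) returns, for each label value k, the indices where
# labels == k, computed in a single pass through a sparse matrix. Illustrative
# example (values chosen here for illustration only):
#   >>> get_indices_sparse(np.array([0, 2, 0, 1]))
#   [(array([0, 2]),), (array([3]),), (array([1]),)]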
|
DeeperCluster-main
|
src/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
import os
import shutil
import time
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data.sampler import Sampler
from .utils import AverageMeter, get_indices_sparse
from src.slurm import trigger_job_requeue
logger = getLogger()
class DistUnifTargSampler(Sampler):
"""
    Samples elements across processes from a distribution that is uniform over the pseudo-labels.
"""
def __init__(self, total_size, pseudo_labels, num_replicas, rank, seed=31):
np.random.seed(seed)
# world size
self.num_replicas = num_replicas
# rank of this process
self.rank = rank
        # how many data points are loaded across all the processes
self.total_size = total_size
# set of labels to consider
set_of_pseudo_labels = np.unique(pseudo_labels)
nmb_pseudo_lab = int(len(set_of_pseudo_labels))
# number of images per label
per_label = int(self.total_size // nmb_pseudo_lab + 1)
# initialize indexes
epoch_indexes = np.zeros(int(per_label * nmb_pseudo_lab))
# select a number of per_label data for each label
indexes = get_indices_sparse(np.asarray(pseudo_labels))
for i, k in enumerate(set_of_pseudo_labels):
k = int(k)
label_indexes = indexes[k][0]
epoch_indexes[i * per_label: (i + 1) * per_label] = np.random.choice(
label_indexes,
per_label,
replace=(len(label_indexes) <= per_label)
)
# make sure indexes are integers
epoch_indexes = epoch_indexes.astype(int)
# shuffle the indexes
np.random.shuffle(epoch_indexes)
self.epoch_indexes = epoch_indexes[:self.total_size]
# this process only deals with this subset
self.process_ind = self.epoch_indexes[self.rank:self.total_size:self.num_replicas]
def __iter__(self):
return iter(self.process_ind)
def __len__(self):
return len(self.process_ind)
def train_network(args, models, optimizers, dataset):
"""
Train the models with cluster assignments as targets
"""
    # switch to train mode
for model in models:
model.train()
# uniform sampling over pseudo labels
sampler = DistUnifTargSampler(
args.epoch_size,
dataset.sub_classes,
args.training_local_world_size,
args.training_local_rank,
seed=args.epoch + args.training_local_world_id,
)
loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
batch_size=args.batch_size,
num_workers=args.workers,
pin_memory=True,
)
# running statistics
batch_time = AverageMeter()
data_time = AverageMeter()
# training statistics
log_top1_subclass = AverageMeter()
log_loss_subclass = AverageMeter()
log_top1_superclass = AverageMeter()
log_loss_superclass = AverageMeter()
log_top1 = AverageMeter()
log_loss = AverageMeter()
end = time.perf_counter()
cel = nn.CrossEntropyLoss().cuda()
relu = torch.nn.ReLU().cuda()
for iter_epoch, (inp, target) in enumerate(loader):
# start at iter start_iter
if iter_epoch < args.start_iter:
continue
# measure data loading time
data_time.update(time.perf_counter() - end)
# move input to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True).long()
# forward on the model
inp = relu(models[0](inp))
# forward on sub-class prediction layer
output = models[-1](inp)
loss_subclass = cel(output, target)
# forward on super-class prediction layer
super_class_output = models[1](inp)
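        # every sample handled by this training group belongs to the same
        # super-class, so the super-class target is a constant tensor filled
        # with the group id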
sc_target = args.training_local_world_id + \
0 * torch.cuda.LongTensor(args.batch_size)
loss_superclass = cel(super_class_output, sc_target)
loss = loss_subclass + loss_superclass
# initialize the optimizers
for optimizer in optimizers:
optimizer.zero_grad()
# compute the gradients
loss.backward()
# step
for optimizer in optimizers:
optimizer.step()
# log
# signal received, relaunch experiment
if os.environ['SIGNAL_RECEIVED'] == 'True':
save_checkpoint(args, iter_epoch + 1, models, optimizers)
if not args.rank:
trigger_job_requeue(os.path.join(args.dump_path, 'checkpoint.pth.tar'))
# regular checkpoints
if iter_epoch and iter_epoch % 1000 == 0:
save_checkpoint(args, iter_epoch + 1, models, optimizers)
# update stats
log_loss.update(loss.item(), output.size(0))
prec1 = accuracy(args, output, target, sc_output=super_class_output)
log_top1.update(prec1.item(), output.size(0))
log_loss_superclass.update(loss_superclass.item(), output.size(0))
prec1 = accuracy(args, super_class_output, sc_target)
log_top1_superclass.update(prec1.item(), output.size(0))
log_loss_subclass.update(loss_subclass.item(), output.size(0))
prec1 = accuracy(args, output, target)
log_top1_subclass.update(prec1.item(), output.size(0))
batch_time.update(time.perf_counter() - end)
end = time.perf_counter()
# verbose
if iter_epoch % 100 == 0:
logger.info('Epoch[{0}] - Iter: [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec {log_top1.val:.3f} ({log_top1.avg:.3f})\t'
'Super-class loss: {sc_loss.val:.3f} ({sc_loss.avg:.3f})\t'
'Super-class prec: {sc_prec.val:.3f} ({sc_prec.avg:.3f})\t'
'Intra super-class loss: {los.val:.3f} ({los.avg:.3f})\t'
'Intra super-class prec: {prec.val:.3f} ({prec.avg:.3f})\t'
.format(args.epoch, iter_epoch, len(loader), batch_time=batch_time,
data_time=data_time, loss=log_loss, log_top1=log_top1,
sc_loss=log_loss_superclass, sc_prec=log_top1_superclass,
los=log_loss_subclass, prec=log_top1_subclass))
# end of epoch
args.start_iter = 0
args.epoch += 1
# dump checkpoint
save_checkpoint(args, 0, models, optimizers)
if not args.rank:
if not (args.epoch - 1) % args.checkpoint_freq:
shutil.copyfile(
os.path.join(args.dump_path, 'checkpoint.pth.tar'),
os.path.join(args.dump_checkpoints,
'checkpoint' + str(args.epoch - 1) + '.pth.tar'),
)
return (args.epoch - 1,
args.epoch * len(loader),
log_top1.avg, log_loss.avg,
log_top1_superclass.avg, log_loss_superclass.avg,
log_top1_subclass.avg, log_loss_subclass.avg,
)
def save_checkpoint(args, iter_epoch, models, optimizers, path=''):
if not os.path.isfile(path):
path = os.path.join(args.dump_path, 'checkpoint.pth.tar')
# main process saves the training state
if not args.rank:
torch.save({
'epoch': args.epoch,
'start_iter': iter_epoch,
'state_dict': models[0].state_dict(),
'optimizer': optimizers[0].state_dict(),
'pred_layer_state_dict': models[1].state_dict(),
'optimizer_pred_layer': optimizers[1].state_dict(),
}, path)
# main local training process saves the last layer
if not args.training_local_rank:
torch.save({
'epoch': args.epoch,
'start_iter': iter_epoch,
'state_dict': models[-1].state_dict(),
'optimizer': optimizers[-1].state_dict(),
}, os.path.join(args.dump_path, str(args.training_local_world_id) + '-pred_layer.pth.tar'))
def accuracy(args, output, target, sc_output=None):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
batch_size = target.size(0)
_, pred = output.topk(1, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
if sc_output is not None:
_, pred = sc_output.topk(1, 1, True, True)
pred = pred.t()
target = args.training_local_world_id + 0 * torch.cuda.LongTensor(batch_size)
correct_sc = pred.eq(target.view(1, -1).expand_as(pred))
correct *= correct_sc
correct_1 = correct[:1].view(-1).float().sum(0, keepdim=True)
return correct_1.mul_(100.0 / batch_size)
def validate_network(val_loader, models, args):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
for model in models:
model.eval()
criterion = nn.CrossEntropyLoss().cuda()
with torch.no_grad():
end = time.perf_counter()
for i, (inp, target) in enumerate(val_loader):
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = inp
for model in models:
output = model(output)
loss = criterion(output, target)
# measure accuracy and record loss
acc1 = accuracy(args, output, target)
losses.update(loss.item(), inp.size(0))
top1.update(acc1[0], inp.size(0))
# measure elapsed time
batch_time.update(time.perf_counter() - end)
end = time.perf_counter()
if i % 100 == 0:
logger.info('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
.format(i, len(val_loader), batch_time=batch_time,
loss=losses, top1=top1))
return (top1.avg.item(), losses.avg)
|
DeeperCluster-main
|
src/trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from logging import getLogger
import pickle
import numpy as np
import torch
import torch.nn as nn
from src.model.model_factory import create_sobel_layer
from src.model.vgg16 import VGG16
logger = getLogger()
def load_pretrained(model, args):
"""
    Load pretrained weights into the model body (sobel and prediction-layer weights are discarded).
"""
if not os.path.isfile(args.pretrained):
logger.info('pretrained weights not found')
return
# open checkpoint file
map_location = None
if args.world_size > 1:
map_location = "cuda:" + str(args.gpu_to_work_on)
checkpoint = torch.load(args.pretrained, map_location=map_location)
# clean keys from 'module'
checkpoint['state_dict'] = {rename_key(key): val
for key, val
in checkpoint['state_dict'].items()}
# remove sobel keys
if 'sobel.0.weight' in checkpoint['state_dict']:
del checkpoint['state_dict']['sobel.0.weight']
del checkpoint['state_dict']['sobel.0.bias']
del checkpoint['state_dict']['sobel.1.weight']
del checkpoint['state_dict']['sobel.1.bias']
# remove pred_layer keys
if 'pred_layer.weight' in checkpoint['state_dict']:
del checkpoint['state_dict']['pred_layer.weight']
del checkpoint['state_dict']['pred_layer.bias']
# load weights
model.body.load_state_dict(checkpoint['state_dict'])
logger.info("=> loaded pretrained weights from '{}'".format(args.pretrained))
def rename_key(key):
"Remove module from key"
if not 'module' in key:
return key
if key.startswith('module.body.'):
return key[12:]
if key.startswith('module.'):
return key[7:]
return ''.join(key.split('.module'))
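# Hedged usage sketch (not part of the original file): `rename_key` strips the
# 'module.' prefixes added by (Distributed)DataParallel so that checkpoints load
# into a bare model. The helper name `_rename_key_examples` is hypothetical.
def _rename_key_examples():
    assert rename_key('features.0.weight') == 'features.0.weight'
    assert rename_key('module.body.features.0.weight') == 'features.0.weight'
    assert rename_key('module.pred_layer.weight') == 'pred_layer.weight'
    return rename_key('body.module.features.0.weight')  # -> 'body.features.0.weight'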
|
DeeperCluster-main
|
src/model/pretrain.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
DeeperCluster-main
|
src/model/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import torch
import torch.nn as nn
import torch.nn.init as init
cfg = {
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
}
class VGG16(nn.Module):
'''
VGG16 model
'''
def __init__(self, dim_in, relu=True, dropout=0.5, batch_norm=True):
super(VGG16, self).__init__()
self.features = make_layers(cfg['D'], dim_in, batch_norm=batch_norm)
self.dim_output_space = 4096
classifier = [
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(dropout),
nn.Linear(4096, 4096),
]
if relu:
classifier.append(nn.ReLU(True))
self.classifier = nn.Sequential(*classifier)
# Initialize weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
if self.classifier is not None:
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, in_channels, batch_norm=True):
layers = []
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
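# Hedged sketch (not part of the original file): a CPU smoke test of the backbone
# above. The name `_vgg16_smoke_test` is hypothetical; a 3-channel 224x224 input
# is assumed so the five max-pool stages leave a 7x7 map matching the 512*7*7
# linear layer.
def _vgg16_smoke_test():
    model = VGG16(dim_in=3, relu=True)
    model.eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        out = model(x)
    assert out.shape == (1, model.dim_output_space)  # (1, 4096)
    return out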
|
DeeperCluster-main
|
src/model/vgg16.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
import torch
import torch.nn as nn
import torch.optim
from .vgg16 import VGG16
logger = getLogger()
def create_sobel_layer():
grayscale = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0)
grayscale.weight.data.fill_(1.0 / 3.0)
grayscale.bias.data.zero_()
sobel_filter = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=0)
sobel_filter.weight.data[0, 0].copy_(
torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
)
sobel_filter.weight.data[1, 0].copy_(
torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
)
sobel_filter.bias.data.zero_()
sobel = nn.Sequential(grayscale, sobel_filter)
for p in sobel.parameters():
p.requires_grad = False
return sobel
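# Hedged usage sketch (not part of the original file): probing the fixed Sobel
# block with random data. The helper name `_sobel_shape_check` is hypothetical.
def _sobel_shape_check():
    sobel = create_sobel_layer()
    x = torch.randn(2, 3, 32, 32)
    with torch.no_grad():
        edges = sobel(x)
    # the 1x1 grayscale conv keeps 32x32; the unpadded 3x3 Sobel conv crops to 30x30
    assert edges.shape == (2, 2, 30, 30)
    return edges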
class Net(nn.Module):
def __init__(self, padding, sobel, body, pred_layer):
super(Net, self).__init__()
# padding
self.padding = padding
# sobel filter
self.sobel = create_sobel_layer() if sobel else None
# main architecture
self.body = body
# prediction layer
self.pred_layer = pred_layer
self.conv = None
def forward(self, x):
if self.padding is not None:
x = self.padding(x)
if self.sobel is not None:
x = self.sobel(x)
if self.conv is not None:
count = 1
for m in self.body.features.modules():
if not isinstance(m, nn.Sequential):
x = m(x)
if isinstance(m, nn.ReLU):
if count == self.conv:
return x
count = count + 1
x = self.body(x)
if self.pred_layer is not None:
x = self.pred_layer(x)
return x
def model_factory(sobel, relu=False, num_classes=0, batch_norm=True):
"""
Create a network.
"""
dim_in = 2 if sobel else 3
padding = nn.ConstantPad2d(1, 0.0)
if sobel:
padding = nn.ConstantPad2d(2, 0.0)
body = VGG16(dim_in, relu=relu, batch_norm=batch_norm)
pred_layer = nn.Linear(body.dim_output_space, num_classes) if num_classes else None
return Net(padding, sobel, body, pred_layer)
def build_prediction_layer(dim_in, args, group=None, num_classes=0):
"""
Create prediction layer on gpu and its associated optimizer.
"""
if not num_classes:
num_classes = args.super_classes
# last fully connected layer
pred_layer = nn.Linear(dim_in, num_classes)
# move prediction layer to gpu
pred_layer = to_cuda(pred_layer, args.gpu_to_work_on, group=group)
# set optimizer for the prediction layer
optimizer_pred_layer = sgd_optimizer(pred_layer, args.lr, args.wd)
return pred_layer, optimizer_pred_layer
def to_cuda(net, gpu_id, apex=False, group=None):
net = net.cuda()
if apex:
from apex.parallel import DistributedDataParallel as DDP
net = DDP(net, delay_allreduce=True)
else:
net = nn.parallel.DistributedDataParallel(
net,
device_ids=[gpu_id],
process_group=group,
)
return net
def sgd_optimizer(module, lr, wd):
return torch.optim.SGD(
filter(lambda x: x.requires_grad, module.parameters()),
lr=lr,
momentum=0.9,
weight_decay=wd,
)
def sobel2RGB(net):
if net.sobel is None:
return
def computeweight(conv, alist, blist):
sob = net.sobel._modules['1'].weight
res = 0
for atup in alist:
for btup in blist:
x = conv[:, 0, atup[0], btup[0]]*sob[0, :, atup[1], btup[1]]
y = conv[:, 1, atup[0], btup[0]]*sob[1, :, atup[1], btup[1]]
res = res + x + y
return res
def aux(a):
if a == 0:
return [(0, 0)]
elif a == 1:
return [(1, 0), (0, 1)]
elif a == 2:
return [(2, 0), (1, 1), (0, 2)]
elif a == 3:
return [(2, 1), (1, 2)]
elif a == 4:
return [(2, 2)]
features = list(net.body.features.children())
conv_old = features[0]
conv_final = nn.Conv2d(3, 64, kernel_size=5, padding=1, bias=True)
    # the merged conv has a 5x5 kernel (1x1 grayscale + 3x3 sobel + 3x3 conv),
    # so fill every position of the new kernel, not just the old 3x3 range
    for i in range(conv_final.kernel_size[0]):
        for j in range(conv_final.kernel_size[1]):
            neweight = 1/3 * computeweight(conv_old.weight, aux(i), aux(j)).expand(3, 64).transpose(1, 0)
            conv_final.weight.data[:, :, i, j].copy_(neweight)
conv_final.bias.data.copy_(conv_old.bias.data)
features[0] = conv_final
net.body.features = nn.Sequential(*features)
net.sobel = None
return
|
DeeperCluster-main
|
src/model/model_factory.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
DeeperCluster-main
|
src/data/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
from random import randrange
import os
import numpy as np
from sklearn.feature_extraction import image
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
from .YFCC100M import YFCC100M_dataset
logger = getLogger()
def load_data(args):
"""
Load dataset.
"""
if 'yfcc100m' in args.data_path:
return YFCC100M_dataset(args.data_path, size=args.size_dataset)
return datasets.ImageFolder(args.data_path)
def get_data_transformations(rotation=0):
"""
Return data transformations for clustering and for training
"""
tr_normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
)
final_process = [transforms.ToTensor(), tr_normalize]
# for clustering stage
tr_central_crop = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
lambda x: np.asarray(x),
Rotate(0)
] + final_process)
# for training stage
tr_dataug = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x),
Rotate(rotation)
] + final_process)
return tr_central_crop, tr_dataug
class Rotate(object):
def __init__(self, rot):
self.rot = rot
def __call__(self, img):
return rotate_img(img, self.rot)
def rotate_img(img, rot):
if rot == 0: # 0 degrees rotation
return img
elif rot == 90: # 90 degrees rotation
return np.flipud(np.transpose(img, (1, 0, 2))).copy()
    elif rot == 180: # 180 degrees rotation
return np.fliplr(np.flipud(img)).copy()
elif rot == 270: # 270 degrees rotation / or -90
return np.transpose(np.flipud(img), (1, 0, 2)).copy()
else:
return
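# Hedged sketch (not part of the original file): `rotate_img` expects an HxWxC
# numpy array and handles only the four right angles (anything else falls through
# and returns None). The helper name `_rotate_example` is hypothetical.
def _rotate_example():
    img = np.arange(12).reshape(2, 2, 3)  # tiny 2x2 "RGB" array
    assert np.array_equal(rotate_img(img, 180), np.fliplr(np.flipud(img)))
    assert rotate_img(img, 90).shape == (2, 2, 3)
    assert rotate_img(img, 45) is None
    return rotate_img(img, 270)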
class KFoldSampler(Sampler):
def __init__(self, im_per_target, shuffle):
self.im_per_target = im_per_target
N = 0
for tar in im_per_target:
N = N + len(im_per_target[tar])
self.N = N
self.shuffle = shuffle
def __iter__(self):
indices = np.zeros(self.N).astype(int)
c = 0
for tar in self.im_per_target:
indices[c: c + len(self.im_per_target[tar])] = self.im_per_target[tar]
c = c + len(self.im_per_target[tar])
if self.shuffle:
np.random.shuffle(indices)
return iter(indices)
def __len__(self):
return self.N
class KFold():
"""Class to perform k-fold cross-validation.
Args:
im_per_target (Dict): key (target), value (list of data with this target)
i (int): index of the round of cross validation to perform
K (int): dataset randomly partitioned into K equal sized subsamples
Attributes:
val (KFoldSampler): validation sampler
train (KFoldSampler): training sampler
"""
def __init__(self, im_per_target, i, K):
assert(i<K)
per_target = {}
for tar in im_per_target:
per_target[tar] = int(len(im_per_target[tar]) // K)
im_per_target_train = {}
im_per_target_val = {}
for k in range(K):
for L in im_per_target:
if k==i:
im_per_target_val[L] = im_per_target[L][k * per_target[L]: (k + 1) * per_target[L]]
else:
if not L in im_per_target_train:
im_per_target_train[L] = []
im_per_target_train[L] = im_per_target_train[L] + im_per_target[L][k * per_target[L]: (k + 1) * per_target[L]]
self.val = KFoldSampler(im_per_target_val, False)
self.train = KFoldSampler(im_per_target_train, True)
def per_target(imgs):
"""Arrange samples per target.
Args:
imgs (list): List of (_, target) tuples.
Returns:
dict: key (target), value (list of data with this target)
"""
res = {}
for index in range(len(imgs)):
_, target = imgs[index]
if target not in res:
res[target] = []
res[target].append(index)
return res
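# Hedged sketch (not part of the original file): `per_target` groups dataset
# indices by label, which `KFold` then splits into train/val folds. The helper
# name `_per_target_example` is hypothetical.
def _per_target_example():
    imgs = [('a.jpg', 0), ('b.jpg', 1), ('c.jpg', 0), ('d.jpg', 1)]
    groups = per_target(imgs)
    assert groups == {0: [0, 2], 1: [1, 3]}
    return groups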
|
DeeperCluster-main
|
src/data/loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import zipfile
import numpy as np
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import torch.utils.data as data
def loader(path_zip, file_img):
"""
Load imagefile from zip.
"""
with zipfile.ZipFile(path_zip, 'r') as myzip:
img = Image.open(myzip.open(file_img))
return img.convert('RGB')
class YFCC100M_dataset(data.Dataset):
"""
YFCC100M dataset.
"""
def __init__(self, root, size, flickr_unique_ids=True, transform=None):
self.root = root
self.transform = transform
self.sub_classes = None
# remove data with uniform color and data we didn't manage to download
if flickr_unique_ids:
self.indexes = np.load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'flickr_unique_ids.npy'))
self.indexes = self.indexes[:min(size, len(self.indexes))]
else:
self.indexes = np.arange(size)
# for subsets
self.subset_indexes = None
def __getitem__(self, ind):
index = ind
if self.subset_indexes is not None:
index = self.subset_indexes[ind]
index = self.indexes[index]
index = format(index, "0>8d")
repo = index[:2]
z = index[2: 5]
file_img = index[5:] + '.jpg'
path_zip = os.path.join(self.root, repo, z) + '.zip'
# load the image
img = loader(path_zip, file_img)
# apply transformation
if self.transform is not None:
img = self.transform(img)
# id of cluster
sub_class = -100
if self.sub_classes is not None:
sub_class = self.sub_classes[ind]
return img, sub_class
def __len__(self):
if self.subset_indexes is not None:
return len(self.subset_indexes)
return len(self.indexes)
|
DeeperCluster-main
|
src/data/YFCC100M.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import glob
import os
from collections import defaultdict
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
import torch.utils.data as data
class VOC2007_dataset(data.Dataset):
def __init__(self, voc_dir, split='train', transform=None):
# Find the image sets
image_set_dir = os.path.join(voc_dir, 'ImageSets', 'Main')
image_sets = glob.glob(os.path.join(image_set_dir, '*_' + split + '.txt'))
assert len(image_sets) == 20
# Read the labels
self.n_labels = len(image_sets)
images = defaultdict(lambda:-np.ones(self.n_labels, dtype=np.uint8))
for k, s in enumerate(sorted(image_sets)):
for l in open(s, 'r'):
name, lbl = l.strip().split()
lbl = int(lbl)
# Switch the ignore label and 0 label (in VOC -1: not present, 0: ignore)
if lbl < 0:
lbl = 0
elif lbl == 0:
lbl = 255
images[os.path.join(voc_dir, 'JPEGImages', name + '.jpg')][k] = lbl
self.images = [(k, images[k]) for k in images.keys()]
np.random.shuffle(self.images)
self.transform = transform
def __len__(self):
return len(self.images)
def __getitem__(self, i):
img = Image.open(self.images[i][0])
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
return img, self.images[i][1]
|
DeeperCluster-main
|
src/data/VOC2007.py
|
"""Configuration parameters."""
config_args = {
# training
"seed": 1234,
"epochs": 50,
"batch_size": 256,
"learning_rate": 1e-3,
"eval_every": 10,
"patience": 20,
"optimizer": "RAdam",
"save": 1,
"fast_decoding": 1,
"num_samples": -1,
# model
"dtype": "double",
"rank": 2,
"temperature": 0.01,
"init_size": 1e-3,
"anneal_every": 20,
"anneal_factor": 1.0,
"max_scale": 1 - 1e-3,
# dataset
"dataset": "zoo",
}
|
HypHC-master
|
config.py
|
"""Script to visualize the HypHC clustering."""
import argparse
import json
import os
import matplotlib.pyplot as plt
import torch
from datasets.loading import load_data
from model.hyphc import HypHC
from utils.poincare import project
from utils.visualization import plot_tree_from_leaves
if __name__ == "__main__":
parser = argparse.ArgumentParser("Hyperbolic Hierarchical Clustering.")
parser.add_argument("--model_dir", type=str, required=True,
help="path to a directory with a torch model_{seed}.pkl and a config.json files saved by train.py."
)
parser.add_argument("--seed", type=str, default=0, help="model seed to use")
args = parser.parse_args()
# load dataset
config = json.load(open(os.path.join(args.model_dir, "config.json")))
config_args = argparse.Namespace(**config)
_, y_true, similarities = load_data(config_args.dataset)
# build HypHC model
model = HypHC(similarities.shape[0], config_args.rank, config_args.temperature, config_args.init_size,
config_args.max_scale)
params = torch.load(os.path.join(args.model_dir, f"model_{args.seed}.pkl"), map_location=torch.device('cpu'))
model.load_state_dict(params, strict=False)
model.eval()
# decode tree
tree = model.decode_tree(fast_decoding=True)
leaves_embeddings = model.normalize_embeddings(model.embeddings.weight.data)
leaves_embeddings = project(leaves_embeddings).detach().cpu().numpy()
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
ax = plot_tree_from_leaves(ax, tree, leaves_embeddings, labels=y_true)
fig.savefig(os.path.join(args.model_dir, f"embeddings_{args.seed}.png"))
|
HypHC-master
|
visualize.py
|
"""Train a hyperbolic embedding model for hierarchical clustering."""
import argparse
import json
import logging
import os
import numpy as np
import torch
import torch.utils.data as data
from tqdm import tqdm
import optim
from config import config_args
from datasets.hc_dataset import HCDataset
from datasets.loading import load_data
from model.hyphc import HypHC
from utils.metrics import dasgupta_cost
from utils.training import add_flags_from_config, get_savedir
def train(args):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# get saving directory
if args.save:
save_dir = get_savedir(args)
logging.info("Save directory: " + save_dir)
save_path = os.path.join(save_dir, "model_{}.pkl".format(args.seed))
if os.path.exists(save_dir):
if os.path.exists(save_path):
logging.info("Model with the same configuration parameters already exists.")
logging.info("Exiting")
return
else:
os.makedirs(save_dir)
with open(os.path.join(save_dir, "config.json"), 'w') as fp:
json.dump(args.__dict__, fp)
log_path = os.path.join(save_dir, "train_{}.log".format(args.seed))
hdlr = logging.FileHandler(log_path)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
# set seed
logging.info("Using seed {}.".format(args.seed))
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# set precision
logging.info("Using {} precision.".format(args.dtype))
if args.dtype == "double":
torch.set_default_dtype(torch.float64)
# create dataset
x, y_true, similarities = load_data(args.dataset)
dataset = HCDataset(x, y_true, similarities, num_samples=args.num_samples)
dataloader = data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)
# create model
model = HypHC(dataset.n_nodes, args.rank, args.temperature, args.init_size, args.max_scale)
model.to("cuda")
# create optimizer
Optimizer = getattr(optim, args.optimizer)
optimizer = Optimizer(model.parameters(), args.learning_rate)
# train model
best_cost = np.inf
best_model = None
counter = 0
logging.info("Start training")
for epoch in range(args.epochs):
model.train()
total_loss = 0.0
with tqdm(total=len(dataloader), unit='ex') as bar:
for step, (triple_ids, triple_similarities) in enumerate(dataloader):
triple_ids = triple_ids.cuda()
triple_similarities = triple_similarities.cuda()
loss = model.loss(triple_ids, triple_similarities)
optimizer.zero_grad()
loss.backward()
optimizer.step()
bar.update(1)
bar.set_postfix(loss=f'{loss.item():.6f}')
total_loss += loss
total_loss = total_loss.item() / (step + 1.0)
logging.info("\t Epoch {} | average train loss: {:.6f}".format(epoch, total_loss))
# keep best embeddings
if (epoch + 1) % args.eval_every == 0:
model.eval()
tree = model.decode_tree(fast_decoding=args.fast_decoding)
cost = dasgupta_cost(tree, similarities)
logging.info("{}:\t{:.4f}".format("Dasgupta's cost", cost))
if cost < best_cost:
counter = 0
best_cost = cost
best_model = model.state_dict()
else:
counter += 1
if counter == args.patience:
logging.info("Early stopping.")
break
# anneal temperature
if (epoch + 1) % args.anneal_every == 0:
model.anneal_temperature(args.anneal_factor)
logging.info("Annealing temperature to: {}".format(model.temperature))
for param_group in optimizer.param_groups:
param_group['lr'] *= args.anneal_factor
lr = param_group['lr']
logging.info("Annealing learning rate to: {}".format(lr))
logging.info("Optimization finished.")
if best_model is not None:
# load best model
model.load_state_dict(best_model)
if args.save:
# save best embeddings
logging.info("Saving best model at {}".format(save_path))
torch.save(best_model, save_path)
# evaluation
model.eval()
logging.info("Decoding embeddings.")
tree = model.decode_tree(fast_decoding=args.fast_decoding)
cost = dasgupta_cost(tree, similarities)
logging.info("{}:\t{:.4f}".format("Dasgupta's cost", cost))
if args.save:
logger.removeHandler(hdlr)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser("Hyperbolic Hierarchical Clustering.")
parser = add_flags_from_config(parser, config_args)
args = parser.parse_args()
train(args)
|
HypHC-master
|
train.py
|
# from distutils.core import setup
from setuptools import setup
from Cython.Build import cythonize
import numpy
setup(
ext_modules=cythonize("mst.pyx", annotate=True, language_level="3"),
include_dirs=[numpy.get_include()],
)
|
HypHC-master
|
mst/setup.py
|
import numpy as np
import mst
if __name__ == '__main__':
    x = np.array([0, 1, 3, 7, 15], dtype=np.float64)  # np.float alias was removed in NumPy >= 1.24
dists = np.abs(x[np.newaxis, :] - x[:, np.newaxis])
print(dists)
print(mst.mst(dists, 5))
print(-dists)
print(mst.mst(-dists, 5))
    A = np.arange(16, dtype=np.float64).reshape((4, 4))
print(A)
B = mst.reorder(A, np.array([3, 2, 1, 0]), 4)
print(B)
|
HypHC-master
|
mst/test_mst.py
|
import numpy as np
import unionfind
if __name__ == '__main__':
uf = unionfind.UnionFind(5)
uf.merge(np.array([[0, 1], [2, 3], [0, 4], [3, 4]]))
print(uf.parent)
print(uf.tree)
|
HypHC-master
|
unionfind/test_uf.py
|
# from distutils.core import setup
from setuptools import setup
from Cython.Build import cythonize
import numpy
setup(
ext_modules=cythonize("unionfind.pyx", annotate=True, language_level="3"),
include_dirs=[numpy.get_include()],
)
|
HypHC-master
|
unionfind/setup.py
|
"""Dataset loading."""
import os
import numpy as np
UCI_DATASETS = [
"glass",
"zoo",
"iris",
]
def load_data(dataset, normalize=True):
"""Load dataset.
@param dataset: dataset name
@type dataset: str
@param normalize: whether to normalize features or not
@type normalize: boolean
@return: feature vectors, labels, and pairwise similarities computed with cosine similarity
@rtype: Tuple[np.array, np.array, np.array]
"""
if dataset in UCI_DATASETS:
x, y = load_uci_data(dataset)
else:
raise NotImplementedError("Unknown dataset {}.".format(dataset))
if normalize:
x = x / np.linalg.norm(x, axis=1, keepdims=True)
x0 = x[None, :, :]
x1 = x[:, None, :]
cos = (x0 * x1).sum(-1)
similarities = 0.5 * (1 + cos)
similarities = np.triu(similarities) + np.triu(similarities).T
similarities[np.diag_indices_from(similarities)] = 1.0
similarities[similarities > 1.0] = 1.0
return x, y, similarities
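# Hedged sketch (not part of the original file): the similarity construction in
# `load_data` maps cosine similarity from [-1, 1] into [0, 1] and symmetrizes the
# matrix. The helper name `_toy_similarities` is hypothetical.
def _toy_similarities():
    x = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    x = x / np.linalg.norm(x, axis=1, keepdims=True)
    cos = (x[None, :, :] * x[:, None, :]).sum(-1)
    sim = 0.5 * (1 + cos)
    sim = np.triu(sim) + np.triu(sim).T
    sim[np.diag_indices_from(sim)] = 1.0
    # e.g. sim[0, 2] == 0.5 * (1 + 1 / sqrt(2)) ~= 0.854
    return sim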
def load_uci_data(dataset):
"""Loads data from UCI repository.
@param dataset: UCI dataset name
@return: feature vectors, labels
@rtype: Tuple[np.array, np.array]
"""
x = []
y = []
ids = {
"zoo": (1, 17, -1),
"iris": (0, 4, -1),
"glass": (1, 10, -1),
}
data_path = os.path.join(os.environ["DATAPATH"], dataset, "{}.data".format(dataset))
classes = {}
class_counter = 0
start_idx, end_idx, label_idx = ids[dataset]
with open(data_path, 'r') as f:
for line in f:
split_line = line.split(",")
if len(split_line) >= end_idx - start_idx + 1:
x.append([float(x) for x in split_line[start_idx:end_idx]])
label = split_line[label_idx]
if not label in classes:
classes[label] = class_counter
class_counter += 1
y.append(classes[label])
y = np.array(y, dtype=int)
x = np.array(x, dtype=float)
mean = x.mean(0)
std = x.std(0)
x = (x - mean) / std
return x, y
|
HypHC-master
|
datasets/loading.py
|
"""Triplet sampling utils."""
import numpy as np
from tqdm import tqdm
def samples_triples(n_nodes, num_samples):
num_samples = int(num_samples)
all_nodes = np.arange(n_nodes)
mesh = np.array(np.meshgrid(all_nodes, all_nodes))
pairs = mesh.T.reshape(-1, 2)
pairs = pairs[pairs[:, 0] < pairs[:, 1]]
n_pairs = pairs.shape[0]
if num_samples < n_pairs:
print("Generating all pairs subset")
subset = np.random.choice(np.arange(n_pairs), num_samples, replace=False)
pairs = pairs[subset]
else:
print("Generating all pairs superset")
k_base = int(num_samples / n_pairs)
k_rem = num_samples - (k_base * n_pairs)
subset = np.random.choice(np.arange(n_pairs), k_rem, replace=False)
pairs_rem = pairs[subset]
pairs_base = np.repeat(np.expand_dims(pairs, 0), k_base, axis=0).reshape((-1, 2))
pairs = np.concatenate([pairs_base, pairs_rem], axis=0)
num_samples = pairs.shape[0]
triples = np.concatenate(
[pairs, np.random.randint(n_nodes, size=(num_samples, 1))],
axis=1
)
return triples
def generate_all_triples(n_nodes):
triples = []
for n1 in tqdm(np.arange(n_nodes)):
for n2 in np.arange(n1 + 1, n_nodes):
for n3 in np.arange(n2 + 1, n_nodes):
triples += [(n1, n2, n3)]
return np.array(triples)
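# Hedged sketch (not part of the original file): for n_nodes=4 there are
# C(4, 3) = 4 triples with n1 < n2 < n3. The helper name `_triples_example` is
# hypothetical.
def _triples_example():
    t = generate_all_triples(4)
    assert t.shape == (4, 3)
    assert (t == np.array([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])).all()
    return t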
|
HypHC-master
|
datasets/triples.py
|
HypHC-master
|
datasets/__init__.py
|
|
"""Hierarchical clustering dataset."""
import logging
import numpy as np
import torch
import torch.utils.data as data
from datasets.triples import generate_all_triples, samples_triples
class HCDataset(data.Dataset):
"""Hierarchical clustering dataset."""
def __init__(self, features, labels, similarities, num_samples):
"""Creates Hierarchical Clustering dataset with triples.
@param labels: ground truth labels
@type labels: np.array of shape (n_datapoints,)
@param similarities: pairwise similarities between datapoints
@type similarities: np.array of shape (n_datapoints, n_datapoints)
"""
self.features = features
self.labels = labels
self.similarities = similarities
self.n_nodes = self.similarities.shape[0]
self.triples = self.generate_triples(num_samples)
def __len__(self):
return len(self.triples)
def __getitem__(self, idx):
triple = self.triples[idx]
s12 = self.similarities[triple[0], triple[1]]
s13 = self.similarities[triple[0], triple[2]]
s23 = self.similarities[triple[1], triple[2]]
similarities = np.array([s12, s13, s23])
return torch.from_numpy(triple), torch.from_numpy(similarities)
def generate_triples(self, num_samples):
logging.info("Generating triples.")
if num_samples < 0:
triples = generate_all_triples(self.n_nodes)
else:
triples = samples_triples(self.n_nodes, num_samples=num_samples)
logging.info(f"Total of {triples.shape[0]} triples")
return triples.astype("int64")
|
HypHC-master
|
datasets/hc_dataset.py
|
"""Riemannian optimizers."""
from .radam import RAdam
|
HypHC-master
|
optim/__init__.py
|
"""Riemannian adam optimizer geoopt implementation (https://github.com/geoopt/)."""
import torch.optim
from utils.poincare import expmap, egrad2rgrad, inner, project, ptransp
def copy_or_set_(dest, source):
"""
A workaround to respect strides of :code:`dest` when copying :code:`source`
(https://github.com/geoopt/geoopt/issues/70)
Parameters
----------
dest : torch.Tensor
Destination tensor where to store new data
source : torch.Tensor
Source data to put in the new tensor
Returns
-------
dest
torch.Tensor, modified inplace
"""
if dest.stride() != source.stride():
return dest.copy_(source)
else:
return dest.set_(source)
class RAdam(torch.optim.Adam):
"""Riemannian Adam with the same API as :class:`torch.optim.Adam`
Parameters
----------
params : iterable
iterable of parameters to optimize or dicts defining
parameter groups
lr : float (optional)
learning rate (default: 1e-3)
betas : Tuple[float, float] (optional)
coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps : float (optional)
term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay : float (optional)
weight decay (L2 penalty) (default: 0)
amsgrad : bool (optional)
whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
Other Parameters
----------------
stabilize : int
Stabilize parameters if they are off-manifold due to numerical
reasons every ``stabilize`` steps (default: ``None`` -- no stabilize)
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def step(self, closure=None):
"""Performs a single optimization step.
Arguments
---------
closure : callable (optional)
A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
with torch.no_grad():
for group in self.param_groups:
if "step" not in group:
group["step"] = 0
betas = group["betas"]
weight_decay = group["weight_decay"]
eps = group["eps"]
learning_rate = group["lr"]
amsgrad = group["amsgrad"]
for point in group["params"]:
grad = point.grad
if grad is None:
continue
if grad.is_sparse:
raise RuntimeError(
"Riemannian Adam does not support sparse gradients yet (PR is welcome)"
)
state = self.state[point]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(point)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(point)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(point)
# make local variables for easy access
exp_avg = state["exp_avg"]
exp_avg_sq = state["exp_avg_sq"]
# actual step
grad.add_(point, alpha=weight_decay)
grad = egrad2rgrad(point, grad)
exp_avg.mul_(betas[0]).add_(grad, alpha=1 - betas[0])
exp_avg_sq.mul_(betas[1]).add_(inner(point, grad), alpha=1 - betas[1])
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(eps)
else:
denom = exp_avg_sq.sqrt().add_(eps)
group["step"] += 1
bias_correction1 = 1 - betas[0] ** group["step"]
bias_correction2 = 1 - betas[1] ** group["step"]
step_size = (
learning_rate * bias_correction2 ** 0.5 / bias_correction1
)
# copy the state, we need it for retraction
# get the direction for ascend
direction = exp_avg / denom
# transport the exponential averaging to the new point
new_point = project(expmap(-step_size * direction, point))
exp_avg_new = ptransp(point, new_point, exp_avg)
# use copy only for user facing point
copy_or_set_(point, new_point)
exp_avg.set_(exp_avg_new)
group["step"] += 1
return loss
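# Hedged sketch (not part of the original file): one optimization step on a tiny
# Poincare-ball parameter, initialised well inside the ball so the exponential
# map and parallel transport above are well defined. The helper name
# `_radam_step_example` is hypothetical.
def _radam_step_example():
    import torch.nn as nn
    emb = nn.Embedding(3, 2)
    emb.weight.data = 1e-3 * torch.randn(3, 2)
    optimizer = RAdam(emb.parameters(), lr=1e-2)
    loss = emb.weight.pow(2).sum()  # toy objective
    loss.backward()
    optimizer.step()
    assert emb.weight.norm(dim=-1).max().item() < 1.0  # still inside the ball
    return emb.weight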
|
HypHC-master
|
optim/radam.py
|
"""Poincare utils functions."""
import torch
from utils.math import arctanh, tanh
MIN_NORM = 1e-15
BALL_EPS = {torch.float32: 4e-3, torch.float64: 1e-5}
def egrad2rgrad(p, dp):
"""Converts Euclidean gradient to Hyperbolic gradient."""
lambda_p = lambda_(p)
dp /= lambda_p.pow(2)
return dp
def lambda_(x):
"""Computes the conformal factor."""
x_sqnorm = torch.sum(x.data.pow(2), dim=-1, keepdim=True)
return 2 / (1. - x_sqnorm).clamp_min(MIN_NORM)
def inner(x, u, v=None):
"""Computes inner product for two tangent vectors."""
if v is None:
v = u
lx = lambda_(x)
return lx ** 2 * (u * v).sum(dim=-1, keepdim=True)
def gyration(u, v, w):
"""Gyration."""
u2 = u.pow(2).sum(dim=-1, keepdim=True)
v2 = v.pow(2).sum(dim=-1, keepdim=True)
uv = (u * v).sum(dim=-1, keepdim=True)
uw = (u * w).sum(dim=-1, keepdim=True)
vw = (v * w).sum(dim=-1, keepdim=True)
a = - uw * v2 + vw + 2 * uv * vw
b = - vw * u2 - uw
d = 1 + 2 * uv + u2 * v2
return w + 2 * (a * u + b * v) / d.clamp_min(MIN_NORM)
def ptransp(x, y, u):
"""Parallel transport."""
lx = lambda_(x)
ly = lambda_(y)
return gyration(y, -x, u) * lx / ly
def expmap(u, p):
u_norm = u.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
second_term = tanh(lambda_(p) * u_norm / 2) * u / u_norm
gamma_1 = mobius_add(p, second_term)
return gamma_1
def project(x):
"""Projects points on the manifold."""
norm = x.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
eps = BALL_EPS[x.dtype]
maxnorm = (1 - eps)
cond = norm > maxnorm
projected = x / norm * maxnorm
return torch.where(cond, projected, x)
def mobius_add(x, y):
"""Mobius addition."""
x2 = torch.sum(x * x, dim=-1, keepdim=True)
y2 = torch.sum(y * y, dim=-1, keepdim=True)
xy = torch.sum(x * y, dim=-1, keepdim=True)
num = (1 + 2 * xy + y2) * x + (1 - x2) * y
denom = 1 + 2 * xy + x2 * y2
return num / denom.clamp_min(MIN_NORM)
def mobius_mul(x, t):
"""Mobius scalar multiplication."""
normx = x.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
return tanh(t * arctanh(normx)) * x / normx
def get_midpoint_o(x):
"""
Computes hyperbolic midpoint between x and the origin.
"""
return mobius_mul(x, 0.5)
def hyp_dist_o(x):
"""
Computes hyperbolic distance between x and the origin.
"""
x_norm = x.norm(dim=-1, p=2, keepdim=True)
return 2 * arctanh(x_norm)
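# Hedged sketch (not part of the original file): basic identities of the Poincare
# operations above -- Mobius-adding the origin is a no-op, and `project` keeps
# points strictly inside the unit ball. The name `_poincare_checks` is hypothetical.
def _poincare_checks():
    x = torch.tensor([[0.3, 0.4]], dtype=torch.float64)
    assert torch.allclose(mobius_add(x, torch.zeros_like(x)), x)
    y = project(torch.tensor([[3.0, 4.0]], dtype=torch.float64))
    assert y.norm(dim=-1).item() < 1.0
    return y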
|
HypHC-master
|
utils/poincare.py
|
"""LCA construction utils."""
import torch
from utils.poincare import MIN_NORM, hyp_dist_o
def isometric_transform(a, x):
"""Reflection (circle inversion of x through orthogonal circle centered at a)."""
r2 = torch.sum(a ** 2, dim=-1, keepdim=True) - 1.
u = x - a
return r2 / torch.sum(u ** 2, dim=-1, keepdim=True) * u + a
def reflection_center(mu):
"""Center of inversion circle."""
return mu / torch.sum(mu ** 2, dim=-1, keepdim=True)
def euc_reflection(x, a):
"""
Euclidean reflection (also hyperbolic) of x
Along the geodesic that goes through a and the origin
(straight line)
"""
xTa = torch.sum(x * a, dim=-1, keepdim=True)
norm_a_sq = torch.sum(a ** 2, dim=-1, keepdim=True).clamp_min(MIN_NORM)
proj = xTa * a / norm_a_sq
return 2 * proj - x
def _halve(x):
""" computes the point on the geodesic segment from o to x at half the distance """
return x / (1. + torch.sqrt(1 - torch.sum(x ** 2, dim=-1, keepdim=True)))
def hyp_lca(a, b, return_coord=True):
"""
    Computes the projection of the origin onto the geodesic between a and b
More optimized than hyp_lca1
"""
r = reflection_center(a)
b_inv = isometric_transform(r, b)
o_inv = a
o_inv_ref = euc_reflection(o_inv, b_inv)
o_ref = isometric_transform(r, o_inv_ref)
proj = _halve(o_ref)
if not return_coord:
return hyp_dist_o(proj)
else:
return proj
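# Hedged sketch (not part of the original file): for two points symmetric about
# the origin the geodesic is a diameter, so the projection of the origin onto it
# is the origin itself. The helper name `_lca_example` is hypothetical.
def _lca_example():
    a = torch.tensor([[0.5, 0.0]], dtype=torch.float64)
    b = torch.tensor([[-0.5, 0.0]], dtype=torch.float64)
    proj = hyp_lca(a, b, return_coord=True)
    assert proj.abs().max().item() < 1e-6
    return proj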
|
HypHC-master
|
utils/lca.py
|
"""Evaluation utils."""
import numpy as np
#from mst import reorder
from mst import mst
from utils.tree import descendants_traversal, descendants_count
def dasgupta_cost_iterative(tree, similarities):
""" Non-recursive version of DC. Also works on non-binary trees """
n = len(list(tree.nodes()))
root = n - 1
cost = [0] * n
desc = [None] * n # intermediate computation: children of node
children = [list(tree.neighbors(node)) for node in range(n)] # children remaining to process
stack = [root]
while len(stack) > 0:
node = stack[-1]
if len(children[node]) > 0:
stack.append(children[node].pop())
else:
children_ = list(tree.neighbors(node))
if len(children_) == 0:
desc[node] = [node]
else:
# Intermediate computations
desc[node] = [d for c in children_ for d in desc[c]]
# Cost at this node
# cost_ = similarities[desc[node]].T[desc[node]].sum()
# cost_ -= sum([similarities[desc[c]].T[desc[c]].sum() for c in children_])
# cost_ = cost_ / 2.0
# This is much faster for imbalanced trees
cost_ = sum([similarities[desc[c0]].T[desc[c1]].sum() for i, c0 in enumerate(children_) for c1 in
children_[i + 1:]])
cost_ *= len(desc[node])
cost[node] = cost_ + sum([cost[c] for c in children_]) # recursive cost
# Free intermediate computations (otherwise, up to n^2 space for recursive descendants)
for c in children_:
desc[c] = None
assert node == stack.pop()
return 2 * cost[root]
def dasgupta_cost(tree, similarities):
""" Non-recursive version of DC for binary trees.
Optimized for speed by reordering similarity matrix for locality
"""
n = len(list(tree.nodes()))
root = n - 1
n_leaves = len(similarities)
leaves = descendants_traversal(tree)
n_desc, left_desc = descendants_count(tree)
cost = [0] * n # local cost for every node
# reorder similarity matrix for locality
# similarities = similarities[leaves].T[leaves] # this is the bottleneck; is there a faster way?
similarities = mst.reorder(similarities, np.array(leaves), n_leaves) # this is the bottleneck; is there a faster way?
# Recursive computation
children = [list(tree.neighbors(node)) for node in range(n)] # children remaining to process
stack = [root]
while len(stack) > 0:
node = stack[-1]
if len(children[node]) > 0:
stack.append(children[node].pop())
else:
children_ = list(tree.neighbors(node))
if len(children_) < 2:
pass
elif len(children_) == 2:
left_c = children_[0]
right_c = children_[1]
left_range = [left_desc[left_c], left_desc[left_c] + n_desc[left_c]]
right_range = [left_desc[right_c], left_desc[right_c] + n_desc[right_c]]
cost_ = np.add.reduceat(
np.add.reduceat(
similarities[
left_range[0]:left_range[1],
right_range[0]:right_range[1]
], [0], axis=1
), [0], axis=0
)
cost[node] = cost_[0, 0]
else:
assert False, "tree must be binary"
assert node == stack.pop()
return 2 * sum(np.array(cost) * np.array(n_desc))
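# Hedged sketch (not part of the original file): Dasgupta's cost on a 3-leaf tree,
# computed with the non-recursive routine above. Nodes 0-2 are leaves, node 3 is
# the parent of (0, 1) and node 4 is the root, following the repo's convention
# that the root is the highest-numbered node. The helper name `_dasgupta_example`
# is hypothetical.
def _dasgupta_example():
    import networkx as nx
    tree = nx.DiGraph()
    tree.add_edges_from([(3, 0), (3, 1), (4, 3), (4, 2)])
    sim = np.array([[1.0, 0.9, 0.1], [0.9, 1.0, 0.2], [0.1, 0.2, 1.0]])
    cost = dasgupta_cost_iterative(tree, sim)
    # pair (0, 1) is merged under 2 leaves, pairs (0, 2) and (1, 2) under 3 leaves:
    # 2 * (2 * 0.9 + 3 * (0.1 + 0.2)) == 5.4
    assert abs(cost - 5.4) < 1e-9
    return cost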
|
HypHC-master
|
utils/metrics.py
|
"""Tree traversal util functions."""
def descendants_traversal(tree):
"""Get all descendants non-recursively, in traversal order."""
n = len(list(tree.nodes()))
root = n - 1
traversal = []
children = [list(tree.neighbors(node)) for node in range(n)] # children remaining to process
is_leaf = [len(children[node]) == 0 for node in range(n)]
stack = [root]
while len(stack) > 0:
node = stack[-1]
if len(children[node]) > 0:
stack.append(children[node].pop())
else:
assert node == stack.pop()
if is_leaf[node]:
traversal.append(node)
return traversal[::-1]
def descendants_count(tree):
"""For every node, count its number of descendant leaves, and the number of leaves before it."""
n = len(list(tree.nodes()))
root = n - 1
left = [0] * n
desc = [0] * n
leaf_idx = 0
children = [list(tree.neighbors(node))[::-1] for node in range(n)] # children remaining to process
stack = [root]
while len(stack) > 0:
node = stack[-1]
if len(children[node]) > 0:
stack.append(children[node].pop())
else:
children_ = list(tree.neighbors(node))
if len(children_) == 0:
desc[node] = 1
left[node] = leaf_idx
leaf_idx += 1
else:
desc[node] = sum([desc[c] for c in children_])
left[node] = left[children_[0]]
assert node == stack.pop()
return desc, left
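# Hedged sketch (not part of the original file): these traversals assume the
# networkx convention used elsewhere in the repo -- leaves are numbered first,
# the root is the highest-numbered node, and edges point parent -> child. The
# helper name `_tree_example` is hypothetical.
def _tree_example():
    import networkx as nx
    tree = nx.DiGraph()
    # leaves 0, 1, 2; node 3 = parent of (0, 1); node 4 = root = parent of (3, 2)
    tree.add_edges_from([(3, 0), (3, 1), (4, 3), (4, 2)])
    assert descendants_traversal(tree) == [0, 1, 2]
    desc, left = descendants_count(tree)
    assert desc == [1, 1, 1, 2, 3]
    return desc, left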
|
HypHC-master
|
utils/tree.py
|
"""Decoding utils."""
import time
import numpy as np
import torch
from tqdm import tqdm
from mst import mst
from unionfind import unionfind
from utils.lca import hyp_lca
### Single linkage using MST trick
# @profile
def sl_np_mst(similarities):
n = similarities.shape[0]
ij, _ = mst.mst(similarities, n)
uf = unionfind.UnionFind(n)
uf.merge(ij)
return uf.tree
def sl_from_embeddings(xs, S):
xs0 = xs[None, :, :]
xs1 = xs[:, None, :]
sim_mat = S(xs0, xs1) # (n, n)
return sl_np_mst(sim_mat.numpy())
### Single linkage using naive union find
# @profile
def nn_merge_uf_fast_np(xs, S, partition_ratio=None, verbose=False):
""" Uses Cython union find and numpy sorting
partition_ratio: either None, or real number > 1
similarities will be partitioned into buckets of geometrically increasing size
"""
n = xs.shape[0]
# Construct distance matrix (negative similarity; since numpy only has increasing sorting)
xs0 = xs[None, :, :]
xs1 = xs[:, None, :]
dist_mat = -S(xs0, xs1) # (n, n)
i, j = np.meshgrid(np.arange(n, dtype=int), np.arange(n, dtype=int))
# Keep only unique pairs (upper triangular indices)
idx = np.tril_indices(n, -1)
ij = np.stack([i[idx], j[idx]], axis=-1)
dist_mat = dist_mat[idx]
# Sort pairs
if partition_ratio is None:
idx = np.argsort(dist_mat, axis=0)
else:
k, ks = ij.shape[0], []
while k > 0:
k = int(k // partition_ratio)
ks.append(k)
ks = np.array(ks)[::-1]
if verbose:
print(ks)
idx = np.argpartition(dist_mat, ks, axis=0)
ij = ij[idx]
# Union find merging
uf = unionfind.UnionFind(n)
uf.merge(ij)
return uf.tree
|
HypHC-master
|
utils/linkage.py
|
HypHC-master
|
utils/__init__.py
|
|
"""Visualization utils."""
import matplotlib.pyplot as plt
import numpy as np
import torch
from utils.lca import hyp_lca
def mobius_add(x, y):
"""Mobius addition in numpy."""
xy = np.sum(x * y, 1, keepdims=True)
x2 = np.sum(x * x, 1, keepdims=True)
y2 = np.sum(y * y, 1, keepdims=True)
num = (1 + 2 * xy + y2) * x + (1 - x2) * y
den = 1 + 2 * xy + x2 * y2
return num / den
def mobius_mul(x, t):
"""Mobius multiplication in numpy."""
normx = np.sqrt(np.sum(x * x, 1, keepdims=True))
return np.tanh(t * np.arctanh(normx)) * x / normx
def geodesic_fn(x, y, nb_points=100):
"""Get coordinates of points on the geodesic between x and y."""
t = np.linspace(0, 1, nb_points)
x_rep = np.repeat(x.reshape((1, -1)), len(t), 0)
y_rep = np.repeat(y.reshape((1, -1)), len(t), 0)
t1 = mobius_add(-x_rep, y_rep)
t2 = mobius_mul(t1, t.reshape((-1, 1)))
return mobius_add(x_rep, t2)
def plot_geodesic(x, y, ax):
"""Plots geodesic between x and y."""
points = geodesic_fn(x, y)
ax.plot(points[:, 0], points[:, 1], color='black', linewidth=1.5, alpha=1)
def complete_tree(tree, leaves_embeddings):
"""Get embeddings of internal nodes from leaves' embeddings using LCA construction."""
def _complete_tree(embeddings, node):
children = list(tree.neighbors(node))
if len(children) == 2:
left_c, right_c = children
left_leaf = is_leaf(tree, left_c)
right_leaf = is_leaf(tree, right_c)
if left_leaf and right_leaf:
pass
elif left_leaf and not right_leaf:
embeddings = _complete_tree(embeddings, right_c)
elif right_leaf and not left_leaf:
embeddings = _complete_tree(embeddings, left_c)
else:
embeddings = _complete_tree(embeddings, right_c)
embeddings = _complete_tree(embeddings, left_c)
embeddings[node] = hyp_lca_numpy(embeddings[left_c], embeddings[right_c])
return embeddings
n = leaves_embeddings.shape[0]
tree_embeddings = np.zeros((2 * n - 1, 2))
tree_embeddings[:n, :] = leaves_embeddings
root = max(list(tree.nodes()))
tree_embeddings = _complete_tree(tree_embeddings, root)
return tree_embeddings
def hyp_lca_numpy(x, y):
"""Computes the hyperbolic LCA in numpy."""
x = torch.from_numpy(x).view((1, 2))
y = torch.from_numpy(y).view((1, 2))
lca = hyp_lca(x, y, return_coord=True)
return lca.view((2,)).numpy()
def is_leaf(tree, node):
"""check if node is a leaf in tree."""
return len(list(tree.neighbors(node))) == 0
def plot_tree_from_leaves(ax, tree, leaves_embeddings, labels, color_seed=1234):
"""Plots a tree on leaves embeddings using the LCA construction."""
circle = plt.Circle((0, 0), 1.0, color='r', alpha=0.1)
ax.add_artist(circle)
n = leaves_embeddings.shape[0]
embeddings = complete_tree(tree, leaves_embeddings)
colors = get_colors(labels, color_seed)
ax.scatter(embeddings[:n, 0], embeddings[:n, 1], c=colors, s=50, alpha=0.6)
for n1, n2 in tree.edges():
x1 = embeddings[n1]
x2 = embeddings[n2]
plot_geodesic(x1, x2, ax)
ax.set_xlim(-1.05, 1.05)
ax.set_ylim(-1.05, 1.05)
ax.axis("off")
return ax
def get_colors(y, color_seed=1234):
"""random color assignment for label classes."""
np.random.seed(color_seed)
colors = {}
for k in np.unique(y):
r = np.random.random()
b = np.random.random()
g = np.random.random()
colors[k] = (r, g, b)
return [colors[k] for k in y]
|
HypHC-master
|
utils/visualization.py
|
"""Math util functions."""
import torch
# ################# tanh ########################
class Artanh(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
x = x.clamp(-1 + 1e-5, 1 - 1e-5)
ctx.save_for_backward(x)
dtype = x.dtype
x = x.double()
return (torch.log_(1 + x).sub_(torch.log_(1 - x))).mul_(0.5).to(dtype)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
grad = grad_output / (1 - input ** 2)
return grad
def arctanh(x):
return Artanh.apply(x)
def tanh(x):
return x.clamp(-15, 15).tanh()
# ################# cosh ########################
class Arcosh(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
x = x.clamp(min=1 + 1e-7)
ctx.save_for_backward(x)
z = x.double()
return (z + torch.sqrt_(z.pow(2) - 1)).clamp_min_(1e-15).log_().to(x.dtype)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return grad_output / (input ** 2 - 1) ** 0.5
def arcosh(x):
return Arcosh.apply(x)
def cosh(x, clamp=15):
return x.clamp(-clamp, clamp).cosh()
# ################# sinh ########################
class Arsinh(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
z = x.double()
return (z + torch.sqrt_(1 + z.pow(2))).clamp_min_(1e-15).log_().to(x.dtype)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return grad_output / (1 + input ** 2) ** 0.5
def arsinh(x):
return Arsinh.apply(x)
def sinh(x, clamp=15):
return x.clamp(-clamp, clamp).sinh()
|
HypHC-master
|
utils/math.py
|
"""Training utils."""
import argparse
import hashlib
import os
def str2bool(v):
"""Converts string to boolean."""
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def add_flags_from_config(parser, config_dict):
"""Adds a flag (and default value) to an ArgumentParser for each parameter in a config."""
def OrNone(default):
def func(x):
# Convert "none" to proper None object
if x.lower() == "none":
return None
# If default is None (and x is not None), return x without conversion as str
elif default is None:
return str(x)
# Otherwise, default has non-None type; convert x to that type
else:
return type(default)(x)
return func
for param in config_dict:
default = config_dict[param]
try:
if isinstance(default, dict):
parser = add_flags_from_config(parser, default)
else:
parser.add_argument(f"--{param}", type=OrNone(default), default=default)
except argparse.ArgumentError:
print(
f"Could not add flag for param {param} because it was already present."
)
return parser
def hash_dict(values):
"""Hash of dict key, value pairs."""
m = hashlib.sha256()
keys = sorted(list(values.keys()))
for k in keys:
if k != "seed":
m.update(str(values[k]).encode('utf-8'))
return m.hexdigest()
def get_savedir(args):
"""Hash of args used for training."""
dir_hash = hash_dict(args.__dict__)
save_dir = os.path.join(os.environ["SAVEPATH"], args.dataset, dir_hash)
return save_dir
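# Hedged sketch (not part of the original file): `add_flags_from_config` turns
# every config entry into a CLI flag whose type follows the default's type. The
# helper name `_flags_example` is hypothetical.
def _flags_example():
    parser = argparse.ArgumentParser()
    parser = add_flags_from_config(parser, {"lr": 1e-3, "epochs": 50, "dataset": "zoo"})
    args = parser.parse_args(["--lr", "0.01", "--dataset", "iris"])
    assert args.lr == 0.01 and args.epochs == 50 and args.dataset == "iris"
    return args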
|
HypHC-master
|
utils/training.py
|
HypHC-master
|
model/__init__.py
|
|
"""Hyperbolic hierarchical clustering model."""
import networkx as nx
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.lca import hyp_lca
from utils.linkage import nn_merge_uf_fast_np, sl_from_embeddings
from utils.poincare import project
class HypHC(nn.Module):
"""
Hyperbolic embedding model for hierarchical clustering.
"""
def __init__(self, n_nodes=1, rank=2, temperature=0.05, init_size=1e-3, max_scale=1. - 1e-3):
super(HypHC, self).__init__()
self.n_nodes = n_nodes
self.embeddings = nn.Embedding(n_nodes, rank)
self.temperature = temperature
self.scale = nn.Parameter(torch.Tensor([init_size]), requires_grad=True)
self.embeddings.weight.data = project(
self.scale * (2 * torch.rand((n_nodes, rank)) - 1.0)
)
self.init_size = init_size
self.max_scale = max_scale
def anneal_temperature(self, anneal_factor):
"""
@param anneal_factor: scalar for temperature decay
@type anneal_factor: float
"""
self.temperature *= anneal_factor
def normalize_embeddings(self, embeddings):
"""Normalize leaves embeddings to have the lie on a diameter."""
min_scale = 1e-2 #self.init_size
max_scale = self.max_scale
return F.normalize(embeddings, p=2, dim=1) * self.scale.clamp_min(min_scale).clamp_max(max_scale)
def loss(self, triple_ids, similarities):
"""Computes the HypHC loss.
Args:
triple_ids: B x 3 tensor with triple ids
similarities: B x 3 tensor with pairwise similarities for triples
[s12, s13, s23]
"""
e1 = self.embeddings(triple_ids[:, 0])
e2 = self.embeddings(triple_ids[:, 1])
e3 = self.embeddings(triple_ids[:, 2])
e1 = self.normalize_embeddings(e1)
e2 = self.normalize_embeddings(e2)
e3 = self.normalize_embeddings(e3)
d_12 = hyp_lca(e1, e2, return_coord=False)
d_13 = hyp_lca(e1, e3, return_coord=False)
d_23 = hyp_lca(e2, e3, return_coord=False)
lca_norm = torch.cat([d_12, d_13, d_23], dim=-1)
weights = torch.softmax(lca_norm / self.temperature, dim=-1)
w_ord = torch.sum(similarities * weights, dim=-1, keepdim=True)
total = torch.sum(similarities, dim=-1, keepdim=True) - w_ord
return torch.mean(total)
def decode_tree(self, fast_decoding):
"""Build a binary tree (nx graph) from leaves' embeddings. Assume points are normalized to same radius."""
leaves_embeddings = self.normalize_embeddings(self.embeddings.weight.data)
leaves_embeddings = project(leaves_embeddings).detach().cpu()
sim_fn = lambda x, y: torch.sum(x * y, dim=-1)
if fast_decoding:
parents = nn_merge_uf_fast_np(leaves_embeddings, S=sim_fn, partition_ratio=1.2)
else:
parents = sl_from_embeddings(leaves_embeddings, sim_fn)
tree = nx.DiGraph()
for i, j in enumerate(parents[:-1]):
tree.add_edge(j, i)
return tree
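# Hedged sketch (not part of the original file): a tiny CPU smoke test of the
# model above -- 5 leaves, one batch of 2 random triples with made-up
# similarities. The name `_hyphc_smoke_test` is hypothetical.
def _hyphc_smoke_test():
    model = HypHC(n_nodes=5, rank=2)
    triple_ids = torch.tensor([[0, 1, 2], [1, 3, 4]])
    similarities = torch.rand(2, 3)
    loss = model.loss(triple_ids, similarities)
    assert loss.dim() == 0  # scalar, ready for loss.backward()
    return loss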
|
HypHC-master
|
model/hyphc.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import time
import os
from six.moves import cPickle
import traceback
from collections import defaultdict
import captioning.utils.opts as opts
import captioning.models as models
from captioning.data.dataloader import DataLoader
import skimage.io
import captioning.utils.eval_utils_joint as eval_utils
import captioning.utils.misc as utils
from captioning.utils.rewards import init_scorer, get_self_critical_reward
from captioning.modules.loss_wrapper_joint import LossWrapper
def add_summary_value(writer, key, value, iteration):
if writer:
writer.add_scalar(key, value, iteration)
def train(opt):
################################
# Build dataloader
################################
loader = DataLoader(opt)
opt.vocab_size = loader.vocab_size
opt.seq_length = loader.seq_length
##########################
# Initialize infos
##########################
infos = {
'iter': 0,
'epoch': 0,
'loader_state_dict': None,
'vocab': loader.get_vocab(),
}
    # Load old infos (if any) and check that the models are compatible
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'infos_'+opt.id+'.pkl')):
with open(os.path.join(opt.start_from, 'infos_'+opt.id+'.pkl'), 'rb') as f:
infos = utils.pickle_load(f)
saved_model_opt = infos['opt']
need_be_same=["caption_model", "rnn_type", "rnn_size", "num_layers"]
for checkme in need_be_same:
assert getattr(saved_model_opt, checkme) == getattr(opt, checkme), "Command line argument and saved model disagree on '%s' " % checkme
infos['opt'] = opt
#########################
# Build logger
#########################
# naive dict logger
histories = defaultdict(dict)
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'histories_'+opt.id+'.pkl')):
with open(os.path.join(opt.start_from, 'histories_'+opt.id+'.pkl'), 'rb') as f:
histories.update(utils.pickle_load(f))
# tensorboard logger
tb_summary_writer = SummaryWriter(opt.checkpoint_path)
##########################
# Build model
##########################
opt.vocab = loader.get_vocab()
model = models.setup(opt).cuda()
del opt.vocab
# Load pretrained weights:
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'model.pth')):
model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))
# Wrap generation model with loss function(used for training)
# This allows loss function computed separately on each machine
lw_model = LossWrapper(model, opt)
# Wrap with dataparallel
dp_model = torch.nn.DataParallel(model)
dp_model.vocab = getattr(model, 'vocab', None) # nasty
dp_lw_model = torch.nn.DataParallel(lw_model)
##########################
# Build optimizer
##########################
if opt.noamopt:
assert opt.caption_model in ['transformer', 'bert', 'm2transformer'], 'noamopt can only work with transformer'
optimizer = utils.get_std_opt(model, optim_func=opt.optim, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)
elif opt.reduce_on_plateau:
optimizer = utils.build_optimizer(model.parameters(), opt)
optimizer = utils.ReduceLROnPlateau(optimizer,
factor=opt.reduce_on_plateau_factor,
patience=opt.reduce_on_plateau_patience)
else:
optimizer = utils.build_optimizer(model.parameters(), opt)
# Load the optimizer
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from,"optimizer.pth")):
optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer.pth')))
#########################
# Get ready to start
#########################
iteration = infos['iter']
epoch = infos['epoch']
# For back compatibility
if 'iterators' in infos:
infos['loader_state_dict'] = {split: {'index_list': infos['split_ix'][split], 'iter_counter': infos['iterators'][split]} for split in ['train', 'val', 'test']}
loader.load_state_dict(infos['loader_state_dict'])
if opt.load_best_score == 1:
best_val_score = infos.get('best_val_score', None)
if opt.noamopt:
optimizer._step = iteration
# flag indicating finish of an epoch
    # Always set to True at the beginning to initialize the lr, etc.
epoch_done = True
# Assure in training mode
dp_lw_model.train()
# Start training
try:
while True:
# Stop if reaching max epochs
if epoch >= opt.max_epochs and opt.max_epochs != -1:
break
if epoch_done:
if not opt.noamopt and not opt.reduce_on_plateau:
# Assign the learning rate
if epoch > opt.learning_rate_decay_start and opt.learning_rate_decay_start >= 0:
frac = (epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every
decay_factor = opt.learning_rate_decay_rate ** frac
opt.current_lr = opt.learning_rate * decay_factor
else:
opt.current_lr = opt.learning_rate
utils.set_lr(optimizer, opt.current_lr) # set the decayed rate
# Assign the scheduled sampling prob
if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:
frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
opt.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)
model.ss_prob = opt.ss_prob
# If start self critical training
if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:
sc_flag = True
init_scorer(opt.cached_tokens)
else:
sc_flag = False
# If start structure loss training
if opt.structure_after != -1 and epoch >= opt.structure_after:
struc_flag = True
init_scorer(opt.cached_tokens)
else:
struc_flag = False
epoch_done = False
start = time.time()
if opt.use_warmup and (iteration < opt.noamopt_warmup):
opt.current_lr = opt.learning_rate * (iteration+1) / opt.noamopt_warmup
utils.set_lr(optimizer, opt.current_lr)
# Load data from train split (0)
data = loader.get_batch('train')
print('Read data:', time.time() - start)
torch.cuda.synchronize()
start = time.time()
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'], data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_ if _ is None else _.cuda() for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
optimizer.zero_grad()
model_out = dp_lw_model(fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks, data['gts'], torch.arange(0, len(data['gts'])), sc_flag, struc_flag)
loss = model_out['loss'].mean()
loss.backward()
if opt.grad_clip_value != 0:
getattr(torch.nn.utils, 'clip_grad_%s_' %(opt.grad_clip_mode))(model.parameters(), opt.grad_clip_value)
if not torch.isnan(loss):
if opt.language_eval == 1:
print('Doing final model evaluation, not updating model.')
else:
optimizer.step()
else:
print('Meet nan loss', data['gts'], model_out)
train_loss = loss.item()
torch.cuda.synchronize()
end = time.time()
if struc_flag:
print("iter {} (epoch {}), train_loss = {:.3f}, lm_loss = {:.3f}, struc_loss = {:.3f}, time/batch = {:.3f}" \
.format(iteration, epoch, train_loss, model_out['lm_loss'].mean().item(), model_out['struc_loss'].mean().item(), end - start))
elif not sc_flag:
print("iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}" \
.format(iteration, epoch, train_loss, end - start))
else:
print("iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}" \
.format(iteration, epoch, model_out['reward'].mean(), end - start))
# Update the iteration and epoch
iteration += 1
if data['bounds']['wrapped']:
epoch += 1
epoch_done = True
# Write the training loss summary
if (iteration % opt.losses_log_every == 0):
tb_summary_writer.add_scalar('train_loss', train_loss, iteration)
if opt.noamopt:
opt.current_lr = optimizer.rate()
elif opt.reduce_on_plateau:
opt.current_lr = optimizer.current_lr
tb_summary_writer.add_scalar('learning_rate', opt.current_lr, iteration)
tb_summary_writer.add_scalar('scheduled_sampling_prob', model.ss_prob, iteration)
if sc_flag:
tb_summary_writer.add_scalar('avg_reward', model_out['reward'].mean(), iteration)
elif struc_flag:
tb_summary_writer.add_scalar('lm_loss', model_out['lm_loss'].mean().item(), iteration)
tb_summary_writer.add_scalar('struc_loss', model_out['struc_loss'].mean().item(), iteration)
tb_summary_writer.add_scalar('reward', model_out['reward'].mean().item(), iteration)
tb_summary_writer.add_scalar('reward_var', model_out['reward'].var(1).mean(), iteration)
histories['loss_history'][iteration] = train_loss if not sc_flag else model_out['reward'].mean()
histories['lr_history'][iteration] = opt.current_lr
histories['ss_prob_history'][iteration] = model.ss_prob
# update infos
infos['iter'] = iteration
infos['epoch'] = epoch
infos['loader_state_dict'] = loader.state_dict()
# make evaluation on validation set, and save model
if opt.language_eval == 1 or (iteration % opt.save_checkpoint_every == 0 and not opt.save_every_epoch) or \
(epoch_done and opt.save_every_epoch):
# eval model
eval_kwargs = {'split': 'val',
'dataset': opt.input_json}
eval_kwargs.update(vars(opt))
assert (opt.task in ['caption', 'c_joint_t'] and opt.eval_task == 'caption') or \
(opt.task in ['trace', 'c_joint_t'] and opt.eval_task == 'trace') or \
(opt.task == 'pred_both' and opt.eval_task == 'pred_both')
if opt.eval_task == 'caption':
val_loss, predictions, lang_stats = eval_utils.eval_split(dp_model, lw_model.crit_caption,
loader,
'caption', eval_kwargs)
elif opt.eval_task == 'trace':
# This is a little time-consuming due to the linear programming solve.
val_loss = eval_utils.eval_trace_generation(dp_model, lw_model.crit_trace, loader, window_size=0,
                                            eval_kwargs=eval_kwargs)  # adjust window_size as needed
lang_stats = None
predictions = None
elif opt.eval_task == 'pred_both':
val_loss, predictions, lang_stats = eval_utils.eval_split(dp_model, lw_model.crit_caption, loader,
'both', eval_kwargs) # caption generation
val_loss_trace = eval_utils.eval_trace_generation(dp_model, lw_model.crit_trace, loader, window_size=0,
eval_kwargs=eval_kwargs) # Adjust the window_size as needed
if opt.language_eval == 1:
break # The language eval is done during testing, after the training finishes.
if opt.reduce_on_plateau:
if 'CIDEr' in lang_stats:
optimizer.scheduler_step(-lang_stats['CIDEr'])
else:
optimizer.scheduler_step(val_loss)
# Write validation result into summary
tb_summary_writer.add_scalar('validation loss', val_loss, iteration)
if lang_stats is not None:
for k,v in lang_stats.items():
tb_summary_writer.add_scalar(k, v, iteration)
histories['val_result_history'][iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
# Save model if it is improving on the validation result
if opt.language_eval == 1:
current_score = lang_stats['CIDEr']
else:
current_score = - val_loss
best_flag = False
if best_val_score is None or current_score > best_val_score:
best_val_score = current_score
best_flag = True
# Dump miscellaneous information
infos['best_val_score'] = best_val_score
# '''
utils.save_checkpoint(opt, model, infos, optimizer, histories)
if opt.save_history_ckpt:
utils.save_checkpoint(opt, model, infos, optimizer,
append=str(epoch) if opt.save_every_epoch else str(iteration))
if best_flag:
utils.save_checkpoint(opt, model, infos, optimizer, append='best')
# '''
except (RuntimeError, KeyboardInterrupt):
print('Save ckpt on exception ...')
# '''
utils.save_checkpoint(opt, model, infos, optimizer)
print('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
# '''
opt = opts.parse_opt()
train(opt)
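# Illustrative sketch (not part of the original repo): the two simple schedules used
# in the training loop above. Scheduled sampling raises ss_prob by
# `scheduled_sampling_increase_prob` every `scheduled_sampling_increase_every` epochs
# (capped at `scheduled_sampling_max_prob`), and the warmup ramps the learning rate
# linearly over `noamopt_warmup` iterations. All parameter values below are
# hypothetical, not the repo's defaults.
def _demo_schedules(epoch, iteration, ss_start=0, every=5, inc=0.05, cap=0.25,
                    base_lr=5e-4, warmup=2000):
    frac = (epoch - ss_start) // every
    ss_prob = min(inc * frac, cap)
    warmup_lr = base_lr * (iteration + 1) / warmup if iteration < warmup else base_lr
    return ss_prob, warmup_lr
# e.g. _demo_schedules(epoch=12, iteration=500) -> (0.1, 0.00012525)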
|
connect-caption-and-trace-main
|
tools/train.py
|
connect-caption-and-trace-main
|
captioning/__init__.py
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
import os
import torch.nn.functional as F
import six
from six.moves import cPickle
bad_endings = ['with','in','on','of','a','at','to','for','an','this','his','her','that']
bad_endings += ['the']
def pickle_load(f):
""" Load a pickle.
Parameters
----------
f: file-like object
"""
if six.PY3:
return cPickle.load(f, encoding='latin-1')
else:
return cPickle.load(f)
def pickle_dump(obj, f):
""" Dump a pickle.
Parameters
----------
obj: pickled object
f: file-like object
"""
if six.PY3:
return cPickle.dump(obj, f, protocol=2)
else:
return cPickle.dump(obj, f)
# modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/comm.py
def serialize_to_tensor(data):
device = torch.device("cpu")
buffer = cPickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def deserialize(tensor):
buffer = tensor.cpu().numpy().tobytes()
return cPickle.loads(buffer)
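# Illustrative sketch (not part of the original repo): round-tripping an arbitrary
# Python object through serialize_to_tensor/deserialize, e.g. before an all-gather
# style exchange (cf. the detectron2 comm utilities this was adapted from).
def _demo_serialize_roundtrip():
    payload = {'iter': 3, 'scores': [0.1, 0.2]}
    t = serialize_to_tensor(payload)   # 1-D byte tensor holding the pickled bytes
    assert deserialize(t) == payload   # decoding recovers the original object
    return t.numel()                   # size of the serialized payload in bytes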
# Input: seq, N*D numpy array, with element 0 .. vocab_size. 0 is END token.
def decode_sequence(ix_to_word, seq):
N, D = seq.size()
out = []
for i in range(N):
txt = ''
for j in range(D):
ix = seq[i,j]
if ix > 0 :
if j >= 1:
txt = txt + ' '
txt = txt + ix_to_word[str(ix.item())]
else:
break
if int(os.getenv('REMOVE_BAD_ENDINGS', '0')):
flag = 0
words = txt.split(' ')
for j in range(len(words)):
if words[-j-1] not in bad_endings:
flag = -j
break
txt = ' '.join(words[0:len(words)+flag])
out.append(txt.replace('@@ ', ''))
return out
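# Illustrative sketch (not part of the original repo): decoding a toy batch with a
# hypothetical vocabulary. Index 0 is the END token, so each row stops at its first 0.
def _demo_decode_sequence():
    ix_to_word = {'1': 'a', '2': 'dog', '3': 'runs'}   # hypothetical id -> word map
    seq = torch.tensor([[1, 2, 3, 0], [2, 3, 0, 0]])   # N=2 rows, D=4 steps
    return decode_sequence(ix_to_word, seq)            # ['a dog runs', 'dog runs']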
def save_checkpoint(opt, model, infos, optimizer, histories=None, append=''):
if len(append) > 0:
append = '-' + append
# if checkpoint_path doesn't exist
print('Saving checkpoint to', opt.checkpoint_path)
if not os.path.isdir(opt.checkpoint_path):
os.makedirs(opt.checkpoint_path)
checkpoint_path = os.path.join(opt.checkpoint_path, 'model%s.pth' %(append))
torch.save(model.state_dict(), checkpoint_path)
print("model saved to {}".format(checkpoint_path))
optimizer_path = os.path.join(opt.checkpoint_path, 'optimizer%s.pth' %(append))
torch.save(optimizer.state_dict(), optimizer_path)
with open(os.path.join(opt.checkpoint_path, 'infos_'+opt.id+'%s.pkl' %(append)), 'wb') as f:
pickle_dump(infos, f)
if histories:
with open(os.path.join(opt.checkpoint_path, 'histories_'+opt.id+'%s.pkl' %(append)), 'wb') as f:
pickle_dump(histories, f)
def set_lr(optimizer, lr):
for group in optimizer.param_groups:
group['lr'] = lr
def get_lr(optimizer):
for group in optimizer.param_groups:
return group['lr']
def build_optimizer(params, opt):
if opt.optim == 'rmsprop':
return optim.RMSprop(params, opt.learning_rate, opt.optim_alpha, opt.optim_epsilon, weight_decay=opt.weight_decay)
elif opt.optim == 'adagrad':
return optim.Adagrad(params, opt.learning_rate, weight_decay=opt.weight_decay)
elif opt.optim == 'sgd':
return optim.SGD(params, opt.learning_rate, weight_decay=opt.weight_decay)
elif opt.optim == 'sgdm':
return optim.SGD(params, opt.learning_rate, opt.optim_alpha, weight_decay=opt.weight_decay)
elif opt.optim == 'sgdmom':
return optim.SGD(params, opt.learning_rate, opt.optim_alpha, weight_decay=opt.weight_decay, nesterov=True)
elif opt.optim == 'adam':
return optim.Adam(params, opt.learning_rate, (opt.optim_alpha, opt.optim_beta), opt.optim_epsilon, weight_decay=opt.weight_decay)
elif opt.optim == 'adamw':
return optim.AdamW(params, opt.learning_rate, (opt.optim_alpha, opt.optim_beta), opt.optim_epsilon, weight_decay=opt.weight_decay)
else:
raise Exception("bad option opt.optim: {}".format(opt.optim))
def penalty_builder(penalty_config):
if penalty_config == '':
return lambda x,y: y
pen_type, alpha = penalty_config.split('_')
alpha = float(alpha)
if pen_type == 'wu':
return lambda x,y: length_wu(x,y,alpha)
if pen_type == 'avg':
return lambda x,y: length_average(x,y,alpha)
def length_wu(length, logprobs, alpha=0.):
"""
NMT length re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
modifier = (((5 + length) ** alpha) /
((5 + 1) ** alpha))
return (logprobs / modifier)
def length_average(length, logprobs, alpha=0.):
"""
Returns the average log-probability per token in a sequence.
"""
return logprobs / length
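# Illustrative sketch (not part of the original repo): penalty_builder parses a config
# string such as 'wu_0.7' into a callable. The Wu et al. penalty divides the summed
# log-probability by ((5 + length) / 6) ** alpha, which penalises long hypotheses less
# harshly than the plain per-token average.
def _demo_length_penalty():
    penalty_fn = penalty_builder('wu_0.7')      # -> lambda length, logprobs: length_wu(length, logprobs, 0.7)
    rescored = penalty_fn(10, -6.0)             # -6.0 / ((15 / 6) ** 0.7)
    averaged = length_average(10, -6.0, 0.7)    # -6.0 / 10
    return rescored, averaged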
class NoamOpt(object):
"Optim wrapper that implements rate."
def __init__(self, model_size, factor, warmup, optimizer):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
def step(self):
"Update parameters and rate"
self._step += 1
rate = self.rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def rate(self, step = None):
"Implement `lrate` above"
if step is None:
step = self._step
return self.factor * \
(self.model_size ** (-0.5) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
def __getattr__(self, name):
return getattr(self.optimizer, name)
def state_dict(self):
state_dict = self.optimizer.state_dict()
state_dict['_step'] = self._step
return state_dict
def load_state_dict(self, state_dict):
if '_step' in state_dict:
self._step = state_dict['_step']
del state_dict['_step']
self.optimizer.load_state_dict(state_dict)
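# Illustrative sketch (not part of the original repo): the Noam schedule warms the
# learning rate up linearly for `warmup` steps and then decays it as step**-0.5,
# scaled by factor * model_size**-0.5. The single tiny parameter below exists only
# to satisfy the optimizer constructor.
def _demo_noam_rate():
    sched = NoamOpt(model_size=512, factor=1.0, warmup=4000,
                    optimizer=optim.Adam([nn.Parameter(torch.zeros(1))], lr=0))
    return [sched.rate(step) for step in (1, 4000, 16000)]  # rising, peak, then decaying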
class ReduceLROnPlateau(object):
"Optim wrapper that implements rate."
def __init__(self, optimizer, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08):
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode, factor, patience, verbose, threshold, threshold_mode, cooldown, min_lr, eps)
self.optimizer = optimizer
self.current_lr = get_lr(optimizer)
def step(self):
"Update parameters and rate"
self.optimizer.step()
def scheduler_step(self, val):
self.scheduler.step(val)
self.current_lr = get_lr(self.optimizer)
def state_dict(self):
return {'current_lr':self.current_lr,
'scheduler_state_dict': self.scheduler.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict()}
def load_state_dict(self, state_dict):
if 'current_lr' not in state_dict:
# it's a plain optimizer state dict
self.optimizer.load_state_dict(state_dict)
set_lr(self.optimizer, self.current_lr) # use the lr from the option
else:
# it's a scheduler state dict
self.current_lr = state_dict['current_lr']
self.scheduler.load_state_dict(state_dict['scheduler_state_dict'])
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# current_lr is actually useless in this case
def rate(self, step = None):
    "Return the current learning rate (this wrapper follows the plateau scheduler rather than the Noam formula)."
    return self.current_lr
def __getattr__(self, name):
return getattr(self.optimizer, name)
def get_std_opt(model, optim_func='adam', factor=1, warmup=2000):
# return NoamOpt(model.tgt_embed[0].d_model, 2, 4000,
# torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
optim_func = dict(adam=torch.optim.Adam,
adamw=torch.optim.AdamW)[optim_func]
return NoamOpt(model.d_model, factor, warmup,
optim_func(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
|
connect-caption-and-trace-main
|
captioning/utils/misc.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'data/f30k_captions4eval.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
while True:
data = loader.get_batch(split)
# print('In eval_utils:', split)###zihang
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, labels, masks, att_masks = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
loss = crit(model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
seq, seq_logprobs = model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
seq = seq.data
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
for k, sent in enumerate(sents):
entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, att_masks, data], eval_kwargs)
# ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
# print('len:', len(predictions), n, ix1, split, num_images) ###zihang
for i in range(n - ix1):
predictions.pop()
if verbose:
print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 1
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, att_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils_orig.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .eval_utils import getCOCO
from .div_utils import compute_div_n, compute_global_div_n
try:
sys.path.append("coco-caption")
annFile = 'coco-caption/annotations/captions_val2014.json'
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
from pycocoevalcap.eval_spice import COCOEvalCapSpice
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from pycocoevalcap.bleu.bleu import Bleu
sys.path.append("cider")
from pyciderevalcap.cider.cider import Cider
except:
print('Warning: requirements for eval_multi not satisfied')
def eval_allspice(dataset, preds_n, model_id, split):
coco = getCOCO(dataset)
valids = coco.getImgIds()
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
# filter results to only those in MSCOCO validation set (will be about a third)
preds_filt_n = [p for p in preds_n if p['image_id'] in valids]
print('using %d/%d predictions_n' % (len(preds_filt_n), len(preds_n)))
cache_path_n = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
json.dump(preds_filt_n, open(cache_path_n, 'w')) # serialize to temporary json file. Sigh, COCO API...
# Eval AllSPICE
cocoRes_n = coco.loadRes(cache_path_n)
cocoEvalAllSPICE = COCOEvalCapSpice(coco, cocoRes_n)
cocoEvalAllSPICE.params['image_id'] = cocoRes_n.getImgIds()
cocoEvalAllSPICE.evaluate()
out = {}
for metric, score in cocoEvalAllSPICE.eval.items():
out['All'+metric] = score
imgToEvalAllSPICE = cocoEvalAllSPICE.imgToEval
# collect SPICE_sub_score
for k in list(imgToEvalAllSPICE.values())[0]['SPICE'].keys():
if k != 'All':
out['AllSPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEvalAllSPICE.values()])
out['AllSPICE_'+k] = (out['AllSPICE_'+k][out['AllSPICE_'+k]==out['AllSPICE_'+k]]).mean()
for p in preds_filt_n:
image_id, caption = p['image_id'], p['caption']
imgToEvalAllSPICE[image_id]['caption'] = capsById[image_id]
return {'overall': out, 'imgToEvalAllSPICE': imgToEvalAllSPICE}
def eval_oracle(dataset, preds_n, model_id, split):
cache_path = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
sample_n = len(capsById[list(capsById.keys())[0]])
for i in range(len(capsById[list(capsById.keys())[0]])):
preds = [_[i] for _ in capsById.values()]
json.dump(preds, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
imgToEval = cocoEval.imgToEval
for img_id in capsById.keys():
tmp = imgToEval[img_id]
for k in tmp['SPICE'].keys():
if k != 'All':
tmp['SPICE_'+k] = tmp['SPICE'][k]['f']
if tmp['SPICE_'+k] != tmp['SPICE_'+k]: # nan
tmp['SPICE_'+k] = -100
tmp['SPICE'] = tmp['SPICE']['All']['f']
if tmp['SPICE'] != tmp['SPICE']: tmp['SPICE'] = -100
capsById[img_id][i]['scores'] = imgToEval[img_id]
out = {'overall': {}, 'ImgToEval': {}}
for img_id in capsById.keys():
out['ImgToEval'][img_id] = {}
for metric in capsById[img_id][0]['scores'].keys():
if metric == 'image_id': continue
out['ImgToEval'][img_id]['oracle_'+metric] = max([_['scores'][metric] for _ in capsById[img_id]])
out['ImgToEval'][img_id]['avg_'+metric] = sum([_['scores'][metric] for _ in capsById[img_id]]) / len(capsById[img_id])
out['ImgToEval'][img_id]['captions'] = capsById[img_id]
for metric in list(out['ImgToEval'].values())[0].keys():
if metric == 'captions':
continue
tmp = np.array([_[metric] for _ in out['ImgToEval'].values()])
tmp = tmp[tmp!=-100]
out['overall'][metric] = tmp.mean()
return out
def eval_div_stats(dataset, preds_n, model_id, split):
tokenizer = PTBTokenizer()
capsById = {}
for i, d in enumerate(preds_n):
d['id'] = i
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
n_caps_perimg = len(capsById[list(capsById.keys())[0]])
print(n_caps_perimg)
_capsById = capsById # save the untokenized version
capsById = tokenizer.tokenize(capsById)
div_1, adiv_1 = compute_div_n(capsById,1)
div_2, adiv_2 = compute_div_n(capsById,2)
globdiv_1, _= compute_global_div_n(capsById,1)
print('Diversity Statistics are as follows: \n Div1: %.2f, Div2: %.2f, gDiv1: %d\n'%(div_1,div_2, globdiv_1))
# compute mbleu
scorer = Bleu(4)
all_scrs = []
scrperimg = np.zeros((n_caps_perimg, len(capsById)))
for i in range(n_caps_perimg):
tempRefsById = {}
candsById = {}
for k in capsById:
tempRefsById[k] = capsById[k][:i] + capsById[k][i+1:]
candsById[k] = [capsById[k][i]]
score, scores = scorer.compute_score(tempRefsById, candsById)
all_scrs.append(score)
scrperimg[i,:] = scores[1]
all_scrs = np.array(all_scrs)
out = {}
out['overall'] = {'Div1': div_1, 'Div2': div_2, 'gDiv1': globdiv_1}
for k, score in zip(range(4), all_scrs.mean(axis=0).tolist()):
out['overall'].update({'mBLeu_%d'%(k+1): score})
imgToEval = {}
for i,imgid in enumerate(capsById.keys()):
imgToEval[imgid] = {'mBleu_2' : scrperimg[:,i].mean()}
imgToEval[imgid]['individuals'] = []
for j, d in enumerate(_capsById[imgid]):
imgToEval[imgid]['individuals'].append(preds_n[d['id']])
imgToEval[imgid]['individuals'][-1]['mBleu_2'] = scrperimg[j,i]
out['ImgToEval'] = imgToEval
print('Mean mutual BLEU scores on this set are:\nmBLeu_1, mBLeu_2, mBLeu_3, mBLeu_4')
print(all_scrs.mean(axis=0))
return out
def eval_self_cider(dataset, preds_n, model_id, split):
cache_path = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# Get Cider_scorer
Cider_scorer = Cider(df='corpus')
tokenizer = PTBTokenizer()
gts = {}
for imgId in valids:
gts[imgId] = coco.imgToAnns[imgId]
gts = tokenizer.tokenize(gts)
for imgId in valids:
Cider_scorer.cider_scorer += (None, gts[imgId])
Cider_scorer.cider_scorer.compute_doc_freq()
Cider_scorer.cider_scorer.ref_len = np.log(float(len(Cider_scorer.cider_scorer.crefs)))
# Prepare captions
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
capsById = tokenizer.tokenize(capsById)
imgIds = list(capsById.keys())
scores = Cider_scorer.my_self_cider([capsById[_] for _ in imgIds])
def get_div(eigvals):
eigvals = np.clip(eigvals, 0, None)
return -np.log(np.sqrt(eigvals[-1]) / (np.sqrt(eigvals).sum())) / np.log(len(eigvals))
sc_scores = [get_div(np.linalg.eigvalsh(_/10)) for _ in scores]
score = np.mean(np.array(sc_scores))
imgToEval = {}
for i, image_id in enumerate(imgIds):
imgToEval[image_id] = {'self_cider': sc_scores[i], 'self_cider_mat': scores[i].tolist()}
return {'overall': {'self_cider': score}, 'imgToEval': imgToEval}
|
connect-caption-and-trace-main
|
captioning/utils/eval_multi.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copy from fvcore
import logging
import os
from typing import Any
import yaml
from yacs.config import CfgNode as _CfgNode
import io as PathManager  # lightweight stand-in for fvcore's PathManager; io.open handles local files
BASE_KEY = "_BASE_"
class CfgNode(_CfgNode):
"""
Our own extended version of :class:`yacs.config.CfgNode`.
It contains the following extra features:
1. The :meth:`merge_from_file` method supports the "_BASE_" key,
which allows the new CfgNode to inherit all the attributes from the
base configuration file.
2. Keys that start with "COMPUTED_" are treated as insertion-only
"computed" attributes. They can be inserted regardless of whether
the CfgNode is frozen or not.
3. With "allow_unsafe=True", it supports pyyaml tags that evaluate
expressions in config. See examples in
https://pyyaml.org/wiki/PyYAMLDocumentation#yaml-tags-and-python-types
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
"""
@staticmethod
def load_yaml_with_base(filename, allow_unsafe = False):
"""
Just like `yaml.load(open(filename))`, but inherit attributes from its
`_BASE_`.
Args:
filename (str): the file name of the current config. Will be used to
find the base config file.
allow_unsafe (bool): whether to allow loading the config file with
`yaml.unsafe_load`.
Returns:
(dict): the loaded yaml
"""
with PathManager.open(filename, "r") as f:
try:
cfg = yaml.safe_load(f)
except yaml.constructor.ConstructorError:
if not allow_unsafe:
raise
logger = logging.getLogger(__name__)
logger.warning(
"Loading config {} with yaml.unsafe_load. Your machine may "
"be at risk if the file contains malicious content.".format(
filename
)
)
f.close()
with open(filename, "r") as f:
cfg = yaml.unsafe_load(f)
def merge_a_into_b(a, b):
# merge dict a into dict b. values in a will overwrite b.
for k, v in a.items():
if isinstance(v, dict) and k in b:
assert isinstance(
b[k], dict
), "Cannot inherit key '{}' from base!".format(k)
merge_a_into_b(v, b[k])
else:
b[k] = v
if BASE_KEY in cfg:
base_cfg_file = cfg[BASE_KEY]
if base_cfg_file.startswith("~"):
base_cfg_file = os.path.expanduser(base_cfg_file)
if not any(
map(base_cfg_file.startswith, ["/", "https://", "http://"])
):
# the path to base cfg is relative to the config file itself.
base_cfg_file = os.path.join(
os.path.dirname(filename), base_cfg_file
)
base_cfg = CfgNode.load_yaml_with_base(
base_cfg_file, allow_unsafe=allow_unsafe
)
del cfg[BASE_KEY]
merge_a_into_b(cfg, base_cfg)
return base_cfg
return cfg
def merge_from_file(self, cfg_filename, allow_unsafe = False):
"""
Merge configs from a given yaml file.
Args:
cfg_filename: the file name of the yaml config.
allow_unsafe: whether to allow loading the config file with
`yaml.unsafe_load`.
"""
loaded_cfg = CfgNode.load_yaml_with_base(
cfg_filename, allow_unsafe=allow_unsafe
)
loaded_cfg = type(self)(loaded_cfg)
self.merge_from_other_cfg(loaded_cfg)
# Forward the following calls to base, but with a check on the BASE_KEY.
def merge_from_other_cfg(self, cfg_other):
"""
Args:
cfg_other (CfgNode): configs to merge from.
"""
assert (
BASE_KEY not in cfg_other
), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
return super().merge_from_other_cfg(cfg_other)
def merge_from_list(self, cfg_list):
"""
Args:
cfg_list (list): list of configs to merge from.
"""
keys = set(cfg_list[0::2])
assert (
BASE_KEY not in keys
), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
return super().merge_from_list(cfg_list)
def __setattr__(self, name, val):
if name.startswith("COMPUTED_"):
if name in self:
old_val = self[name]
if old_val == val:
return
raise KeyError(
"Computed attributed '{}' already exists "
"with a different value! old={}, new={}.".format(
name, old_val, val
)
)
self[name] = val
else:
super().__setattr__(name, val)
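# Illustrative sketch (not part of the original repo): how the "_BASE_" key merges a
# child config over its base; child keys overwrite base keys recursively. Both file
# names below are hypothetical.
def _demo_base_inheritance():
    import tempfile
    with tempfile.TemporaryDirectory() as d:
        with open(os.path.join(d, "base.yml"), "w") as f:
            f.write("MODEL:\n  SIZE: 512\n  DROPOUT: 0.1\n")
        with open(os.path.join(d, "child.yml"), "w") as f:
            f.write("_BASE_: base.yml\nMODEL:\n  DROPOUT: 0.3\n")
        return CfgNode.load_yaml_with_base(os.path.join(d, "child.yml"))
        # -> {'MODEL': {'SIZE': 512, 'DROPOUT': 0.3}}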
if __name__ == '__main__':
cfg = CfgNode.load_yaml_with_base('configs/updown_long.yml')
print(cfg)
|
connect-caption-and-trace-main
|
captioning/utils/config.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .local_optimal_transport import local_OT
from ..models import utils as utils_models
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
# annFile = 'coco-caption/annotations/captions_LN_8kval.json' # load 8k LN validation set
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'data/f30k_captions4eval.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, task='caption', eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# assert task
assert task in ['caption', 'trace', 'both', 'show']
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
grounding_quality_loss = [] # for downstream task 2
while True:
data = loader.get_batch(split)
# print('In eval_utils:', split)###zihang
print(num_images)
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks'], data['show_labels'], data['show_trace_feats'], data['show_trace_masks'], data['show_masks'], data['show_gate_labels']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, \
trace_masks, show_labels, show_trace_feats, show_trace_masks, show_masks, show_gate_labels = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
if task == 'caption':
loss = 0
# loss = crit(model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, task=task), labels[..., 1:], masks[..., 1:]).item()
elif task == 'show':
loss = crit(
model(fc_feats, att_feats, show_trace_feats, box_feats, show_labels[..., :-1], att_masks,
show_trace_masks, show_gate_labels, task=task), show_labels[..., 1:], show_masks[..., 1:]).item()
elif task == 'both':
loss = crit(
model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks,
task=task)[0], labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
test_grounding_quality = True
test_baseline = False
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
### repeat att feats
if test_grounding_quality:
fc_feats, att_feats, att_masks, box_feats = utils_models.repeat_tensors(5,
[fc_feats, att_feats, att_masks, box_feats]
)
#############################
if task == 'both':
seq, seq_logprobs, _ = model(fc_feats, att_feats, show_trace_feats[:att_feats.shape[0]], box_feats, att_masks, show_trace_masks[:att_feats.shape[0]],
show_gate_labels[:att_feats.shape[0]], task, opt=tmp_eval_kwargs, mode='sample')
# use gt-truth to get prediction
_, trace_output = model(fc_feats, att_feats, show_trace_feats[:,:17],
box_feats,
show_labels[..., :-1].squeeze(1),
att_masks, show_masks.squeeze(1)[:,:17],
task='both')
# ### debug try using trace to give trace output
# print(show_trace_feats.shape, show_labels.shape, show_masks.shape)
# trace_output = model(fc_feats, att_feats, show_trace_feats[:, :17], box_feats,
# show_labels[..., :-1].squeeze(1),
# att_masks, show_masks.squeeze(1), task='trace')
else:
if test_baseline is True and task == 'caption':
seq, seq_logprobs, word_box_attn = model(fc_feats, att_feats, show_trace_feats[:att_feats.shape[0]], box_feats,
att_masks, show_trace_masks[:att_feats.shape[0]],
show_gate_labels[:att_feats.shape[0]], task, opt=tmp_eval_kwargs,
mode='sample')
else:
seq, seq_logprobs = model(fc_feats, att_feats, show_trace_feats[:att_feats.shape[0]], box_feats,
att_masks, show_trace_masks[:att_feats.shape[0]], show_gate_labels[:att_feats.shape[0]], task, opt=tmp_eval_kwargs, mode='sample')
seq = seq.data
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
### log which caption has no bounding box
ids_no_box = (show_trace_feats[:, 0, 4] == 1).float()
# only focus on the gt-words
if test_grounding_quality:
batch_grounding_loss = []
if test_baseline:
word_box_attn = torch.argmax(word_box_attn, dim=-1)
# match the generated word with the show-caption
show_labels = show_labels[:, :, 1:-1]
for i in range(seq.shape[0]):
# for j in range(seq.shape[1]):
for k in range(show_labels.shape[2]):
if show_trace_feats[i, k, 4] != 1 and show_labels[i, 0, k] != 0:
# if show_trace_feats[i, k, 4] != 1 and show_labels[i, 0, k] != 0 \
# and seq[i, k] == show_labels[
# i, 0, k]: # the word match with the key word and show_labels[i,0,k] != 1
gt_box = show_trace_feats[i, k] # get the grounding box
if test_baseline:
pred_box_idx = word_box_attn[i, k].long()
pred_box = box_feats[i, pred_box_idx] # get the predicted box
else:
pred_box = trace_output[i, k]
# print(gt_box, pred_box, seq[i,j])
tmp_loss = torch.mean(torch.abs(gt_box[:4] - pred_box[:4]))
batch_grounding_loss.append(tmp_loss.item())
### compute the grounding quality
# if test_grounding_quality:
# batch_grounding_loss = []
# if test_baseline:
# word_box_attn = torch.argmax(word_box_attn, dim=-1)
# # match the generated word with the show-caption
# for i in range(seq.shape[0]):
# for j in range(seq.shape[1]):
# for k in range(show_labels.shape[2]):
# if show_trace_feats[i,k,4]!=1 and show_labels[i,0,k] != 0 \
# and seq[i,j] == show_labels[i,0,k]: # the word match with the key word and show_labels[i,0,k] != 1
# gt_box = show_trace_feats[i, k] # get the grounding box
# if test_baseline:
# pred_box_idx = word_box_attn[i,j].long()
# pred_box = box_feats[i, pred_box_idx] # get the predicted box
# else:
# pred_box = trace_output[i, j]
# # print(gt_box, pred_box, seq[i,j])
# tmp_loss = torch.mean(torch.abs(gt_box[:4] - pred_box[:4]))
# batch_grounding_loss.append(tmp_loss.item())
# else:
# assert task == 'both'
# for i in range(seq.shape[0]):
# for j in range(seq.shape[1]):
# for k in range(show_labels.shape[2]):
# if seq[i, j] != 0 and seq[i, j] == show_labels[i, 0, k]: # the word match with the key word
# gt_box = show_trace_feats[i, k] # get the grounding box
# pred_box = trace_output[i,j]
# tmp_loss = torch.mean(torch.abs(gt_box[:4] - pred_box[:4]))
# batch_grounding_loss.append(tmp_loss.item())
grounding_quality_loss.append(np.mean(np.array(batch_grounding_loss)))
print('Visual grounding quality running ave: ', np.mean(np.array(grounding_quality_loss)))
seq = seq.reshape([-1, 5, 20])[:,0,:]
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
for k, sent in enumerate(sents):
entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
# entry to evaluate show-control-tell: separate the 5 predictions per image
# if ids_no_box[k]==1:
# continue
# entry = {'image_id': data['infos'][k//5]['id'] + 1000000 * (k%5), 'caption': sent, 'perplexity': perplexity[k].item(),
# 'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data], eval_kwargs)
# ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
# print('len:', len(predictions), n, ix1, split, num_images) ###zihang
for i in range(n - ix1):
predictions.pop()
if verbose:
print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
print('Total visual grounding quality loss:', np.mean(np.array(grounding_quality_loss)))
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 1
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
def eval_trace_generation(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
# print(data['infos'][0]['id'])
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
loss_prev_gt_list = []
acc_list = []
with torch.no_grad():
use_local_OT = False
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()
# trace_class_label = trace_feats[:,:,5] - 1
# pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
# prev_gt_correct
prev_gt_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')
prev_gt_out = prev_gt_out * trace_masks.unsqueeze(2)
# print(prev_gt_out[0, :, :5])
loss_prev_gt_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
loss_prev_gt = ((torch.abs(prev_gt_out[:, :, :4] - trace_feats[:, :, :4]) * loss_prev_gt_mask).sum() / (
loss_prev_gt_mask.sum() * 4)).item()
loss_prev_gt_list.append(loss_prev_gt)
for i in range(trace_feats.shape[1]):
# for regression
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')[:, i]
curr_out[:, 4] = (curr_out[:, 2] - curr_out[:, 0]) * (curr_out[:, 3] - curr_out[:, 1])
tmp_trace_feats[:, i, :5] = curr_out
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks) # for non-iteratively
# break # for non-iteratively
# tmp_trace_feats = curr_out # for non-iteratively
# ### save for visualization # for visualization of trace_generation
# vis_img_id = data['infos'][0]['id']
# np.save('./vis/trace_generation_2/pred_trace_'+str(vis_img_id), tmp_trace_feats[:,:,:4].detach().cpu().numpy())
# np.save('./vis/trace_generation_2/gt_trace_' + str(vis_img_id), trace_feats[:,:,:4].detach().cpu().numpy())
# print(vis_img_id, crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item(), trace_feats.shape)
# with open('./vis/trace_generation_2/info.txt', 'a') as f:
# f.write('img_id:%d, l1-loss: %f\n'%(vis_img_id,(crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()))
# f.close()
# ############################
# tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2) #
if use_local_OT:
D = torch.abs(tmp_trace_feats[:, :, :4].unsqueeze(2) - trace_feats[:, :, :4].unsqueeze(1)).mean(dim=-1)
T = local_OT(D).to(tmp_trace_feats.device)
loss = ((torch.abs(torch.matmul(tmp_trace_feats[:, :, :4].transpose(1, 2), T).transpose(1, 2) -
trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
print('loss', loss, 'loss_orig', (
(torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
loss_mask.sum() * 4)).item())
else:
loss = ((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
# loss = (crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; prev_gt_loss:', loss_prev_gt)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss = np.mean(np.array(loss_list))
val_loss_prev_gt = np.mean(np.array(loss_prev_gt_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; prev_gt_loss:', val_loss_prev_gt)
model.train()
return val_loss
def eval_trace_generation_classification(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
acc_list = []
with torch.no_grad():
# get the loss in terms of cross-entropy
model_outputs = model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks,
trace_masks, 'trace')
model_outputs = F.log_softmax(model_outputs, dim=-1)
model_outputs = model_outputs.view(-1, model_outputs.shape[2])
trace_class_label = trace_feats[:, :, 5] - 1
trace_class_label = trace_class_label.view(-1).long()
loss_ce = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1).item()
loss_ce_list.append(loss_ce)
gt_prev_acc = (((model_outputs.argmax(dim=1) == trace_class_label)*(trace_class_label!=-1).float()).sum() / \
(trace_class_label != -1).float().sum()).item()
gt_prev_acc_list.append(gt_prev_acc)
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats
trace_class_label = trace_feats[:,:,5] - 1
pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
for i in range(trace_feats.shape[1]):
# for regression
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
# for classification
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')[:,i]
curr_out = curr_out.argmax(dim=-1)
pred_class_label[:, i] = curr_out
curr_out = box_feats[np.arange(box_feats.shape[0]), curr_out]
tmp_trace_feats[:, i, :5] = curr_out
print('prev_gt_class_label', model_outputs.argmax(dim=1).view(pred_class_label.shape)[0])
print('pred_class_label',pred_class_label[0])
classification_acc = ((pred_class_label == trace_class_label) * trace_masks).sum() / trace_masks.sum()
acc_list.append(classification_acc.item())
tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss = crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; loss_ce:', loss_ce, '; classification-acc:', classification_acc.item(), '; gt_prev_acc:', gt_prev_acc)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss_ce = np.mean(np.array(loss_ce_list))
val_gt_prev_acc = np.mean(np.array(gt_prev_acc_list))
val_loss = np.mean(np.array(loss_list))
val_acc = np.mean(np.array(acc_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; loss_ce:', val_loss_ce, '; classification-acc:', val_acc, '; gt_prev_acc:', val_gt_prev_acc)
model.train()
return val_loss
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils_for_coco_caption.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .local_optimal_transport import local_OT
from ..models import utils as utils_models
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval_show_control_tell import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
# annFile = 'coco-caption/annotations/captions_LN_8kval.json' # load 8k LN validation set
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'data/f30k_captions4eval.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, task='caption', eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# assert task
assert task in ['caption', 'trace', 'both', 'show']
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
while True:
data = loader.get_batch(split)
# print('In eval_utils:', split)###zihang
print(num_images)
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks'], data['show_labels'], data['show_trace_feats'], data['show_trace_masks'], data['show_masks'], data['show_gate_labels']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, \
trace_masks, show_labels, show_trace_feats, show_trace_masks, show_masks, show_gate_labels = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
if task == 'caption':
loss = crit(model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, task=task), labels[..., 1:], masks[..., 1:]).item()
elif task == 'show':
loss = crit(
model(fc_feats, att_feats, show_trace_feats, box_feats, show_labels[..., :-1], att_masks,
show_trace_masks, show_gate_labels, task=task), show_labels[..., 1:], show_masks[..., 1:]).item()
elif task == 'both':
loss = crit(
model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks,
task=task)[0], labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
### repeat att feats
fc_feats, att_feats, att_masks, box_feats = utils_models.repeat_tensors(5,
[fc_feats, att_feats, att_masks, box_feats]
)
#############################
seq, seq_logprobs = model(fc_feats, att_feats, show_trace_feats, box_feats, att_masks, show_trace_masks, show_gate_labels, task, opt=tmp_eval_kwargs, mode='sample')
seq = seq.data
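# Per-sequence diagnostics for the sampled captions: 'entropy' is the average token-level
# entropy of the output distribution, and 'perplexity' is the average negative
# log-probability of the sampled tokens, both normalised by the generated length (+1).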
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
### log which caption has no bounding box
ids_no_box = (show_trace_feats[:, 0, 4] == 1).float()
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
for k, sent in enumerate(sents):
# entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
# entry to evaluate show-control-tell: separate the 5 predictions per image
if ids_no_box[k]==1:
continue
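# The features were repeated 5x above (one copy per control trace), so k // 5 recovers
# the source image and k % 5 indexes the controlled caption; offsetting image_id by
# 1000000 * (k % 5) lets the COCO evaluator score the five controlled captions separately.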
entry = {'image_id': data['infos'][k//5]['id'] + 1000000 * (k%5), 'caption': sent, 'perplexity': perplexity[k].item(),
'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data], eval_kwargs)
# ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
# print('len:', len(predictions), n, ix1, split, num_images) ###zihang
for i in range(n - ix1):
predictions.pop()
if verbose:
print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 0
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):  # 'loader' is not in scope here; infer the batch size from the features, as in the 'bs' branch
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
def eval_trace_generation(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
# print(data['infos'][0]['id'])
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
loss_prev_gt_list = []
acc_list = []
with torch.no_grad():
use_local_OT = False
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()
# trace_class_label = trace_feats[:,:,5] - 1
# pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
# prev_gt_correct
prev_gt_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')
prev_gt_out = prev_gt_out * trace_masks.unsqueeze(2)
# print(prev_gt_out[0, :, :5])
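# Keep only timesteps that are real (trace_masks != 0) and whose ground-truth box is not
# the whole-image placeholder (area feature == 1); the L1 error below is averaged over
# the four box coordinates of those steps.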
loss_prev_gt_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
loss_prev_gt = ((torch.abs(prev_gt_out[:, :, :4] - trace_feats[:, :, :4]) * loss_prev_gt_mask).sum() / (
loss_prev_gt_mask.sum() * 4)).item()
loss_prev_gt_list.append(loss_prev_gt)
for i in range(trace_feats.shape[1]):
# for regression
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')[:, i]
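# The 5th trace feature is the box area, so recompute it from the predicted
# (x1, y1, x2, y2) corners before feeding the box back in at the next step.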
curr_out[:, 4] = (curr_out[:, 2] - curr_out[:, 0]) * (curr_out[:, 3] - curr_out[:, 1])
tmp_trace_feats[:, i, :5] = curr_out
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks) # for non-iteratively
# break # for non-iteratively
# tmp_trace_feats = curr_out # for non-iteratively
# ### save for visualization # for visualization of trace_generation
# vis_img_id = data['infos'][0]['id']
# np.save('./vis/trace_generation_2/pred_trace_'+str(vis_img_id), tmp_trace_feats[:,:,:4].detach().cpu().numpy())
# np.save('./vis/trace_generation_2/gt_trace_' + str(vis_img_id), trace_feats[:,:,:4].detach().cpu().numpy())
# print(vis_img_id, crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item(), trace_feats.shape)
# with open('./vis/trace_generation_2/info.txt', 'a') as f:
# f.write('img_id:%d, l1-loss: %f\n'%(vis_img_id,(crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()))
# f.close()
# ############################
# tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2) #
if use_local_OT:
D = torch.abs(tmp_trace_feats[:, :, :4].unsqueeze(2) - trace_feats[:, :, :4].unsqueeze(1)).mean(dim=-1)
T = local_OT(D).to(tmp_trace_feats.device)
loss = ((torch.abs(torch.matmul(tmp_trace_feats[:, :, :4].transpose(1, 2), T).transpose(1, 2) -
trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
print('loss', loss, 'loss_orig', (
(torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
loss_mask.sum() * 4)).item())
else:
loss = ((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
# loss = (crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; prev_gt_loss:', loss_prev_gt)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss = np.mean(np.array(loss_list))
val_loss_prev_gt = np.mean(np.array(loss_prev_gt_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; prev_gt_loss:', val_loss_prev_gt)
model.train()
return val_loss
def eval_trace_generation_classification(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
acc_list = []
with torch.no_grad():
# get the loss in terms of cross-entropy
model_outputs = model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks,
trace_masks, 'trace')
model_outputs = F.log_softmax(model_outputs, dim=-1)
model_outputs = model_outputs.view(-1, model_outputs.shape[2])
trace_class_label = trace_feats[:, :, 5] - 1
trace_class_label = trace_class_label.view(-1).long()
loss_ce = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1).item()
loss_ce_list.append(loss_ce)
gt_prev_acc = (((model_outputs.argmax(dim=1) == trace_class_label)*(trace_class_label!=-1).float()).sum() / \
(trace_class_label != -1).float().sum()).item()
gt_prev_acc_list.append(gt_prev_acc)
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()  # clone: the loop below writes into this tensor and must not overwrite the ground-truth trace
trace_class_label = trace_feats[:,:,5] - 1
pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
for i in range(trace_feats.shape[1]):
# for regression
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
# for classification
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')[:,i]
curr_out = curr_out.argmax(dim=-1)
pred_class_label[:, i] = curr_out
curr_out = box_feats[np.arange(box_feats.shape[0]), curr_out]
tmp_trace_feats[:, i, :5] = curr_out
print('prev_gt_class_label', model_outputs.argmax(dim=1).view(pred_class_label.shape)[0])
print('pred_class_label',pred_class_label[0])
classification_acc = ((pred_class_label == trace_class_label) * trace_masks).sum() / trace_masks.sum()
acc_list.append(classification_acc.item())
tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss = crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; loss_ce:', loss_ce, '; classification-acc:', classification_acc.item(), '; gt_prev_acc:', gt_prev_acc)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss_ce = np.mean(np.array(loss_ce_list))
val_gt_prev_acc = np.mean(np.array(gt_prev_acc_list))
val_loss = np.mean(np.array(loss_list))
val_acc = np.mean(np.array(acc_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; loss_ce:', val_loss_ce, '; classification-acc:', val_acc, '; gt_prev_acc:', val_gt_prev_acc)
model.train()
return val_loss
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils_show_control_tell.py
|
connect-caption-and-trace-main
|
captioning/utils/__init__.py
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from captioning.utils import misc as utils
# from .local_optimal_transport import local_OT
# load coco-caption if available
try:
sys.path.append("coco-caption")
sys.path.append("/home/zihang/Research/Localized_Narratives/ImageCaptioning.pytorch")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap #COCOEvalCap_spice
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
# annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
annFile = 'coco-caption/annotations/captions_LN_8kval.json' # load 8k LN validation set
elif 'flickr30k' in dataset or 'f30k' in dataset or 'flk30k' in dataset:
annFile = 'coco-caption/annotations/captions_flk30k_LN_test.json'
elif 'ade20k' in dataset:
annFile = 'coco-caption/annotations/captions_ade20k_LN_test.json'
elif 'openimg' in dataset:
annFile = 'coco-caption/annotations/captions_openimg_LN_test.json'
print(annFile)
return COCO(annFile)
cache_path = '/home/zihang/Research/Localized_Narratives/ImageCaptioning.pytorch/eval_results/zihang_transformer_LN_try804_openimg_twolayer_joint_cycle_b_val.json'
score_list = []
l = len(json.load(open(cache_path)))
size_per_split = 1000000
num_splits = (l//size_per_split) + (1 if (l%size_per_split)!=0 else 0)
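# Evaluate the cached predictions in chunks of size_per_split entries and average the
# resulting metric over chunks (typically a single chunk with the default size).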
for i in range(num_splits):
coco = getCOCO('openimg')
valids = coco.getImgIds()
cocoRes = coco.loadRes(cache_path)#, split=i, size_per_split = size_per_split)
cocoEval = COCOEvalCap(coco, cocoRes) #_spice
cocoEval.params['image_id'] = cocoRes.getImgIds()
try:
cocoEval.evaluate()
except:
print('this split failed: #', i)
continue
out = {}
for metric, score in cocoEval.eval.items():
out[metric] = score
score_list.append(score)
print(i, '-th current_split:', score, 'Overall ave:', sum(score_list) / len(score_list))
print(score_list)
print(sum(score_list) / len(score_list))
# # Add mean perplexity
# out['perplexity'] = mean_perplexity
# out['entropy'] = mean_entropy
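# Note: the block below mirrors the tail of language_eval() and assumes preds_filt,
# model_id and split are already defined; they are not set in this standalone debug script.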
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_' + k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_' + k] = (out['SPICE_' + k][out['SPICE_' + k] == out['SPICE_' + k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
|
connect-caption-and-trace-main
|
captioning/utils/for_debug_eval_spice.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class myResnet(nn.Module):
def __init__(self, resnet):
super(myResnet, self).__init__()
self.resnet = resnet
def forward(self, img, att_size=14):
x = img.unsqueeze(0)
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x = self.resnet.layer1(x)
x = self.resnet.layer2(x)
x = self.resnet.layer3(x)
x = self.resnet.layer4(x)
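# fc: globally average-pooled feature vector from the final convolutional map;
# att: att_size x att_size spatial grid of features from adaptive average pooling.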
fc = x.mean(3).mean(2).squeeze()
att = F.adaptive_avg_pool2d(x,[att_size,att_size]).squeeze().permute(1, 2, 0)
return fc, att
|
connect-caption-and-trace-main
|
captioning/utils/resnet_utils.py
|
import torch
import torch.nn as nn
# model_zoo and model_urls are needed by the pretrained loaders below
# (model_urls is exposed by the torchvision releases this code targets)
import torch.utils.model_zoo as model_zoo
import torchvision.models.resnet
from torchvision.models.resnet import BasicBlock, Bottleneck, model_urls
class ResNet(torchvision.models.resnet.ResNet):
def __init__(self, block, layers, num_classes=1000):
super(ResNet, self).__init__(block, layers, num_classes)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) # change
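# Move the stride-2 downsampling in the first block of layer2-layer4 from its second
# conv to its first conv, leaving each layer's overall downsampling factor unchanged.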
for i in range(2, 5):
getattr(self, 'layer%d'%i)[0].conv1.stride = (2,2)
getattr(self, 'layer%d'%i)[0].conv2.stride = (1,1)
def resnet18(pretrained=False):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
connect-caption-and-trace-main
|
captioning/utils/resnet.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .local_optimal_transport import local_OT
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_coco_LN_test.json'
elif 'flickr30k' in dataset or 'f30k' in dataset or 'flk30k' in dataset:
annFile = 'coco-caption/annotations/captions_flk30k_LN_test.json'
elif 'ade20k' in dataset:
annFile = 'coco-caption/annotations/captions_ade20k_LN_test.json'
elif 'openimg' in dataset:
annFile = 'coco-caption/annotations/captions_openimg_LN_test.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, task='caption', eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# assert task
assert task in ['caption', 'trace', 'both']
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
trace_cost = []
while True:
data = loader.get_batch(split)
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'], data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
if task == 'caption':
loss = crit(model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, task=task), labels[..., 1:], masks[..., 1:]).item()
elif task == 'both':
loss = crit(
model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks,
task=task)[0], labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
if task == 'both':
seq, seq_logprobs, trace_predicted = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks,
task=task, opt=tmp_eval_kwargs, mode='sample')
else:
try:
seq, seq_logprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, task=task, opt=tmp_eval_kwargs, mode='sample')
except:
print('evaluation hit an error; skipping this batch')
continue
seq = seq.data
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
if task == 'both':
### compute the loss for trace
for k in range(trace_predicted.shape[0]):
tmp_gt_length = trace_masks[k].sum().long()
tmp_gt_trace = trace_feats[k, :tmp_gt_length]
tmp_pred_length = (seq[k]>0).sum().long()
tmp_pred_trace = trace_predicted[k, :tmp_pred_length]
# choose only boxes not [0,0,1,1,1] in the ground truth
nonzero_idx = torch.nonzero(tmp_gt_trace[:, 4] != 1).squeeze()
tmp_gt_trace = tmp_gt_trace[nonzero_idx]
if len(tmp_gt_trace.shape) < 2: # if there is only one chosen box in this trace
tmp_gt_trace = tmp_gt_trace.unsqueeze(0)
tmp_gt_trace = tmp_gt_trace.unsqueeze(0)
tmp_pred_trace = tmp_pred_trace.unsqueeze(0)
if tmp_pred_trace.shape[1] <= tmp_gt_trace.shape[1]:
tmp_trace1 = tmp_pred_trace
tmp_trace2 = tmp_gt_trace
else:
tmp_trace1 = tmp_gt_trace
tmp_trace2 = tmp_pred_trace
# processing in terms of segments of length 20
seg_loss_list = []
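# Compare the traces segment by segment: take 20 steps of the shorter trace at a time,
# pair them with the proportionally corresponding span of the longer trace (tmp_const
# steps), build the pairwise L1 cost matrix D, and solve a local optimal transport
# problem; the per-step transport cost is the LBM distance for this segment.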
for seg_idx in range(np.ceil(tmp_trace1.shape[1] / 20).astype(int)):
tmp_const = 20. * tmp_trace2.shape[1] / tmp_trace1.shape[1]
seg_tmp_trace1 = tmp_trace1[:, seg_idx * 20:(seg_idx + 1) * 20, :4]
seg_tmp_trace2 = tmp_trace2[:, np.floor(seg_idx * tmp_const).astype(int): np.ceil(
(seg_idx + 1) * tmp_const).astype(int), :4]
D = torch.abs(seg_tmp_trace1.unsqueeze(2) - seg_tmp_trace2.unsqueeze(1)).mean(dim=-1)
seg_tmp_T = local_OT(D, window = 0)
seg_tmp_cost = (seg_tmp_T * D).sum() / seg_tmp_trace1.shape[1]
if not torch.isnan(seg_tmp_cost):
seg_loss_list.append(seg_tmp_cost.item())
tmp_cost = np.mean(np.array(seg_loss_list))
if not np.isnan(tmp_cost):
trace_cost.append(tmp_cost)
print('trace LBM distance:', tmp_cost)
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
print('both trace running ave LBM loss :', np.mean(np.array(trace_cost)))
# ### save for visualization # for visualization of trace_generation
# for i in range(len(sents)):
# vis_img_id = data['infos'][i]['id']
# with open('./vis/both_generation_supplement/pred_caption/pred_caption_' + str(vis_img_id)+'.txt', 'w') as f:
# f.write(sents[i])
# np.save('./vis/both_generation_supplement/pred_trace/pred_trace_' + str(vis_img_id),
# trace_predicted[i, :, :4].detach().cpu().numpy())
# print(vis_img_id, trace_feats.shape)
# with open('./vis/both_generation_supplement/info.txt', 'a') as f:
# f.write('img_id:%d\n' %vis_img_id)
# f.close()
# ############################
# ### save for visualization # for visualization of caption_generation
# for i in range(len(sents)):
# vis_img_id = data['infos'][i]['id']
# tmp_dir = './vis/caption_generation_' + eval_kwargs['dataset_choice']
# if not os.path.exists(tmp_dir):
# os.makedirs(tmp_dir)
# os.makedirs(tmp_dir + '/pred_caption')
# os.makedirs(tmp_dir + '/gt_trace')
# with open('./vis/caption_generation_'+ eval_kwargs['dataset_choice'] +'/pred_caption/pred_caption_' + str(vis_img_id) + '.txt',
# 'w') as f:
# f.write(sents[i])
# np.save('./vis/caption_generation_'+ eval_kwargs['dataset_choice'] +'/gt_trace/gt_trace_' + str(vis_img_id),
# trace_feats[i, :, :4].detach().cpu().numpy())
# print(vis_img_id, trace_feats.shape)
# with open('./vis/caption_generation_'+ eval_kwargs['dataset_choice'] +'/info.txt', 'a') as f:
# f.write('img_id:%s\n' % str(vis_img_id))
# f.close()
# ############################
for k, sent in enumerate(sents):
entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data], eval_kwargs)
ix1 = data['bounds']['it_max']
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
for i in range(n - ix1):
predictions.pop()
if verbose:
print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
if task == 'both':
print('both trace total LBM loss:', np.mean(np.array(trace_cost)))
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 0
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):  # 'loader' is not in scope here; infer the batch size from the features, as in the 'bs' branch
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
def eval_trace_generation(model, crit, loader, window_size=0, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
loss_list = []
while True:
data = loader.get_batch(split)
ix1 = data['bounds']['it_max']
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
with torch.no_grad():
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()
for i in range(trace_feats.shape[1]):
# for regression
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')[:, i]
curr_out[:, 4] = (curr_out[:, 2] - curr_out[:, 0]) * (curr_out[:, 3] - curr_out[:, 1])
tmp_trace_feats[:, i, :5] = curr_out
# # ### save for visualization # for visualization of trace_generation
# sents = utils.decode_sequence(model.vocab, labels[:, 0, 1:])
# print(sents)
# loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2) #
# vis_img_id = data['infos'][0]['id']
# tmp_dir = './vis/trace_generation_' + eval_kwargs['dataset_choice']
# if not os.path.exists(tmp_dir):
# os.makedirs(tmp_dir)
# os.makedirs(tmp_dir + '/pred_trace')
# os.makedirs(tmp_dir + '/gt_trace')
# os.makedirs(tmp_dir + '/gt_caption')
# with open(tmp_dir + '/gt_caption/' + str(vis_img_id)+'.txt', 'w') as f:
# f.write(sents[0])
# # np.save('./vis/trace_generation_11_14/pred_caption_' + str(vis_img_id),
# # labels[..., 1:].detach().cpu().numpy())
# np.save(tmp_dir + '/pred_trace/' +str(vis_img_id), tmp_trace_feats[:,:,:4].detach().cpu().numpy())
# np.save(tmp_dir + '/gt_trace/' + str(vis_img_id), trace_feats[:,:,:4].detach().cpu().numpy())
# print(vis_img_id, crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item(), trace_feats.shape)
# with open(tmp_dir + '/info.txt', 'a') as f:
# f.write('img_id:%s, l1-loss: %f\n'%(str(vis_img_id),((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()))
# f.close()
# # ############################
use_local_OT = True #
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2) #
if use_local_OT:
batch_loss_list = []
for idx_trace in range(trace_feats.shape[0]):
tmp_gt_length = trace_masks[idx_trace].sum().long()
single_tmp_trace_feats = tmp_trace_feats[idx_trace, :tmp_gt_length]
single_trace_feats = trace_feats[idx_trace, :tmp_gt_length]
# choose only boxes not [0,0,1,1,1] in the ground truth
nonzero_idx = torch.nonzero(single_trace_feats[:,4]!=1).squeeze()
single_trace_feats = single_trace_feats[nonzero_idx]
if len(single_trace_feats.shape) < 2: # if there is only one chosen box in this trace
single_trace_feats = single_trace_feats.unsqueeze(0)
single_tmp_trace_feats = single_tmp_trace_feats.unsqueeze(0)
single_trace_feats = single_trace_feats.unsqueeze(0)
if single_tmp_trace_feats.shape[1] <= single_trace_feats.shape[1]:
tmp_trace1 = single_tmp_trace_feats
tmp_trace2 = single_trace_feats
else:
tmp_trace1 = single_trace_feats
tmp_trace2 = single_tmp_trace_feats
# processing in terms of segments of length 20
seg_loss_list = []
for seg_idx in range(np.ceil(tmp_trace1.shape[1]/20).astype(int)):
tmp_const = 20. * tmp_trace2.shape[1] / tmp_trace1.shape[1]
seg_tmp_trace1 = tmp_trace1[:, seg_idx*20:(seg_idx+1)*20, :4]
seg_tmp_trace2 = tmp_trace2[:, np.floor(seg_idx*tmp_const).astype(int) : np.ceil((seg_idx+1)*tmp_const).astype(int) , :4]
D = torch.abs(seg_tmp_trace1.unsqueeze(2) - seg_tmp_trace2.unsqueeze(1)).mean(dim=-1)
seg_tmp_T = local_OT(D, window = window_size)
seg_tmp_cost = (seg_tmp_T * D ).sum() / seg_tmp_trace1.shape[1]
if not torch.isnan(seg_tmp_cost):
seg_loss_list.append(seg_tmp_cost.item())
if len(seg_loss_list) != 0:
batch_loss_list.append(np.mean(np.array(seg_loss_list)))
loss = np.mean(np.array(batch_loss_list))
# loss_orig = ((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
else:
loss = ((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
# loss_orig = loss
if not np.isnan(loss):
loss_list.append(loss)
# loss_orig_list.append(loss_orig)
print('Running ave l1 loss:', np.mean(np.array(loss_list))) #, np.mean(np.array(loss_orig_list)))
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss = np.mean(np.array(loss_list))
print('Validation evaluation:', 'l1-loss:', val_loss)
model.train()
return val_loss
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils_joint.py
|
from __future__ import print_function
import argparse
def if_use_feat(caption_model):
# Decide if load attention feature according to caption model
if caption_model in ['show_tell', 'all_img', 'fc', 'newfc']:
use_att, use_fc = False, True
elif caption_model == 'language_model':
use_att, use_fc = False, False
elif caption_model in ['updown', 'topdown']:
use_fc, use_att = True, True
else:
use_att, use_fc = True, False
return use_fc, use_att
def parse_opt():
parser = argparse.ArgumentParser()
# training task: caption / trace / c_joint_t (joint training of controlled caption/trace generation)
# / pred_both (the task of predicting both caption and trace at the same time)
parser.add_argument('--task', type=str, default='c_joint_t',
help='The task to train on. Choose from caption/trace/c_joint_t/pred_both')
parser.add_argument('--eval_task', type=str, default='caption',
help='The task to evaluate on. Choose from caption/trace/pred_both')
# Data input settings
parser.add_argument('--input_json', type=str, default='data/coco.json',
help='path to the json file containing additional info and vocab')
parser.add_argument('--input_fc_dir', type=str, default='data/cocotalk_fc',
help='path to the directory containing the preprocessed fc feats')
parser.add_argument('--input_att_dir', type=str, default='data/cocotalk_att',
help='path to the directory containing the preprocessed att feats')
parser.add_argument('--input_box_dir', type=str, default='data/cocotalk_box',
help='path to the directory containing the boxes of att feats')
parser.add_argument('--input_trace_dir', type=str, default='data/cocoLN_trace_box_by_04second',
help='path to the directory containing the trace feats')
parser.add_argument('--input_trace_class_label_dir', type=str, default='data/trace_by_word_classification_label',
help='path to the directory containing the per-word trace classification labels')
parser.add_argument('--input_trace_feat_dir', type=str, default='/mnt/m2/Datasets/COCO/extracted_LN_trace_features_coco_by_word_correct_length',
help='path to trace feats (T*1024/2048)')
parser.add_argument('--input_label_h5', type=str, default='data/coco_label.h5',
help='path to the h5file containing the preprocessed dataset')
parser.add_argument('--data_in_memory', action='store_true',
help='True if we want to save the features in memory')
parser.add_argument('--start_from', type=str, default=None,
help="""continue training from saved model at this path. Path must contain files saved by previous training process:
'infos.pkl' : configuration;
'model.pth' : weights
""")
parser.add_argument('--cached_tokens', type=str, default='coco-train-idxs',
help='Cached token file for calculating cider score during self critical training.')
# Model settings
parser.add_argument('--caption_model', type=str, default="show_tell",
help='show_tell, show_attend_tell, all_img, fc, att2in, att2in2, att2all2, adaatt, adaattmo, updown, stackatt, denseatt, transformer')
parser.add_argument('--rnn_size', type=int, default=512,
help='size of the rnn in number of hidden nodes in each layer')
parser.add_argument('--num_layers', type=int, default=1,
help='number of layers in the RNN')
parser.add_argument('--rnn_type', type=str, default='lstm',
help='rnn, gru, or lstm')
parser.add_argument('--input_encoding_size', type=int, default=512,
help='the encoding size of each token in the vocabulary, and the image.')
parser.add_argument('--att_hid_size', type=int, default=512,
help='the hidden size of the attention MLP; only useful in show_attend_tell; 0 if not using hidden layer')
parser.add_argument('--fc_feat_size', type=int, default=2048,
help='2048 for resnet, 4096 for vgg')
parser.add_argument('--att_feat_size', type=int, default=2048,
help='2048 for resnet, 512 for vgg')
parser.add_argument('--logit_layers', type=int, default=1,
help='number of layers in the RNN')
parser.add_argument('--use_bn', type=int, default=0,
help='If 1, then do batch_normalization first in att_embed, if 2 then do bn both in the beginning and the end of att_embed')
# feature manipulation
parser.add_argument('--norm_att_feat', type=int, default=0,
help='If normalize attention features')
parser.add_argument('--use_box', type=int, default=0,
help='If use box features')
parser.add_argument('--norm_box_feat', type=int, default=0,
help='If use box, do we normalize box feature')
# Add trace information from Localized Narratives: shape = Tx5
parser.add_argument('--use_trace', type=int, default=0,
help='If use trace features')
# Add trace feature: shape = Tx1024(2048 later)
parser.add_argument('--use_trace_feat', type=int, default=0,
help='If use trace features')
parser.add_argument('--dataset_choice', type=str, default='coco',
help='use coco or flk30k or others?')
parser.add_argument('--trace_max_length', type=int, default=225,
help='max length of trace/sentences')
# Optimization: General
parser.add_argument('--max_epochs', type=int, default=-1,
help='number of epochs')
parser.add_argument('--batch_size', type=int, default=16,
help='minibatch size')
parser.add_argument('--grad_clip_mode', type=str, default='value',
help='value or norm')
parser.add_argument('--grad_clip_value', type=float, default=0.1,
help='clip gradients at this value/max_norm, 0 means no clipping')
parser.add_argument('--drop_prob_lm', type=float, default=0.5,
help='strength of dropout in the Language Model RNN')
parser.add_argument('--self_critical_after', type=int, default=-1,
help='After what epoch do we start self-critical training? (-1 = disable; 0 = from the start)')
parser.add_argument('--seq_per_img', type=int, default=5,
help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive. E.g. coco has 5 sents/image')
# Sample related
add_eval_sample_opts(parser)
#Optimization: for the Language Model
parser.add_argument('--optim', type=str, default='adam',
help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam|adamw')
parser.add_argument('--learning_rate', type=float, default=4e-4,
help='learning rate')
parser.add_argument('--learning_rate_decay_start', type=int, default=-1,
help='at what iteration to start decaying learning rate? (-1 = dont) (in epoch)')
parser.add_argument('--learning_rate_decay_every', type=int, default=3,
help='every how many iterations thereafter to drop LR?(in epoch)')
parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8,
help='multiplicative factor applied to the learning rate at each decay step')
parser.add_argument('--optim_alpha', type=float, default=0.9,
help='alpha for adam')
parser.add_argument('--optim_beta', type=float, default=0.999,
help='beta used for adam')
parser.add_argument('--optim_epsilon', type=float, default=1e-8,
help='epsilon that goes into denominator for smoothing')
parser.add_argument('--weight_decay', type=float, default=0,
help='weight_decay')
# Transformer
parser.add_argument('--label_smoothing', type=float, default=0,
help='')
parser.add_argument('--noamopt', action='store_true',
help='')
parser.add_argument('--noamopt_warmup', type=int, default=2000,
help='')
parser.add_argument('--noamopt_factor', type=float, default=1,
help='')
parser.add_argument('--reduce_on_plateau', action='store_true',
help='')
parser.add_argument('--reduce_on_plateau_factor', type=float, default=0.5,
help='')
parser.add_argument('--reduce_on_plateau_patience', type=int, default=3,
help='')
parser.add_argument('--cached_transformer', action='store_true',
help='')
parser.add_argument('--use_warmup', action='store_true',
help='warm up the learning rate?')
parser.add_argument('--scheduled_sampling_start', type=int, default=-1,
help='at what iteration to start decaying the probability of feeding the ground truth')
parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5,
help='every how many iterations thereafter to increase the scheduled sampling probability')
parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05,
help='How much to update the prob')
parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25,
help='Maximum scheduled sampling prob.')
# Evaluation/Checkpointing
parser.add_argument('--val_images_use', type=int, default=5000,
help='how many images to use when periodically evaluating the validation loss? (-1 = all)')
parser.add_argument('--save_checkpoint_every', type=int, default=2500,
help='how often to save a model checkpoint (in iterations)?')
parser.add_argument('--save_every_epoch', action='store_true',
help='Save checkpoint every epoch, will overwrite save_checkpoint_every')
parser.add_argument('--save_history_ckpt', type=int, default=0,
help='If save checkpoints at every save point')
parser.add_argument('--checkpoint_path', type=str, default=None,
help='directory to store checkpointed models')
parser.add_argument('--language_eval', type=int, default=0,
help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
parser.add_argument('--losses_log_every', type=int, default=25,
help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
parser.add_argument('--load_best_score', type=int, default=1,
help='Do we load previous best score when resuming training.')
# misc
parser.add_argument('--id', type=str, default='',
help='an id identifying this run/job. used in cross-val and appended when writing progress files')
parser.add_argument('--train_only', type=int, default=0,
help='if true then use 80k, else use 110k')
# Reward
parser.add_argument('--cider_reward_weight', type=float, default=1,
help='The reward weight from cider')
parser.add_argument('--bleu_reward_weight', type=float, default=0,
help='The reward weight from bleu4')
# Structure_loss
parser.add_argument('--structure_loss_weight', type=float, default=1,
help='')
parser.add_argument('--structure_after', type=int, default=-1,
help='T')
parser.add_argument('--structure_loss_type', type=str, default='seqnll',
help='')
parser.add_argument('--struc_use_logsoftmax', action='store_true', help='')
parser.add_argument('--entropy_reward_weight', type=float, default=0,
help='Entropy reward, seems very interesting')
parser.add_argument('--self_cider_reward_weight', type=float, default=0,
help='self cider reward')
# Used for self critical or structure. Used when sampling is need during training
parser.add_argument('--train_sample_n', type=int, default=16,
help='number of samples drawn per image when sampling is needed during training')
parser.add_argument('--train_sample_method', type=str, default='sample',
help='')
parser.add_argument('--train_beam_size', type=int, default=1,
help='')
# Used for self critical
parser.add_argument('--sc_sample_method', type=str, default='greedy',
help='')
parser.add_argument('--sc_beam_size', type=int, default=1,
help='')
# For diversity evaluation during training
add_diversity_opts(parser)
# config
parser.add_argument('--cfg', type=str, default=None,
help='configuration; similar to what is used in detectron')
    parser.add_argument(
        '--set_cfgs', dest='set_cfgs',
        help='Set config keys. Key-value sequence separated by whitespace, '
             'e.g. [key] [value] [key] [value].\n This has higher priority '
             'than the cfg file but lower than other command-line args. (You can only overwrite '
             'arguments that have already been defined in the config file.)',
        default=[], nargs='+')
    # How the config will be used:
    # 1) read the cfg argument, and load the cfg file if it's not None
    # 2) overwrite the loaded config with set_cfgs
    # 3) copy the config values into args
    # 4) in the end, parse the command line again so explicit flags overwrite args
    # (a minimal standalone sketch of this precedence follows after this function)
# step 1: read cfg_fn
args = parser.parse_args()
if args.cfg is not None or args.set_cfgs is not None:
from .config import CfgNode
if args.cfg is not None:
cn = CfgNode(CfgNode.load_yaml_with_base(args.cfg))
else:
cn = CfgNode()
if args.set_cfgs is not None:
cn.merge_from_list(args.set_cfgs)
for k,v in cn.items():
if not hasattr(args, k):
print('Warning: key %s not in args' %k)
setattr(args, k, v)
args = parser.parse_args(namespace=args)
# Check if args are valid
assert args.rnn_size > 0, "rnn_size should be greater than 0"
assert args.num_layers > 0, "num_layers should be greater than 0"
assert args.input_encoding_size > 0, "input_encoding_size should be greater than 0"
assert args.batch_size > 0, "batch_size should be greater than 0"
assert args.drop_prob_lm >= 0 and args.drop_prob_lm < 1, "drop_prob_lm should be between 0 and 1"
assert args.seq_per_img > 0, "seq_per_img should be greater than 0"
assert args.beam_size > 0, "beam_size should be greater than 0"
assert args.save_checkpoint_every > 0, "save_checkpoint_every should be greater than 0"
assert args.losses_log_every > 0, "losses_log_every should be greater than 0"
assert args.language_eval == 0 or args.language_eval == 1, "language_eval should be 0 or 1"
    assert args.load_best_score == 0 or args.load_best_score == 1, "load_best_score should be 0 or 1"
    assert args.train_only == 0 or args.train_only == 1, "train_only should be 0 or 1"
# default value for start_from and checkpoint_path
args.checkpoint_path = args.checkpoint_path or './log_%s' %args.id
args.start_from = args.start_from or args.checkpoint_path
# Deal with feature things before anything
args.use_fc, args.use_att = if_use_feat(args.caption_model)
#if args.use_box: args.att_feat_size = args.att_feat_size + 5 # commented by zihang
return args
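# --- Illustration only (not part of the original options) ---------------------------------
# A minimal, self-contained sketch of the precedence implemented in parse_opt() above:
# defaults < cfg file / --set_cfgs < explicit command-line flags. It uses plain argparse
# instead of this repo's CfgNode, and the option names below are hypothetical.
def _config_precedence_sketch():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--caption_model', type=str, default='show_tell')
    argv = ['--batch_size', '32']                                  # what the user typed
    config = {'caption_model': 'transformer', 'batch_size': 64}    # what the cfg file provides
    args = parser.parse_args(argv)                   # step 1: defaults + command line
    for k, v in config.items():                      # steps 2/3: config values overwrite args
        setattr(args, k, v)
    args = parser.parse_args(argv, namespace=args)   # step 4: explicit flags win again
    assert args.batch_size == 32                     # flag given on the command line wins
    assert args.caption_model == 'transformer'       # config fills in what the CLI left alone
    return args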
def add_eval_options(parser):
# Basic options
parser.add_argument('--batch_size', type=int, default=0,
help='if > 0 then overrule, otherwise load from checkpoint.')
parser.add_argument('--num_images', type=int, default=-1,
help='how many images to use when periodically evaluating the loss? (-1 = all)')
parser.add_argument('--language_eval', type=int, default=0,
help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
parser.add_argument('--dump_images', type=int, default=1,
help='Dump images into vis/imgs folder for vis? (1=yes,0=no)')
parser.add_argument('--dump_json', type=int, default=1,
help='Dump json with predictions into vis folder? (1=yes,0=no)')
parser.add_argument('--dump_path', type=int, default=0,
help='Write image paths along with predictions into vis json? (1=yes,0=no)')
# Sampling options
add_eval_sample_opts(parser)
# For evaluation on a folder of images:
parser.add_argument('--image_folder', type=str, default='',
help='If this is nonempty then will predict on the images in this folder path')
parser.add_argument('--image_root', type=str, default='',
                        help='In case the image paths have to be prepended with a root path to an image folder')
# For evaluation on MSCOCO images from some split:
parser.add_argument('--input_fc_dir', type=str, default='',
help='path to the h5file containing the preprocessed dataset')
parser.add_argument('--input_att_dir', type=str, default='',
help='path to the h5file containing the preprocessed dataset')
parser.add_argument('--input_box_dir', type=str, default='',
help='path to the h5file containing the preprocessed dataset')
parser.add_argument('--input_trace_dir', type=str, default='',
help='path to trace boxes (T*5)') ## h5file??
parser.add_argument('--input_trace_feat_dir', type=str, default='',
help='path to trace feats (T*1024/2048)') ## h5file??
parser.add_argument('--input_label_h5', type=str, default='',
help='path to the h5file containing the preprocessed dataset')
parser.add_argument('--input_json', type=str, default='',
help='path to the json file containing additional info and vocab. empty = fetch from model checkpoint.')
parser.add_argument('--split', type=str, default='test',
help='if running on MSCOCO images, which split to use: val|test|train')
parser.add_argument('--coco_json', type=str, default='',
help='if nonempty then use this file in DataLoaderRaw (see docs there). Used only in MSCOCO test evaluation, where we have a specific json file of only test set images.')
# misc
parser.add_argument('--id', type=str, default='',
help='an id identifying this run/job. used only if language_eval = 1 for appending to intermediate files')
parser.add_argument('--verbose_beam', type=int, default=1,
help='if we need to print out all beam search beams.')
    parser.add_argument('--verbose_loss', type=int, default=0,
                        help='If 1, calculate the loss using ground truth during evaluation')
def add_diversity_opts(parser):
    parser.add_argument('--sample_n', type=int, default=1,
                        help='number of captions to sample per image (for diversity evaluation)')
parser.add_argument('--sample_n_method', type=str, default='sample',
help='sample, bs, dbs, gumbel, topk, dgreedy, dsample, dtopk, dtopp')
    parser.add_argument('--eval_oracle', type=int, default=1,
                        help='if we need to run oracle evaluation over the sampled captions')
# Sampling related options
def add_eval_sample_opts(parser):
parser.add_argument('--sample_method', type=str, default='greedy',
help='greedy; sample; gumbel; top<int>, top<0-1>')
parser.add_argument('--beam_size', type=int, default=1,
help='used when sample_method = greedy, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
parser.add_argument('--max_length', type=int, default=20,
help='Maximum length during sampling')
parser.add_argument('--length_penalty', type=str, default='',
help='wu_X or avg_X, X is the alpha')
parser.add_argument('--group_size', type=int, default=1,
help='used for diverse beam search. if group_size is 1, then it\'s normal beam search')
parser.add_argument('--diversity_lambda', type=float, default=0.5,
help='used for diverse beam search. Usually from 0.2 to 0.8. Higher value of lambda produces a more diverse list')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature when sampling from distributions (i.e. when sample_method = sample). Lower = "safer" predictions.')
    parser.add_argument('--decoding_constraint', type=int, default=0,
                        help='If 1, disallow generating the same word twice in a row')
parser.add_argument('--block_trigrams', type=int, default=0,
help='block repeated trigram.')
parser.add_argument('--remove_bad_endings', type=int, default=0,
help='Remove bad endings')
    parser.add_argument('--suppress_UNK', type=int, default=1,
                        help='If 1, suppress the UNK token during prediction')
if __name__ == '__main__':
import sys
sys.argv = [sys.argv[0]]
args = parse_opt()
print(args)
print()
sys.argv = [sys.argv[0], '--cfg', 'configs/updown_long.yml']
args1 = parse_opt()
print(dict(set(vars(args1).items()) - set(vars(args).items())))
print()
sys.argv = [sys.argv[0], '--cfg', 'configs/updown_long.yml', '--caption_model', 'att2in2']
args2 = parse_opt()
print(dict(set(vars(args2).items()) - set(vars(args1).items())))
|
connect-caption-and-trace-main
|
captioning/utils/opts.py
|
import torch
import scipy.optimize
import numpy as np
def local_OT(D, window=0):
    p = D.shape[1]; m = D.shape[2]  # p < m, e.g., p = 10, m = 20
    # construct the LP: minimize c.x subject to A x = b (row sums / band constraints) and G x <= h (column sums)
A = torch.zeros([p,p*m])
b = torch.ones([p])
for i in range(p):
A[i, (i)*m:(i+1)*m] = 1
G = torch.zeros([m, p*m])
for i in range(m):
for j in range(p):
G[i, j*m+i] = 1
h = torch.ones([m])
A_local = torch.zeros([p, p, m])
for i in range(p):
# left = np.floor((i - window) * (m*1.0/p))
# right = (i + window) * (m*1.0/p)
left = (i - window) * (m * 1.0 / p)
right = (i + 1 + window) * (m * 1.0 / p)
for j in range(m):
# if j < left or j >= right:
if j < left or j >= right:
A_local[i, i, j] = 1
# if i+window+1<=m-1:
# A_local[i, i, i+(window+1):] = 1
# if i-(window+1) >=0:
# A_local[i, i, :i-window] = 1
A_local = A_local.view([p, p*m])
b_local = torch.zeros([p])
A = torch.cat([A, A_local], 0).numpy()
b = torch.cat([b, b_local], 0).numpy()
G = G.numpy()
h = h.numpy()
T_list = []
for i in range(D.shape[0]):
c = D[i].view(-1).detach().cpu().numpy()
try:
sol = scipy.optimize.linprog(c, A_ub = G, b_ub = h, A_eq = A, b_eq = b, bounds=(0, 1)) #options={'maxiter': 200, 'sym_pos':False}
sol_x = torch.from_numpy(sol.x).view([p,m]).float()
except:
sol_x = torch.cat([torch.eye(p), torch.zeros(p, m-p)], 1)
T_list.append(sol_x)
T = torch.stack(T_list, 0)
return T.to(D.device) #(D * T.cuda()).sum() / p #(T>0.5).float() # binarize it
### for debug
# D = torch.rand([1, 10, 20])
# cost_orig = torch.diag(D[0]).sum()
# T = local_OT(D)
# cost_new = (D * T).sum()
# print(cost_orig, cost_new)
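# A minimal usage sketch (assuming torch and scipy are installed): D is a (batch, p, m) cost
# matrix with p <= m; local_OT returns a (batch, p, m) transport plan whose rows each sum to 1
# and whose mass stays inside a diagonal band controlled by `window`.
if __name__ == '__main__':
    D = torch.rand([2, 10, 20])
    T = local_OT(D, window=0)
    print(T.shape)                           # torch.Size([2, 10, 20])
    print(T.sum(dim=2)[0])                   # each row of the plan sums to (approximately) 1
    cost_naive = torch.diagonal(D, dim1=1, dim2=2).sum()
    cost_ot = (D * T).sum()
    print(cost_naive.item(), cost_ot.item()) # compare the plan's cost with the naive diagonal matching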
|
connect-caption-and-trace-main
|
captioning/utils/local_optimal_transport.py
|
from random import uniform
import numpy as np
from collections import OrderedDict, defaultdict
from itertools import tee
import time
# -----------------------------------------------
def find_ngrams(input_list, n):
return zip(*[input_list[i:] for i in range(n)])
def compute_div_n(caps,n=1):
aggr_div = []
for k in caps:
all_ngrams = set()
lenT = 0.
for c in caps[k]:
tkns = c.split()
lenT += len(tkns)
ng = find_ngrams(tkns, n)
all_ngrams.update(ng)
aggr_div.append(float(len(all_ngrams))/ (1e-6 + float(lenT)))
return np.array(aggr_div).mean(), np.array(aggr_div)
def compute_global_div_n(caps,n=1):
aggr_div = []
all_ngrams = set()
lenT = 0.
for k in caps:
for c in caps[k]:
tkns = c.split()
lenT += len(tkns)
ng = find_ngrams(tkns, n)
all_ngrams.update(ng)
if n == 1:
aggr_div.append(float(len(all_ngrams)))
else:
aggr_div.append(float(len(all_ngrams))/ (1e-6 + float(lenT)))
return aggr_div[0], np.repeat(np.array(aggr_div),len(caps))
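# A small toy example (hypothetical captions) of the two measures above: compute_div_n averages
# the per-image distinct-n-gram ratios, while compute_global_div_n pools all captions first.
if __name__ == '__main__':
    caps = {
        'img1': ['a dog runs', 'a dog sits'],
        'img2': ['a cat sleeps', 'a cat sleeps'],
    }
    mean_div1, per_img_div1 = compute_div_n(caps, n=1)
    print(mean_div1, per_img_div1)    # mean of [4/6, 3/6] and the per-image ratios themselves
    global_div1, _ = compute_global_div_n(caps, n=1)
    print(global_div1)                # 6.0: number of distinct unigrams over the whole corpus (n == 1)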
|
connect-caption-and-trace-main
|
captioning/utils/div_utils.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
from collections import OrderedDict
import torch
import sys
try:
sys.path.append("cider")
from pyciderevalcap.ciderD.ciderD import CiderD
from pyciderevalcap.cider.cider import Cider
sys.path.append("coco-caption")
from pycocoevalcap.bleu.bleu import Bleu
except:
print('cider or coco-caption missing')
CiderD_scorer = None
Cider_scorer = None
Bleu_scorer = None
#CiderD_scorer = CiderD(df='corpus')
def init_scorer(cached_tokens):
global CiderD_scorer
CiderD_scorer = CiderD_scorer or CiderD(df=cached_tokens)
global Cider_scorer
Cider_scorer = Cider_scorer or Cider(df=cached_tokens)
global Bleu_scorer
Bleu_scorer = Bleu_scorer or Bleu(4)
def array_to_str(arr):
out = ''
for i in range(len(arr)):
out += str(arr[i]) + ' '
if arr[i] == 0:
break
return out.strip()
def get_self_critical_reward(greedy_res, data_gts, gen_result, opt):
batch_size = len(data_gts)
gen_result_size = gen_result.shape[0]
seq_per_img = gen_result_size // len(data_gts) # gen_result_size = batch_size * seq_per_img
assert greedy_res.shape[0] == batch_size
res = OrderedDict()
gen_result = gen_result.data.cpu().numpy()
greedy_res = greedy_res.data.cpu().numpy()
for i in range(gen_result_size):
res[i] = [array_to_str(gen_result[i])]
for i in range(batch_size):
res[gen_result_size + i] = [array_to_str(greedy_res[i])]
gts = OrderedDict()
for i in range(len(data_gts)):
gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
res_ = [{'image_id':i, 'caption': res[i]} for i in range(len(res))]
res__ = {i: res[i] for i in range(len(res_))}
gts_ = {i: gts[i // seq_per_img] for i in range(gen_result_size)}
gts_.update({i+gen_result_size: gts[i] for i in range(batch_size)})
if opt.cider_reward_weight > 0:
_, cider_scores = CiderD_scorer.compute_score(gts_, res_)
print('Cider scores:', _)
else:
cider_scores = 0
if opt.bleu_reward_weight > 0:
_, bleu_scores = Bleu_scorer.compute_score(gts_, res__)
bleu_scores = np.array(bleu_scores[3])
print('Bleu scores:', _[3])
else:
bleu_scores = 0
scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores
scores = scores[:gen_result_size].reshape(batch_size, seq_per_img) - scores[-batch_size:][:, np.newaxis]
scores = scores.reshape(gen_result_size)
rewards = np.repeat(scores[:, np.newaxis], gen_result.shape[1], 1)
return rewards
def get_scores(data_gts, gen_result, opt):
batch_size = gen_result.size(0)# batch_size = sample_size * seq_per_img
seq_per_img = batch_size // len(data_gts)
res = OrderedDict()
gen_result = gen_result.data.cpu().numpy()
for i in range(batch_size):
res[i] = [array_to_str(gen_result[i])]
gts = OrderedDict()
for i in range(len(data_gts)):
gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
res_ = [{'image_id':i, 'caption': res[i]} for i in range(batch_size)]
res__ = {i: res[i] for i in range(batch_size)}
gts = {i: gts[i // seq_per_img] for i in range(batch_size)}
if opt.cider_reward_weight > 0:
_, cider_scores = CiderD_scorer.compute_score(gts, res_)
print('Cider scores:', _)
else:
cider_scores = 0
if opt.bleu_reward_weight > 0:
_, bleu_scores = Bleu_scorer.compute_score(gts, res__)
bleu_scores = np.array(bleu_scores[3])
print('Bleu scores:', _[3])
else:
bleu_scores = 0
scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores
return scores
def get_self_cider_scores(data_gts, gen_result, opt):
batch_size = gen_result.size(0)# batch_size = sample_size * seq_per_img
seq_per_img = batch_size // len(data_gts)
res = []
gen_result = gen_result.data.cpu().numpy()
for i in range(batch_size):
res.append(array_to_str(gen_result[i]))
scores = []
for i in range(len(data_gts)):
tmp = Cider_scorer.my_self_cider([res[i*seq_per_img:(i+1)*seq_per_img]])
def get_div(eigvals):
eigvals = np.clip(eigvals, 0, None)
return -np.log(np.sqrt(eigvals[-1]) / (np.sqrt(eigvals).sum())) / np.log(len(eigvals))
scores.append(get_div(np.linalg.eigvalsh(tmp[0]/10)))
scores = np.array(scores)
return scores
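# A minimal numeric sketch (toy scores, no CIDEr/BLEU scorers needed) of the baseline
# subtraction used in get_self_critical_reward above: the last `batch_size` entries of
# `scores` come from the greedy decodes and act as a per-image baseline for the samples.
if __name__ == '__main__':
    batch_size, seq_per_img, seq_len = 2, 3, 4
    gen_result_size = batch_size * seq_per_img
    # pretend metric scores: 6 sampled captions followed by 2 greedy (baseline) captions
    scores = np.array([0.9, 0.7, 0.5, 0.4, 0.6, 0.8, 0.6, 0.5])
    advantages = (scores[:gen_result_size].reshape(batch_size, seq_per_img)
                  - scores[-batch_size:][:, np.newaxis])
    rewards = np.repeat(advantages.reshape(gen_result_size)[:, np.newaxis], seq_len, 1)
    print(advantages)      # [[ 0.3  0.1 -0.1] [-0.1  0.1  0.3]]
    print(rewards.shape)   # (6, 4): one advantage value broadcast over every token of its caption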
|
connect-caption-and-trace-main
|
captioning/utils/rewards.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .local_optimal_transport import local_OT
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
# annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
annFile = 'coco-caption/annotations/captions_LN_8kval.json' # load 8k LN validation set # use on 11/01
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'data/f30k_captions4eval.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
while True:
data = loader.get_batch(split)
# print('In eval_utils:', split)###zihang
print(num_images)
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'], data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
loss = crit(model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'caption'), labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
seq, seq_logprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
seq = seq.data
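            # entropy: mean per-token entropy of the output distribution; 'perplexity' here is
            # actually the mean negative log-probability of the generated tokens (not exponentiated)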
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
for k, sent in enumerate(sents):
entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data], eval_kwargs)
# ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
# print('len:', len(predictions), n, ix1, split, num_images) ###zihang
for i in range(n - ix1):
predictions.pop()
if verbose:
            print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 0
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
        for k in range(fc_feats.shape[0]):  # note: `loader` is not in scope here; use the batch dimension directly
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
def eval_trace_generation(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
# print(data['infos'][0]['id'])
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
loss_prev_gt_list = []
acc_list = []
with torch.no_grad():
use_local_OT = False
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()
# trace_class_label = trace_feats[:,:,5] - 1
# pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
# prev_gt_correct
prev_gt_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)
prev_gt_out = prev_gt_out * trace_masks.unsqueeze(2)
# print(prev_gt_out[0, :, :5])
loss_prev_gt_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
loss_prev_gt = ((torch.abs(prev_gt_out[:, :, :4] - trace_feats[:, :, :4]) * loss_prev_gt_mask).sum() / (
loss_prev_gt_mask.sum() * 4)).item()
loss_prev_gt_list.append(loss_prev_gt)
for i in range(trace_feats.shape[1]):
# for regression
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
curr_out[:, 4] = (curr_out[:, 2] - curr_out[:, 0]) * (curr_out[:, 3] - curr_out[:, 1])
tmp_trace_feats[:, i, :5] = curr_out
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks) # for non-iteratively
# break # for non-iteratively
# tmp_trace_feats = curr_out # for non-iteratively
# ### save for visualization # for visualization of trace_generation
# vis_img_id = data['infos'][0]['id']
# np.save('./vis/trace_generation_2/pred_trace_'+str(vis_img_id), tmp_trace_feats[:,:,:4].detach().cpu().numpy())
# np.save('./vis/trace_generation_2/gt_trace_' + str(vis_img_id), trace_feats[:,:,:4].detach().cpu().numpy())
# print(vis_img_id, crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item(), trace_feats.shape)
# with open('./vis/trace_generation_2/info.txt', 'a') as f:
# f.write('img_id:%d, l1-loss: %f\n'%(vis_img_id,(crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()))
# f.close()
# ############################
# tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2) #
if use_local_OT:
D = torch.abs(tmp_trace_feats[:, :, :4].unsqueeze(2) - trace_feats[:, :, :4].unsqueeze(1)).mean(dim=-1)
T = local_OT(D).to(tmp_trace_feats.device)
loss = ((torch.abs(torch.matmul(tmp_trace_feats[:, :, :4].transpose(1, 2), T).transpose(1, 2) -
trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
print('loss', loss, 'loss_orig', (
(torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
loss_mask.sum() * 4)).item())
else:
loss = ((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
# loss = (crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; prev_gt_loss:', loss_prev_gt)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss = np.mean(np.array(loss_list))
val_loss_prev_gt = np.mean(np.array(loss_prev_gt_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; prev_gt_loss:', val_loss_prev_gt)
model.train()
return val_loss
def eval_trace_generation_classification(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
acc_list = []
with torch.no_grad():
# get the loss in terms of cross-entropy
model_outputs = model(fc_feats, att_feats, trace_feats[:,:,1:], box_feats, labels[..., :-1], att_masks,
trace_masks)
model_outputs = F.log_softmax(model_outputs, dim=-1)
model_outputs = model_outputs.view(-1, model_outputs.shape[2])
trace_class_label = trace_feats[:, :, 0] * (trace_feats[:, :, 5] != 1).float() - 1
trace_class_label = trace_class_label.view(-1).long()
loss_ce = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1).item()
loss_ce_list.append(loss_ce)
gt_prev_acc = (((model_outputs.argmax(dim=1) == trace_class_label)*(trace_class_label!=-1).float()).sum() / \
(trace_class_label != -1).float().sum()).item()
gt_prev_acc_list.append(gt_prev_acc)
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()
trace_class_label = trace_feats[:, :, 0] * (trace_feats[:, :, 5] != 1).float() - 1
pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
for i in range(trace_feats.shape[1]):
# for regression
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
# for classification
curr_out = model(fc_feats, att_feats, tmp_trace_feats[:,:,1:], box_feats, labels[..., :-1], att_masks, trace_masks)[:,i]
curr_out = curr_out.argmax(dim=-1)
pred_class_label[:, i] = curr_out
curr_out = box_feats[np.arange(box_feats.shape[0]), curr_out]
tmp_trace_feats[:, i, 1:6] = curr_out
# print('prev_gt_class_label', model_outputs.argmax(dim=1).view(pred_class_label.shape)[0])
# print('pred_class_label',pred_class_label[0])
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 5] != 1))
classification_acc = ((pred_class_label == trace_class_label) * loss_mask).sum().float() / loss_mask.sum()
acc_list.append(classification_acc.item())
loss_mask = loss_mask.unsqueeze(2)
loss = ((torch.abs(tmp_trace_feats[:, :, 1:5] - trace_feats[:, :, 1:5]) * loss_mask).sum() / (
loss_mask.sum() * 4)).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; loss_ce:', loss_ce, '; classification-acc:', classification_acc.item(), '; gt_prev_acc:', gt_prev_acc)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss_ce = np.mean(np.array(loss_ce_list))
val_gt_prev_acc = np.mean(np.array(gt_prev_acc_list))
val_loss = np.mean(np.array(loss_list))
val_acc = np.mean(np.array(acc_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; loss_ce:', val_loss_ce, '; classification-acc:', val_acc, '; gt_prev_acc:', val_gt_prev_acc)
model.train()
return val_loss
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
#annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
annFile = 'coco-caption/annotations/captions_LN_8kval.json' # load 8k LN validation set
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'data/f30k_captions4eval.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
while True:
data = loader.get_batch(split)
# print('In eval_utils:', split)###zihang
print(num_images)
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'], data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
loss = crit(model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks), labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
seq, seq_logprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
seq = seq.data
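            # entropy: mean per-token entropy of the output distribution; 'perplexity' here is
            # actually the mean negative log-probability of the generated tokens (not exponentiated)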
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
for k, sent in enumerate(sents):
entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data], eval_kwargs)
# ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
# print('len:', len(predictions), n, ix1, split, num_images) ###zihang
for i in range(n - ix1):
predictions.pop()
if verbose:
            print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 0
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
        for k in range(fc_feats.shape[0]):  # note: `loader` is not in scope here; use the batch dimension directly
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
def eval_trace_generation(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
print(data['infos'][0]['id'])
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
loss_prev_gt_list = []
acc_list = []
with torch.no_grad():
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats
trace_class_label = trace_feats[:,:,5] - 1
pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
# prev_gt_correct
prev_gt_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)
prev_gt_out = prev_gt_out * trace_masks.unsqueeze(2)
loss_prev_gt = crit(prev_gt_out[:, :, :4], trace_feats[:, :, :4]).item()
loss_prev_gt_list.append(loss_prev_gt)
for i in range(trace_feats.shape[1]):
# for regression
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')
# tmp_trace_feats[:, i, :5] = curr_out
break
tmp_trace_feats = curr_out
### save for visualization # for visualization of trace_generation
# vis_img_id = data['infos'][0]['id']
# np.save('./vis/trace_generation/pred_trace_'+str(vis_img_id), curr_out.detach().cpu().numpy())
# np.save('./vis/trace_generation/gt_trace_' + str(vis_img_id), trace_feats.detach().cpu().numpy())
# print(vis_img_id, crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item(), trace_feats.shape)
# with open('./vis/trace_generation/info.txt', 'a') as f:
# f.write('img_id:%d, l1-loss: %f\n'%(vis_img_id,(crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()))
# f.close()
############################
tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss = (torch.abs(tmp_trace_feats[:,:,:4] - trace_feats[:,:,:4]).sum() / ((trace_masks!=0).sum() * 4)).item()
# loss = (crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; prev_gt_loss:', loss_prev_gt)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss = np.mean(np.array(loss_list))
val_loss_prev_gt = np.mean(np.array(loss_prev_gt_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; prev_gt_loss:', val_loss_prev_gt)
model.train()
return val_loss
def eval_trace_generation_classification(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
acc_list = []
with torch.no_grad():
# get the loss in terms of cross-entropy
model_outputs = model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks,
trace_masks)
model_outputs = F.log_softmax(model_outputs, dim=-1)
model_outputs = model_outputs.view(-1, model_outputs.shape[2])
trace_class_label = trace_feats[:, :, 5] - 1
trace_class_label = trace_class_label.view(-1).long()
loss_ce = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1).item()
loss_ce_list.append(loss_ce)
gt_prev_acc = (((model_outputs.argmax(dim=1) == trace_class_label)*(trace_class_label!=-1).float()).sum() / \
(trace_class_label != -1).float().sum()).item()
gt_prev_acc_list.append(gt_prev_acc)
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats
trace_class_label = trace_feats[:,:,5] - 1
pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
for i in range(trace_feats.shape[1]):
# for regression
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
# for classification
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:,i]
curr_out = curr_out.argmax(dim=-1)
pred_class_label[:, i] = curr_out
curr_out = box_feats[np.arange(box_feats.shape[0]), curr_out]
tmp_trace_feats[:, i, :5] = curr_out
print('prev_gt_class_label', model_outputs.argmax(dim=1).view(pred_class_label.shape)[0])
print('pred_class_label',pred_class_label[0])
classification_acc = ((pred_class_label == trace_class_label) * trace_masks).sum() / trace_masks.sum()
acc_list.append(classification_acc.item())
tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss = crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; loss_ce:', loss_ce, '; classification-acc:', classification_acc.item(), '; gt_prev_acc:', gt_prev_acc)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss_ce = np.mean(np.array(loss_ce_list))
val_gt_prev_acc = np.mean(np.array(gt_prev_acc_list))
val_loss = np.mean(np.array(loss_list))
val_acc = np.mean(np.array(acc_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; loss_ce:', val_loss_ce, '; classification-acc:', val_acc, '; gt_prev_acc:', val_gt_prev_acc)
model.train()
return val_loss
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils_caption_generation.py
|
# This file contains Transformer network
# Most of the code is copied from http://nlp.seas.harvard.edu/2018/04/03/attention.html
# The cfg name correspondence:
# N=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel_both import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator_caption, generator_trace, d_model, dropout):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator_caption = generator_caption
self.generator_trace = generator_trace
# self.decode_layernorm = nn.LayerNorm(d_model, elementwise_affine=True)
# self.dropout = nn.Dropout(dropout)
self.trace_layernorm_caption = nn.LayerNorm(d_model, elementwise_affine=True)
self.trace_layernorm_trace = nn.LayerNorm(d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(d_model,0) # here don't use dropout inside positional embedding
self.trace_embed = nn.Sequential(*(
(nn.Linear(5, d_model),
nn.LayerNorm(d_model, elementwise_affine=True),
nn.ReLU(),
nn.Dropout(0.5)) ))
self.trace_feat_embed = nn.Sequential(*(
(nn.Linear(2048, d_model),
nn.LayerNorm(d_model, elementwise_affine=True),
nn.ReLU(),
nn.Dropout(0.5))))
def forward(self, src, tgt, src_mask, tgt_mask, trace_feat, trace_masks, task):
"Take in and process masked src and target sequences."
memory = self.encode(src, src_mask)
return self.decode(memory, src_mask, tgt, tgt_mask, trace_feat, trace_masks, task), memory
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask, trace_feats, trace_masks, task):
# if task == 'trace':
### get trace_feat
# trace_grid_feats = trace_feats[:, :, 5:]
trace_feats = trace_feats[:, :, :5]
# trace_grid_feats = self.trace_feat_embed(trace_grid_feats)
trace_feats = self.trace_embed(trace_feats)
trace_feats = self.trace_layernorm_trace(self.position_encoder(trace_feats))
### embed the tgt and then add the trace_grid_feat: add trace_feat in the beginning
tgt_emd = self.tgt_embed(tgt, task) #, task
# if tgt.shape[1] > trace_feats.shape[1]:
# trace_feats = torch.cat([trace_feats, torch.zeros([trace_feats.shape[0], tgt_emd.shape[1]-trace_feats.shape[1],
# trace_feats.shape[2]]).to(trace_feats.device)], 1)
# else:
# trace_feats = trace_feats[:, :tgt_emd.shape[1], :]
# tgt_emd = self.dropout(self.decode_layernorm(tgt_emd + trace_feat))
return self.decoder(tgt_emd, trace_feats, memory, src_mask, tgt_mask, trace_masks, task)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
self.norm_2 = LayerNorm(layer.size)
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
for layer in self.layers:
x = layer(x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task)
if task == 'both':
return self.norm(x[0]), self.norm_2(x[1])
else:
return self.norm(x)
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, caption_trace_attn, trace_caption_attn, trace_self_attn, trace_src_attn,
feed_forward_caption, feed_forward_trace, both_caption_trace_attn, both_trace_caption_attn,
both_feed_forward_caption, both_feed_forward_trace,dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward_caption = feed_forward_caption
self.feed_forward_trace = feed_forward_trace
# self.sublayer = clones(SublayerConnection(size, dropout), 3)
self.sublayer = clones(SublayerConnection(size, dropout), 8+4)
###
self.caption_trace_attn = caption_trace_attn
self.trace_caption_attn = trace_caption_attn
self.trace_self_attn = trace_self_attn
self.trace_src_attn = trace_src_attn
### both attn
self.both_caption_trace_attn = both_caption_trace_attn
self.both_trace_caption_attn = both_trace_caption_attn
self.both_feed_forward_caption = both_feed_forward_caption
self.both_feed_forward_trace = both_feed_forward_trace
###########
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
"Follow Figure 1 (right) for connections."
m = memory
if task == 'trace' or task == 'cycle_trace':
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
            ### add a layer for x to attend to the trace features
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat, trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
# trace_feat = self.sublayer[6](trace_feat, lambda trace_feat: self.trace_caption_attn(trace_feat, x, x, tgt_mask))
################################################
return self.sublayer[7](trace_feat, self.feed_forward_trace)
elif task == 'caption':
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
            ### add a layer for x to attend to the trace features
# trace_mask = tgt_mask[:, -1, :].unsqueeze(1).long()
trace_masks = trace_masks.unsqueeze(1)
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat,
trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
x = self.sublayer[4](x, lambda x: self.caption_trace_attn(x, trace_feat, trace_feat, trace_masks))
################################################
return self.sublayer[5](x, self.feed_forward_caption)
elif task == 'both':
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
            ### add a layer for x to attend to the trace features
# trace_mask = tgt_mask[:, -1, :].unsqueeze(1).long()
# trace_masks = trace_masks.unsqueeze(1)
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat,
trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
trace_masks_for_caption = torch.cat([trace_masks,
trace_masks[:, -1, :].unsqueeze(1).repeat(1,tgt_mask.shape[1]-trace_masks.shape[1],1)], 1)
tgt_mask_for_trace = tgt_mask[:, :trace_masks.shape[1], :]
x_out = self.sublayer[8](x, lambda x: self.both_caption_trace_attn(x, trace_feat, trace_feat, trace_masks_for_caption))
trace_feat_out = self.sublayer[9](trace_feat,
lambda trace_feat: self.both_trace_caption_attn(trace_feat, x, x, tgt_mask_for_trace))
return self.sublayer[10](x_out, self.both_feed_forward_caption), self.sublayer[11](trace_feat_out, self.both_feed_forward_trace)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
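# Illustrative example (not part of the original model; helper name is only for demonstration):
# subsequent_mask(3) is a lower-triangular boolean mask, so decoder position i may only
# attend to positions j <= i:
#   [[[ True, False, False],
#     [ True,  True, False],
#     [ True,  True,  True]]]
def _subsequent_mask_demo():
    return subsequent_mask(3)    # shape (1, 3, 3), dtype torch.bool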
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, float('-inf'))
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
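# Illustrative sketch (not part of the original model; helper name and toy shapes are assumptions):
# scaled dot-product attention using the (batch, heads, len, d_k) layout that
# MultiHeadedAttention below produces. The mask broadcasts over the head and query dimensions.
def _attention_shape_demo():
    q = torch.randn(2, 8, 5, 64)              # queries
    k = torch.randn(2, 8, 7, 64)              # keys
    v = torch.randn(2, 8, 7, 64)              # values
    mask = torch.ones(2, 1, 1, 7)             # 1 = keep, 0 = mask out
    out, p_attn = attention(q, k, v, mask=mask)
    return out.shape, p_attn.shape            # (2, 8, 5, 64), (2, 8, 5, 7)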
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
self.vocab = vocab
# self.layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
def forward(self, x, task=None):
if task != 'cycle_trace':
return self.lut(x) * math.sqrt(self.d_model)
else:
# # use gumbel softmax with \tau = 1
x = torch.nn.functional.softmax(torch.log(x) -
torch.log(-torch.log(torch.rand([x.shape[2]]))).unsqueeze(0).unsqueeze(0).to(x.device),
dim=-1)
return torch.matmul(x, self.lut(torch.arange(self.vocab).to(x.device))) \
* math.sqrt(self.d_model)
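# Illustrative sketch (not part of the original model; helper name and toy sizes are assumptions):
# for the 'cycle_trace' task the input is assumed to be a (batch, seq, vocab) probability
# distribution rather than integer token ids. A Gumbel-softmax style relaxation (tau = 1) is
# applied and the result is a soft mixture of embedding rows, keeping the lookup differentiable
# with respect to the input distribution.
def _soft_embedding_demo():
    vocab, d_model = 100, 512
    emb = Embeddings(d_model, vocab)
    probs = F.softmax(torch.randn(2, 7, vocab), dim=-1)    # soft "tokens"
    soft = emb(probs, task='cycle_trace')                  # (2, 7, d_model)
    hard = emb(torch.randint(0, vocab, (2, 7)))            # ordinary id lookup
    return soft.shape, hard.shape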
class caption_Embeddings(nn.Module):
def __init__(self, d_model, vocab, position_encoder):
super(caption_Embeddings, self).__init__()
self.position_encoder = position_encoder
self.embed = Embeddings(d_model, vocab)
def forward(self, x, task):
x = self.embed(x, task)
x = self.position_encoder(x)
return x
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
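# Illustrative sketch (not part of the original model; helper name is only for demonstration):
# the fixed sinusoidal position codes are simply added to the input embeddings; with a zero
# input the output is the raw position code for each time step.
def _positional_encoding_demo():
    pe = PositionalEncoding(d_model=512, dropout=0.0)
    x = torch.zeros(1, 10, 512)
    return pe(x).shape            # torch.Size([1, 10, 512])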
class TransformerModel(AttModel):
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model, dropout)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
# position_nodropout = PositionalEncoding(d_model, 0)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N_enc),
Decoder(DecoderLayer(d_model, c(attn), c(attn), c(attn), c(attn), c(attn), c(attn),
c(ff), c(ff), c(attn), c(attn), c(ff), c(ff), dropout), N_dec),
lambda x:x, # nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
caption_Embeddings(d_model, tgt_vocab, c(position)), #nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)), #
Generator(d_model, tgt_vocab), nn.Sequential(nn.Linear(d_model, 5), nn.Sigmoid()),
d_model,dropout)
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def __init__(self, opt):
super(TransformerModel, self).__init__(opt)
self.opt = opt
# self.config = yaml.load(open(opt.config_file))
self.N_enc = getattr(opt, 'N_enc', opt.num_layers)
self.N_dec = getattr(opt, 'N_dec', opt.num_layers)
self.d_model = getattr(opt, 'd_model', opt.input_encoding_size)
self.d_ff = getattr(opt, 'd_ff', opt.rnn_size)
self.h = getattr(opt, 'num_att_heads', 8)
self.dropout = getattr(opt, 'dropout', 0.1)
self.use_trace_feat = getattr(opt, 'use_trace_feat', 0)
delattr(self, 'att_embed')
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
# define trace embedding and layernorm
# self.trace_embed = nn.Linear(5, self.d_model)
self.box_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ()) +
(nn.Linear(5, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)) +
((nn.BatchNorm1d(self.d_model),) if self.use_bn == 2 else ())))
self.trace_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ())+
(nn.Linear(5, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
self.trace_feat_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ()) +
(nn.Linear(2048, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)) +
((nn.BatchNorm1d(self.d_model),) if self.use_bn == 2 else ())))
self.box_layernorm1 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.box_layernorm2 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm1 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm2 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm3 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm4 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.att_layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(self.d_model, self.dropout)
delattr(self, 'embed')
self.embed = lambda x : x
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
delattr(self, 'logit')
del self.ctx2att
tgt_vocab = self.vocab_size + 1
print(self.N_enc, self.N_dec, self.d_model, self.d_ff, self.h, self.dropout)
self.model = self.make_model(0, tgt_vocab,
N_enc=self.N_enc,
N_dec=self.N_dec,
d_model=self.d_model,
d_ff=self.d_ff,
h=self.h,
dropout=self.dropout)
c = copy.deepcopy
# attn = MultiHeadedAttention(h, self.d_model, self.dropout)
# ff = PositionwiseFeedForward(self.d_model, self.d_ff, self.dropout)
position = PositionalEncoding(self.d_model, self.dropout)
self.caption_embed = caption_Embeddings(self.d_model, tgt_vocab, c(position))
def logit(self, x): # unsafe way
return self.model.generator_caption.proj(x)
def init_hidden(self, bsz):
return []
def _prepare_feature(self, fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks):
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
if self.opt.use_trace_feat:
trace_grid_feats = trace_feats[:, :, 5:]
trace_feats = trace_feats[:, :, :5]
trace_grid_feats = self.trace_layernorm3(self.trace_feat_embed(trace_grid_feats))
# trace_grid_feats = self.position_encoder(trace_grid_feats)
# trace_grid_feats = self.trace_layernorm4(trace_grid_feats)
trace_feats = self.trace_layernorm1(self.trace_embed(trace_feats))
if self.opt.use_trace_feat:
trace_feats = trace_feats + trace_grid_feats
# trace_feats_to_decoder = trace_feats
trace_feats = self.position_encoder(trace_feats) # add positional embedding
trace_feats = self.trace_layernorm2(trace_feats)
            ### commented out for testing: trace features are not fed to the encoder, only to the decoder
# att_feats = torch.cat([att_feats, trace_feats], 1) # concat with trace feats
# att_masks = torch.cat([att_masks, trace_masks.unsqueeze(1)], 2)
###########################
memory = self.model.encode(att_feats, att_masks)
return fc_feats[...,:0], att_feats[...,:0], memory, att_masks, trace_feats_to_decoder
def _prepare_feature_forward(self, att_feats, box_feats, att_masks=None, seq=None):
# comment for classification
# att_feats, box_feats, att_masks = self.clip_att(att_feats, box_feats, att_masks)
# original version by ruotian
# att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# my version: without pack and pad
att_feats = self.att_embed(att_feats)
if att_masks is None:
att_masks = att_feats.new_ones(att_feats.shape[:2], dtype=torch.long)
att_masks = att_masks.unsqueeze(-2)
if seq is not None:
# crop the last one
# seq = seq[:,:-1]
seq_mask = (seq.data != self.eos_idx) & (seq.data != self.pad_idx)
seq_mask[:,0] = 1 # bos
seq_mask = seq_mask.unsqueeze(-2)
seq_mask = seq_mask & subsequent_mask(seq.size(-1)).to(seq_mask)
seq_per_img = seq.shape[0] // att_feats.shape[0]
if seq_per_img > 1:
att_feats, att_masks = utils.repeat_tensors(seq_per_img,
[att_feats, att_masks]
)
else:
seq_mask = None
return att_feats, box_feats, seq, att_masks, seq_mask
def _forward(self, fc_feats, att_feats, trace_feats, box_feats, seq, att_masks=None, trace_masks=None, task = None):
assert task == 'trace' or task == 'caption' or task == 'both' or task == 'cycle_trace'
if task != 'cycle_trace':
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
if task == 'both':
### get the original caption input
tmp_seq = seq[:, :trace_masks.shape[1]]
_, _, _, _, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
## prepare the shifted trace
shifted_trace = torch.cat(
[torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
if self.training:
random_mask_rate = 0
else:
random_mask_rate = 0
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(shifted_trace.device).unsqueeze(0).unsqueeze(1)
# out = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks)
(out_caption, out_trace), memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask,
task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs_caption = self.model.generator_caption(out_caption)
outputs_trace = self.model.generator_trace(out_trace)
return outputs_caption, outputs_trace
elif task == 'trace' or task == 'cycle_trace':
# for classification
trace_feats = trace_feats[:, :, :5]
### get the original caption input
tmp_seq = torch.ones([trace_masks.shape[0], trace_masks.shape[1]]).to(trace_masks.device) # seq[:, :trace_masks.shape[1]]
seq = seq[:, 1:trace_masks.shape[1]+1] # crop the seq to real length
seq_mask = trace_masks.unsqueeze(1)
att_feats, box_feats, tmp_seq, att_masks, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
## prepare the shifted trace
shifted_trace = torch.cat([torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
if self.training:
random_mask_rate = 0
else:
random_mask_rate = 0
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
# if torch.rand(1) > 0.5: # half [0,0,1,1,1], half random
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(
shifted_trace.device).unsqueeze(0).unsqueeze(1)
# else:
# tmp_1 = torch.rand([shifted_trace.shape[0], shifted_trace.shape[1], 2]).sort(dim=2)[0]
# tmp_2 = torch.rand([shifted_trace.shape[0], shifted_trace.shape[1], 2]).sort(dim=2)[0]
# tmp = torch.stack([tmp_1[:, :, 0], tmp_2[:, :, 0], tmp_1[:, :, 1], tmp_2[:, :, 1],
# (tmp_1[:, :, 1] - tmp_1[:, :, 0]) * (tmp_2[:, :, 1] - tmp_2[:, :, 0])], 2)
# shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
# (1 - random_mask) * tmp.to(shifted_trace.device)
# concat the caption into visual features
seq_emd = self.caption_embed(seq, task)
att_feats = torch.cat([att_feats, seq_emd], 1)
att_masks = torch.cat([att_masks, seq_mask], 2)
# att_masks = torch.ones([att_feats.shape[0], 1, att_feats.shape[1]]).to(att_feats.device)
# out = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks)
out, memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask, task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs = self.model.generator_trace(out)
# for classification, use (masked) dot product to provide logits
# out = out / torch.norm(out, dim=2).unsqueeze(2)
# memory = memory / torch.norm(memory, dim=2).unsqueeze(2)
# outputs = torch.matmul(out, memory.transpose(1,2))
# memory_mask = att_masks
# outputs = outputs.masked_fill(memory_mask == 0, float('-inf'))
#
# outputs = F.softmax(outputs, dim=-1)
# outputs = (outputs.unsqueeze(3) * box_feats.unsqueeze(1)).sum(dim=2)
# print('transformer_out',outputs.argmax(dim=-1)[0])
return outputs
elif task == 'caption':
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
out, _ = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks, task)
outputs = self.model.generator_caption(out)
return outputs
# return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask, trace_feats_to_decoder, trace_masks, task):
"""
state = [ys.unsqueeze(0)]
"""
if len(state) == 0:
ys = it.unsqueeze(1)
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
if task == 'caption':
out = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, trace_masks, 'caption')
return out[:, -1], [ys.unsqueeze(0)]
elif task == 'both':
out_caption, out_trace = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, subsequent_mask(ys.size(1)).to(memory.device), 'both')
return out_caption[:, -1], [ys.unsqueeze(0)], out_trace
|
connect-caption-and-trace-main
|
captioning/models/TransformerModel_trace_generation_caption_to_encoder.py
|
# This file contains Transformer network
# Most of the code is copied from http://nlp.seas.harvard.edu/2018/04/03/attention.html
# The cfg name correspondence:
# N=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
"Take in and process masked src and target sequences."
return self.decode(self.encode(src, src_mask), src_mask,
tgt, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask, past=None):
return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask, past=past)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
_x = sublayer(self.norm(x))
if type(_x) is tuple: # for multi-head attention that returns past
return x + self.dropout(_x[0]), _x[1]
return x + self.dropout(_x)
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, memory, src_mask, tgt_mask, past=None):
if past is not None:
present = [[], []]
x = x[:, -1:]
tgt_mask = tgt_mask[:, -1:] if tgt_mask is not None else None
past = list(zip(past[0].split(2, dim=0), past[1].split(2, dim=0)))
else:
past = [None] * len(self.layers)
for i, (layer, layer_past) in enumerate(zip(self.layers, past)):
x = layer(x, memory, src_mask, tgt_mask,
layer_past)
if layer_past is not None:
present[0].append(x[1][0])
present[1].append(x[1][1])
x = x[0]
if past[0] is None:
return self.norm(x)
else:
return self.norm(x), [torch.cat(present[0], 0), torch.cat(present[1], 0)]
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 3)
def forward(self, x, memory, src_mask, tgt_mask, layer_past=None):
"Follow Figure 1 (right) for connections."
m = memory
if layer_past is None:
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
else:
present = [None, None]
x, present[0] = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask, layer_past[0]))
x, present[1] = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask, layer_past[1]))
return self.sublayer[2](x, self.feed_forward), present
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, float('-inf'))
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None, layer_past=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
        # The past works differently here. For self-attention, the cached key and value are
        # updated incrementally with each new token. For src-attention the past is fixed: once
        # the cached key/value cover the full memory, they are reused instead of re-projected.
if layer_past is not None and layer_past.shape[2] == key.shape[1] > 1: # suppose memory size always greater than 1
query = self.linears[0](query)
key, value = layer_past[0], layer_past[1]
present = torch.stack([key, value])
else:
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x) for l, x in zip(self.linears, (query, key, value))]
# self attn + past OR the first time step of src attn
if layer_past is not None and not (layer_past.shape[2] == key.shape[1] > 1):
past_key, past_value = layer_past[0], layer_past[1]
key = torch.cat((past_key, key), dim=1)
value = torch.cat((past_value, value), dim=1)
present = torch.stack([key, value])
query, key, value = \
[x.view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for x in [query, key, value]]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
if layer_past is not None:
return self.linears[-1](x), present
else:
return self.linears[-1](x)
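# Illustrative sketch (not part of the original code; helper name and toy sizes are assumptions):
# with layer_past provided, the module caches projected key/value tensors so decoding can
# proceed one token at a time. An empty past (length-0 cache) signals the first step; the
# returned `present` stacks the updated key/value cache.
def _cached_attention_demo():
    attn = MultiHeadedAttention(h=8, d_model=512)
    x = torch.randn(2, 1, 512)                      # one new decoder token
    empty_past = torch.zeros(2, 2, 0, 512)          # (k/v, batch, cached_len, d_model)
    out, present = attn(x, x, x, mask=None, layer_past=empty_past)
    return out.shape, present.shape                 # (2, 1, 512), (2, 2, 1, 512)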
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_model)
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
class TransformerModel(AttModel):
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model, dropout)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N_enc),
Decoder(DecoderLayer(d_model, c(attn), c(attn),
c(ff), dropout), N_dec),
lambda x:x, # nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
Generator(d_model, tgt_vocab))
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def __init__(self, opt):
super(TransformerModel, self).__init__(opt)
self.opt = opt
# self.config = yaml.load(open(opt.config_file))
self.N_enc = getattr(opt, 'N_enc', opt.num_layers)
self.N_dec = getattr(opt, 'N_dec', opt.num_layers)
self.d_model = getattr(opt, 'd_model', opt.input_encoding_size)
self.d_ff = getattr(opt, 'd_ff', opt.rnn_size)
self.h = getattr(opt, 'num_att_heads', 8)
self.dropout = getattr(opt, 'dropout', 0.1)
delattr(self, 'att_embed')
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
delattr(self, 'embed')
self.embed = lambda x : x
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
delattr(self, 'logit')
del self.ctx2att
tgt_vocab = self.vocab_size + 1
self.model = self.make_model(0, tgt_vocab,
N_enc=self.N_enc,
N_dec=self.N_dec,
d_model=self.d_model,
d_ff=self.d_ff,
h=self.h,
dropout=self.dropout)
def logit(self, x): # unsafe way
return self.model.generator.proj(x)
def init_hidden(self, bsz):
return []
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, att_masks)
memory = self.model.encode(att_feats, att_masks)
return fc_feats[...,:0], att_feats[...,:0], memory, att_masks
def _prepare_feature_forward(self, att_feats, att_masks=None, seq=None):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
if att_masks is None:
att_masks = att_feats.new_ones(att_feats.shape[:2], dtype=torch.long)
att_masks = att_masks.unsqueeze(-2)
if seq is not None:
# crop the last one
# seq = seq[:,:-1]
seq_mask = (seq.data != self.eos_idx) & (seq.data != self.pad_idx)
seq_mask[:,0] = 1 # bos
seq_mask = seq_mask.unsqueeze(-2)
seq_mask = seq_mask & subsequent_mask(seq.size(-1)).to(seq_mask)
seq_per_img = seq.shape[0] // att_feats.shape[0]
if seq_per_img > 1:
att_feats, att_masks = utils.repeat_tensors(seq_per_img,
[att_feats, att_masks]
)
else:
seq_mask = None
return att_feats, seq, att_masks, seq_mask
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
att_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, att_masks, seq)
out = self.model(att_feats, seq, att_masks, seq_mask)
outputs = self.model.generator(out)
return outputs
# return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask):
"""
state is the precomputed key/value. N_dec x seq_len x d_model
        Note: due to the layer norm, it is not equivalent to the stateless version,
        but it seems to behave similarly
"""
# state is tokens + past
if len(state) == 0:
ys = it.unsqueeze(1)
# basically empty state, just to let it know to return past
# The second dim has to be batch_size, for beam search purpose
past = [fc_feats_ph.new_zeros(self.N_dec * 2, fc_feats_ph.shape[0], 0, self.d_model), # self
fc_feats_ph.new_zeros(self.N_dec * 2, fc_feats_ph.shape[0], 0, self.d_model)] # src
# 2 for self attn, 2 for src attn
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
past = state[1:]
out, past = self.model.decode(memory, mask,
ys, # We still feed the full past words, because we need it for position embedding to know the position id
subsequent_mask(ys.size(1))
.to(memory.device),
past=past)
return out[:, -1], [ys.unsqueeze(0)] + past
|
connect-caption-and-trace-main
|
captioning/models/cachedTransformer.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from . import utils
from .CaptionModel import CaptionModel
class ShowTellModel(CaptionModel):
def __init__(self, opt):
super(ShowTellModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = opt.seq_length
self.fc_feat_size = opt.fc_feat_size
self.ss_prob = 0.0 # Schedule sampling probability
self.img_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.core = getattr(nn, self.rnn_type.upper())(self.input_encoding_size, self.rnn_size, self.num_layers, bias=False, dropout=self.drop_prob_lm)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
def init_hidden(self, bsz):
weight = self.logit.weight
if self.rnn_type == 'lstm':
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
else:
return weight.new_zeros(self.num_layers, bsz, self.rnn_size)
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
batch_size = fc_feats.size(0)
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = []
if seq_per_img > 1:
fc_feats = utils.repeat_tensors(seq_per_img, fc_feats)
for i in range(seq.size(1) + 1):
if i == 0:
xt = self.img_embed(fc_feats)
else:
                if self.training and i >= 2 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.data.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i-1].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i-1].data.clone()
#prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
#it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i-1].clone()
# break if all the sequences end
if i >= 2 and seq[:, i-1].data.sum() == 0:
break
xt = self.embed(it)
output, state = self.core(xt.unsqueeze(0), state)
output = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
outputs.append(output)
return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()
def get_logprobs_state(self, it, state):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt.unsqueeze(0), state)
logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
return logprobs, state
def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
batch_size = fc_feats.size(0)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
for t in range(2):
if t == 0:
xt = self.img_embed(fc_feats[k:k+1]).expand(beam_size, self.input_encoding_size)
elif t == 1: # input <bos>
it = fc_feats.data.new(beam_size).long().zero_()
xt = self.embed(it)
output, state = self.core(xt.unsqueeze(0), state)
logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
self.done_beams[k] = self.beam_search(state, logprobs, opt=opt)
seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
def _sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self.sample_beam(fc_feats, att_feats, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
seq = fc_feats.new_zeros(batch_size, self.seq_length, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size, self.seq_length)
for t in range(self.seq_length + 2):
if t == 0:
xt = self.img_embed(fc_feats)
else:
if t == 1: # input <bos>
it = fc_feats.data.new(batch_size).long().zero_()
xt = self.embed(it)
output, state = self.core(xt.unsqueeze(0), state)
logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
# sample the next word
if t == self.seq_length + 1: # skip if we achieve maximum length
break
if sample_method == 'greedy':
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
else:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
it = torch.multinomial(prob_prev, 1).to(logprobs.device)
sampleLogprobs = logprobs.gather(1, it) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
if t >= 1:
# stop when all finished
if t == 1:
unfinished = it > 0
else:
unfinished = unfinished & (it > 0)
it = it * unfinished.type_as(it)
seq[:,t-1] = it #seq[t] the input of t+2 time step
seqLogprobs[:,t-1] = sampleLogprobs.view(-1)
if unfinished.sum() == 0:
break
return seq, seqLogprobs
|
connect-caption-and-trace-main
|
captioning/models/ShowTellModel.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the img feature embedding and word embedding is the same as what in adaatt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
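# Illustrative sketch (not part of the original model; helper name is only for demonstration):
# with remove_bad_endings enabled, _sample() below forbids emitting EOS right after one of the
# words above. `vocab` is assumed to map token-id strings to words, as in opt.vocab.
def _bad_endings_demo(vocab):
    bad_ix = [int(k) for k, v in vocab.items() if v in bad_endings]
    prev = np.array([3, 17])                 # previously generated token ids
    return np.isin(prev, bad_ix)             # True where the last word is a bad ending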
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
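# Illustrative sketch (not part of the original model; helper name and toy shapes are assumptions):
# pack_wrapper applies `module` only to the valid (unmasked) positions by packing the padded
# batch, so layers such as Linear/BatchNorm never see padding; the output is re-padded with
# zeros back to the original (batch, max_len, ...) shape.
def _pack_wrapper_demo():
    att_feats = torch.randn(3, 6, 2048)                        # padded features
    att_masks = torch.tensor([[1., 1., 1., 1., 0., 0.],
                              [1., 1., 1., 1., 1., 1.],
                              [1., 1., 0., 0., 0., 0.]])
    out = pack_wrapper(nn.Linear(2048, 512), att_feats, att_masks)
    return out.shape                                           # torch.Size([3, 6, 512])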
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task)
else:
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks,
trace_feats_to_decoder, trace_masks, task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
trace_feats_to_decoder, trace_masks, show_gate_labels, task, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, show_gate_labels, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=output_logsoftmax)
elif task == 'both':
                # pass None for show_gate_labels (unused by the 'both' branch) so the positional
                # arguments line up with get_logprobs_state's signature
                logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats,
                                                                         p_att_masks, tmp_trace_feats, trace_masks,
                                                                         None, task, state,
                                                                         output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
        ### for decoder evaluation: cut at the ground-truth caption length
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                        # Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                        unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
            PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assumes the first mask entry is a one, so the sentinel slot stays unmasked.
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not a correct replication actually. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_both_backup_2020_11_11.py
|
# This file contains the Att2in2, AdaAtt, AdaAttMO, and UpDown models
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the image feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
from functools import reduce
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
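# pack_wrapper applies `module` only to the valid (un-padded) timesteps: it packs
# att_feats by the per-sample lengths in att_masks, runs the module on the packed
# data, then unpacks and restores the original batch order.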
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
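            # scheduled sampling: with probability ss_prob, replace the ground-truth
            # previous word with a word sampled from the model's distribution at the
            # previous step (only active during training, from the second word on).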
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, trace_feats_to_decoder, trace_masks, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, _ = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None,opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, trace_feats_to_decoder, trace_masks, output_logsoftmax=output_logsoftmax)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
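                # with the default indices (eos_idx == pad_idx == 0), column 0 is the
                # end token, so this forbids ending the caption right after a word
                # from the bad_endings list.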
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
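                # each previous use of this trigram multiplies the word's probability by
                # exp(-0.693 * alpha) ~= 2 ** -alpha (0.25 for alpha = 2.0), a soft
                # alternative to the hard -1e9 blocking commented out above.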
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
        ### for decoder evaluation: cut at the ground-truth caption length
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
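                    # penalize words already chosen by earlier groups at this timestep by
                    # subtracting diversity_lambda from their logprobs (diverse decoding,
                    # similar in spirit to diverse beam search).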
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                        # Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                        unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
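                # maxout variant: the candidate cell input is the elementwise max of two
                # linear transforms instead of a single tanh nonlinearity.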
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
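        # prepend the "fake region" (the visual sentinel of AdaAtt) as one extra attention
        # slot, so the model can attend to the language state instead of any image region.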
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
            PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assumes the first mask entry is a one, so the sentinel slot stays unmasked.
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
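        # two-layer Up-Down decoder: the attention LSTM reads [previous language hidden,
        # pooled image feature, word embedding]; the language LSTM then reads the
        # attended feature together with the attention LSTM's hidden state.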
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
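        # additive (Bahdanau-style) attention: score_i = alpha_net(tanh(h2att(h) + p_att_feats_i)),
        # softmax over regions, then a mask-renormalized weighted sum of att_feats.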
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
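        # att2in: the attended image feature enters the cell only through the input
        # transform (via a2c); the three gates see just the word and hidden state.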
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not a correct replication actually. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
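        # att2all: the attended feature is added to all five pre-activation chunks
        # (gates and input transform) through a2h, unlike att2in above.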
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
        # Cases handled here: normal MLE training, sampling, beam search
        # (including diverse beam search), and a fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of Att2in,
# in which the image feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import reduce  # needed by the multi-layer logit construction in AttModel
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
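# Illustrative sketch (added, not part of the original code): pack_wrapper lets a
# per-timestep module such as nn.Linear run only over the valid (unpadded) region
# features. The shapes and values below are made up for the demo.
def _pack_wrapper_sketch():
    feats = torch.randn(2, 4, 8)                      # padded region features
    masks = torch.tensor([[1., 1., 1., 0.],
                          [1., 1., 0., 0.]])          # 3 and 2 valid regions
    proj = nn.Linear(8, 16)
    out = pack_wrapper(proj, feats, masks)
    # out has shape (2, 3, 16): padded up to the longest valid length in the batch,
    # with zeros at the padded positions.
    return out.shape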
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
        self.ss_prob = 0.0 # Scheduled sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
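    # Added note (not original): the branch above implements scheduled sampling. With
    # probability self.ss_prob, a position i >= 1 is fed a word sampled from the
    # model's own distribution at step i-1 (prob_prev) instead of the ground-truth
    # word seq[:, i-1]. For example, ss_prob = 0.25 means roughly a quarter of the
    # positions in a batch are conditioned on the model's previous prediction.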
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task)
else:
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks,
trace_feats_to_decoder, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
trace_feats_to_decoder, trace_masks, show_gate_labels, task, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, show_gate_labels, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=output_logsoftmax)
elif task == 'both':
logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
tmp_trace_feats, trace_masks, show_gate_labels, task, state,
output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
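                # Added note (not original): each previously used trigram completion j
                # accumulates a count in mask[i, j]; adding mask * ln(0.5) * alpha to the
                # log-probs multiplies the corresponding probability by 0.5**(alpha*count).
                # With alpha = 2.0 a once-used completion is scaled by 0.25 rather than
                # being forbidden outright (the commented-out -1e9 variant above).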
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
        ### For decoder evaluation: cut at the ground-truth caption length, since in controlled caption generation we assume the caption length is known.
if task != 'both':
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
if task != 'both':
return seq, seqLogprobs
else:
tmp_trace_feats = tmp_trace_feats[:, 1:-1]
return seq, seqLogprobs, torch.cat([tmp_trace_feats,
torch.zeros([seq.shape[0], seq.shape[1]-tmp_trace_feats.shape[1], tmp_trace_feats.shape[2]]).to(seq.device)], 1)
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                        # Make it impossible to generate bad endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                        unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx) # note: `&` binds tighter than `!=`, so the comparisons must be parenthesised
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
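# Added note (not original): in _diverse_sample above, group `divm` is offset by `divm`
# timesteps (t = tt - divm), so by the time it chooses the token at position t every
# earlier group has already committed to its own position-t token and can be penalised
# through `diversity_lambda`. This is the staggered schedule of diverse beam/sampling.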
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
            PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume a 1 at the first position (the fake/sentinel region is always valid).
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
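# Added note (not original): PI above has att_size + 1 entries per example; index 0
# weights the "fake region" (the visual sentinel produced by AdaAtt_lstm) and the rest
# weight the image regions, so the model can choose to rely on its language state
# instead of the image. This is the core idea of adaptive attention.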
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
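# Illustrative sketch (added, not part of the original code): how UpDownCore is driven
# for a single decoding step. The option fields mirror what UpDownCore/Attention read;
# all sizes and tensors below are made up for the demo.
def _updown_core_demo():
    from types import SimpleNamespace
    opt = SimpleNamespace(input_encoding_size=16, rnn_size=32, drop_prob_lm=0.5,
                          att_hid_size=24)
    core = UpDownCore(opt)
    B, att_size = 2, 7
    xt = torch.randn(B, opt.input_encoding_size)               # current word embedding
    fc_feats = torch.randn(B, opt.rnn_size)                    # embedded global feature
    att_feats = torch.randn(B, att_size, opt.rnn_size)         # embedded region features
    p_att_feats = torch.randn(B, att_size, opt.att_hid_size)   # pre-projected for attention
    state = (torch.zeros(2, B, opt.rnn_size), torch.zeros(2, B, opt.rnn_size))
    output, state = core(xt, fc_feats, att_feats, p_att_feats, state)
    return output.shape, state[0].shape   # (B, rnn_size), (2, B, rnn_size)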
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
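# Added note (not original): StackAttCore (and DenseAttCore below) temporarily rewrite
# opt.input_encoding_size so the reused LSTMCore is constructed with the right input
# width at each level, then restore the original value before __init__ returns.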
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
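# Added note (not original): the module above is the additive attention used throughout
# this file:
#   e_i   = w^T tanh(W_p a_i + W_h h)      (W_p a_i is precomputed as p_att_feats)
#   alpha = softmax(e),  att_res = sum_i alpha_i * a_i
# with an optional renormalisation over the masked (valid) regions.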
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note: this is my attempt to replicate the att2all model from the self-critical paper.
However, it is not actually a correct replication; this will be fixed.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
        # Cases handled here: normal MLE training, sampling, beam search
        # (including diverse beam search), and a fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_both.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import numpy as np
import torch
from .ShowTellModel import ShowTellModel
from .FCModel import FCModel
from .AttModel_both import *
from .TransformerModel_mitr import TransformerModel
# from .cachedTransformer import TransformerModel as cachedTransformer
# from .BertCapModel import BertCapModel
# from .M2Transformer import M2TransformerModel
# from .AoAModel import AoAModel
def setup(opt):
if opt.caption_model in ['fc', 'show_tell']:
print('Warning: %s model is mostly deprecated; many new features are not supported.' %opt.caption_model)
if opt.caption_model == 'fc':
print('Use newfc instead of fc')
if opt.caption_model == 'fc':
model = FCModel(opt)
elif opt.caption_model == 'language_model':
model = LMModel(opt)
elif opt.caption_model == 'newfc':
model = NewFCModel(opt)
elif opt.caption_model == 'show_tell':
model = ShowTellModel(opt)
# Att2in model in self-critical
elif opt.caption_model == 'att2in':
model = Att2inModel(opt)
# Att2in model with two-layer MLP img embedding and word embedding
elif opt.caption_model == 'att2in2':
model = Att2in2Model(opt)
elif opt.caption_model == 'att2all2':
print('Warning: this is not a correct implementation of the att2all model in the original paper.')
model = Att2all2Model(opt)
# Adaptive Attention model from Knowing when to look
elif opt.caption_model == 'adaatt':
model = AdaAttModel(opt)
# Adaptive Attention with maxout lstm
elif opt.caption_model == 'adaattmo':
model = AdaAttMOModel(opt)
# Top-down attention model
elif opt.caption_model in ['topdown', 'updown']:
model = UpDownModel(opt)
# StackAtt
elif opt.caption_model == 'stackatt':
model = StackAttModel(opt)
# DenseAtt
elif opt.caption_model == 'denseatt':
model = DenseAttModel(opt)
# Transformer
elif opt.caption_model == 'transformer':
if getattr(opt, 'cached_transformer', False):
model = cachedTransformer(opt)
else:
print(TransformerModel)
model = TransformerModel(opt)
# AoANet
elif opt.caption_model == 'aoa':
model = AoAModel(opt)
elif opt.caption_model == 'bert':
model = BertCapModel(opt)
elif opt.caption_model == 'm2transformer':
model = M2TransformerModel(opt)
else:
raise Exception("Caption model not supported: {}".format(opt.caption_model))
return model
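# Illustrative sketch (added, not part of the repo): the option fields that the
# att-style models above read when constructed through setup(). The real option object
# comes from the project's argparse configuration; the field names are taken from the
# model code, but the values (and this helper itself) are made up.
def _setup_sketch():
    from types import SimpleNamespace
    opt = SimpleNamespace(
        caption_model='att2in2',
        vocab_size=100,
        vocab={str(i): 'word%d' % i for i in range(1, 101)},
        input_encoding_size=16, rnn_size=32, num_layers=1, drop_prob_lm=0.5,
        max_length=20, fc_feat_size=64, att_feat_size=64, att_hid_size=24)
    return setup(opt)  # -> an Att2in2Model instance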
|
connect-caption-and-trace-main
|
captioning/models/__init__.py
|
# This file contains our mirrored Transformer network
# The branch for extracted visual features is implemented in "encoder",
# and then branches for trace and caption are implemented in "decoder"
# The cfg name correspondence:
# N_layer=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel_both import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator_caption, generator_trace, d_model, dropout):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator_caption = generator_caption
self.generator_trace = generator_trace
self.trace_layernorm_caption = nn.LayerNorm(d_model, elementwise_affine=True)
self.trace_layernorm_trace = nn.LayerNorm(d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(d_model,0) # here don't use dropout inside positional embedding
self.trace_embed = nn.Sequential(*(
(nn.Linear(5, d_model),
nn.LayerNorm(d_model, elementwise_affine=True),
nn.ReLU(),
nn.Dropout(0.5)) ))
def forward(self, src, tgt, src_mask, tgt_mask, trace_feat, trace_masks, task):
"Take in and process masked src and target sequences."
memory = self.encode(src, src_mask)
return self.decode(memory, src_mask, tgt, tgt_mask, trace_feat, trace_masks, task), memory
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask, trace_feats, trace_masks, task):
# if task == 'trace':
trace_feats = trace_feats[:, :, :5]
trace_feats = self.trace_embed(trace_feats)
trace_feats = self.trace_layernorm_trace(self.position_encoder(trace_feats))
### embed the tgt
tgt_emd = self.tgt_embed(tgt, task)
return self.decoder(tgt_emd, trace_feats, memory, src_mask, tgt_mask, trace_masks, task)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
def __init__(self, layer):
super(Decoder, self).__init__()
self.layer = layer
self.norm = LayerNorm(layer.size)
self.norm_2 = LayerNorm(layer.size)
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
x = self.layer(x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task)
if task == 'both':
return self.norm(x[0]), self.norm_2(x[1])
else:
return self.norm(x)
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, caption_trace_attn, trace_caption_attn, trace_self_attn, trace_src_attn,
feed_forward_caption, feed_forward_trace, both_caption_trace_attn, both_trace_caption_attn,
both_feed_forward_caption, both_feed_forward_trace,
dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward_caption = feed_forward_caption
self.feed_forward_trace = feed_forward_trace
self.sublayer = clones(SublayerConnection(size, dropout), 8+4*len(self.self_attn)) # 4 for each additional layer
### caption / trace generation
self.caption_trace_attn = caption_trace_attn
self.trace_caption_attn = trace_caption_attn
self.trace_self_attn = trace_self_attn
self.trace_src_attn = trace_src_attn
### pred both
self.both_caption_trace_attn = both_caption_trace_attn
self.both_trace_caption_attn = both_trace_caption_attn
self.both_feed_forward_caption = both_feed_forward_caption
self.both_feed_forward_trace = both_feed_forward_trace
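    # Added note (not original): sublayer index layout used by forward() below:
    #   [0] trace<-caption cross-attention, [1] trace feed-forward      (task == 'trace')
    #   [2] caption<-trace cross-attention, [3] caption feed-forward    (task == 'caption')
    #   [4]-[7] the analogous cross-attention / feed-forward pairs for task == 'both'
    #   [8 + 4*i .. 8 + 4*i + 3] for decoder layer i: caption self-attention,
    #   caption src-attention, trace self-attention, trace src-attention.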
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
"Follow Figure 1 (right) for connections."
m = memory
if task == 'trace' or task == 'cycle_trace':
for i in range(len(self.self_attn)):
x = self.sublayer[8 + 0 + 4 * i](x, lambda x: self.self_attn[i](x, x, x, tgt_mask))
x = self.sublayer[8 + 1 + 4 * i](x, lambda x: self.src_attn[i](x, m, m, src_mask))
trace_feat = self.sublayer[8 + 2 + 4 * i](trace_feat,
lambda trace_feat: self.trace_self_attn[i](trace_feat,
trace_feat,
trace_feat,
trace_masks))
trace_feat = self.sublayer[8 + 3 + 4 * i](trace_feat,
lambda trace_feat: self.trace_src_attn[i](trace_feat, m, m,
src_mask))
trace_feat = self.sublayer[0](trace_feat, lambda trace_feat: self.trace_caption_attn(trace_feat, x, x, tgt_mask))
return self.sublayer[1](trace_feat, self.feed_forward_trace)
elif task == 'caption':
trace_masks = trace_masks.unsqueeze(1)
for i in range(len(self.self_attn)):
x = self.sublayer[8 + 0 + 4 * i](x, lambda x: self.self_attn[i](x, x, x, tgt_mask))
x = self.sublayer[8 + 1 + 4 * i](x, lambda x: self.src_attn[i](x, m, m, src_mask))
trace_feat = self.sublayer[8 + 2 + 4 * i](trace_feat,
lambda trace_feat: self.trace_self_attn[i](trace_feat,
trace_feat,
trace_feat,
trace_masks))
trace_feat = self.sublayer[8 + 3 + 4 * i](trace_feat,
lambda trace_feat: self.trace_src_attn[i](trace_feat, m, m,
src_mask))
x = self.sublayer[2](x, lambda x: self.caption_trace_attn(x, trace_feat, trace_feat, trace_masks))
return self.sublayer[3](x, self.feed_forward_caption)
elif task == 'both':
for i in range(len(self.self_attn)):
x = self.sublayer[8 + 0 + 4 * i](x, lambda x: self.self_attn[i](x, x, x, tgt_mask))
x = self.sublayer[8 + 1 + 4 * i](x, lambda x: self.src_attn[i](x, m, m, src_mask))
trace_feat = self.sublayer[8 + 2 + 4 * i](trace_feat,
lambda trace_feat: self.trace_self_attn[i](trace_feat,
trace_feat,
trace_feat,
trace_masks))
trace_feat = self.sublayer[8 + 3 + 4 * i](trace_feat,
lambda trace_feat: self.trace_src_attn[i](trace_feat, m, m,
src_mask))
trace_masks_for_caption = torch.cat([trace_masks,
trace_masks[:, -1, :].unsqueeze(1).repeat(1,tgt_mask.shape[1]-trace_masks.shape[1],1)], 1)
tgt_mask_for_trace = tgt_mask[:, :trace_masks.shape[1], :]
x_out = self.sublayer[4](x, lambda x: self.both_caption_trace_attn(x, trace_feat, trace_feat, trace_masks_for_caption))
trace_feat_out = self.sublayer[5](trace_feat,
lambda trace_feat: self.both_trace_caption_attn(trace_feat, x, x, tgt_mask_for_trace))
return self.sublayer[6](x_out, self.both_feed_forward_caption), self.sublayer[7](trace_feat_out, self.both_feed_forward_trace)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
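# Added note (not original): for example, subsequent_mask(3) returns
#   [[[ True, False, False],
#     [ True,  True, False],
#     [ True,  True,  True]]]
# i.e. position t may only attend to positions <= t.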
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, float('-inf'))
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
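# Added note (not original): this is the standard scaled dot-product attention,
#   Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V,
# with masked positions set to -inf before the softmax so they receive zero weight.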
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
self.vocab = vocab
# self.layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
def forward(self, x, task=None):
if task != 'cycle_trace':
return self.lut(x) * math.sqrt(self.d_model)
else:
# # use gumbel softmax with \tau = 1
x = torch.nn.functional.softmax(torch.log(x) -
torch.log(-torch.log(torch.rand([x.shape[2]]))).unsqueeze(0).unsqueeze(0).to(x.device),
dim=-1)
return torch.matmul(x, self.lut(torch.arange(self.vocab).to(x.device))) \
* math.sqrt(self.d_model)
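# Added note (not original): for the 'cycle_trace' task the input x is a distribution
# over the vocabulary rather than word indices. Adding -log(-log(U)) Gumbel noise to
# log(x) and renormalising with a softmax (temperature tau = 1) is a differentiable
# approximation to sampling a word; the resulting soft one-hot is then mixed with the
# embedding table by the matmul above.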
class caption_Embeddings(nn.Module):
def __init__(self, d_model, vocab, position_encoder):
super(caption_Embeddings, self).__init__()
self.position_encoder = position_encoder
self.embed = Embeddings(d_model, vocab)
def forward(self, x, task):
x = self.embed(x, task)
x = self.position_encoder(x)
return x
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
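# Added note (not original): the buffer above implements the sinusoidal encoding of
# "Attention Is All You Need":
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))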
class TransformerModel(AttModel):
def make_model(self, src_vocab, tgt_vocab, N_layer=1,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model, dropout)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
# position_nodropout = PositionalEncoding(d_model, 0)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N_layer),
Decoder(DecoderLayer(d_model, clones(attn, N_layer), clones(attn, N_layer), c(attn), c(attn), clones(attn, N_layer), clones(attn, N_layer),
c(ff), c(ff), c(attn), c(attn), c(ff), c(ff),
dropout)),
lambda x:x, # nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
caption_Embeddings(d_model, tgt_vocab, c(position)), #nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)), #
Generator(d_model, tgt_vocab), nn.Sequential(nn.Linear(d_model, 5), nn.Sigmoid()),
d_model,dropout)
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def __init__(self, opt):
super(TransformerModel, self).__init__(opt)
self.opt = opt
# self.config = yaml.load(open(opt.config_file))
self.N_layer = getattr(opt, 'N_layer', opt.num_layers) # number of layers
self.d_model = getattr(opt, 'd_model', opt.input_encoding_size)
self.d_ff = getattr(opt, 'd_ff', opt.rnn_size)
self.h = getattr(opt, 'num_att_heads', 8)
self.dropout = getattr(opt, 'dropout', 0.1)
self.use_trace_feat = getattr(opt, 'use_trace_feat', 0)
delattr(self, 'att_embed')
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
# define trace embedding and layernorm
self.box_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ()) +
(nn.Linear(5, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)) +
((nn.BatchNorm1d(self.d_model),) if self.use_bn == 2 else ())))
self.box_layernorm1 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.box_layernorm2 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.att_layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(self.d_model, self.dropout)
delattr(self, 'embed')
self.embed = lambda x : x
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
delattr(self, 'logit')
del self.ctx2att
tgt_vocab = self.vocab_size + 1
self.model = self.make_model(0, tgt_vocab,
N_layer=self.N_layer,
d_model=self.d_model,
d_ff=self.d_ff,
h=self.h,
dropout=self.dropout)
def logit(self, x): # unsafe way
return self.model.generator_caption.proj(x)
def init_hidden(self, bsz):
return []
def _prepare_feature(self, fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks):
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats)
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
###########################
memory = self.model.encode(att_feats, att_masks)
return fc_feats[...,:0], att_feats[...,:0], memory, att_masks, trace_feats_to_decoder
def _prepare_feature_forward(self, att_feats, box_feats, att_masks=None, seq=None):
att_feats = self.att_embed(att_feats)
if att_masks is None:
att_masks = att_feats.new_ones(att_feats.shape[:2], dtype=torch.long)
att_masks = att_masks.unsqueeze(-2)
if seq is not None:
seq_mask = (seq.data != self.eos_idx) & (seq.data != self.pad_idx)
seq_mask[:,0] = 1 # bos
seq_mask = seq_mask.unsqueeze(-2)
seq_mask = seq_mask & subsequent_mask(seq.size(-1)).to(seq_mask)
seq_per_img = seq.shape[0] // att_feats.shape[0]
if seq_per_img > 1:
att_feats, att_masks = utils.repeat_tensors(seq_per_img,
[att_feats, att_masks]
)
else:
seq_mask = None
return att_feats, box_feats, seq, att_masks, seq_mask
def _forward(self, fc_feats, att_feats, trace_feats, box_feats, seq, att_masks=None, trace_masks=None, task = None):
        assert task in ('trace', 'caption', 'both', 'cycle_trace')
if task != 'cycle_trace':
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
if task == 'both':
### get the original caption input
tmp_seq = seq[:, :trace_masks.shape[1]]
_, _, _, _, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
## prepare the shifted trace
shifted_trace = torch.cat(
[torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
if self.training:
random_mask_rate = 0.5
else:
random_mask_rate = 0
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(shifted_trace.device).unsqueeze(0).unsqueeze(1)
(out_caption, out_trace), memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask,
task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs_caption = self.model.generator_caption(out_caption)
outputs_trace = self.model.generator_trace(out_trace)
return outputs_caption, outputs_trace
elif task == 'trace' or task == 'cycle_trace':
# for classification
trace_feats = trace_feats[:, :, :5]
### get the original caption input
tmp_seq = torch.ones([trace_masks.shape[0], trace_masks.shape[1]]).to(trace_masks.device) # seq[:, :trace_masks.shape[1]]
seq = seq[:, 1:trace_masks.shape[1]+1] # crop the seq to real length
seq_mask = trace_masks.unsqueeze(1)
att_feats, box_feats, tmp_seq, att_masks, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
## prepare the shifted trace
shifted_trace = torch.cat([torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
if self.training:
random_mask_rate = 0
else:
random_mask_rate = 0
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(
shifted_trace.device).unsqueeze(0).unsqueeze(1)
out, memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask, task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs = self.model.generator_trace(out)
return outputs
elif task == 'caption':
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
out, _ = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks, task)
outputs = self.model.generator_caption(out)
return outputs
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask, trace_feats_to_decoder, trace_masks, task):
"""
state = [ys.unsqueeze(0)]
"""
if len(state) == 0:
ys = it.unsqueeze(1)
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
if task == 'caption':
out = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, trace_masks, 'caption')
return out[:, -1], [ys.unsqueeze(0)]
elif task == 'both':
out_caption, out_trace = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, subsequent_mask(ys.size(1)).to(memory.device), 'both')
return out_caption[:, -1], [ys.unsqueeze(0)], out_trace
|
connect-caption-and-trace-main
|
captioning/models/TransformerModel_mitr.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the img feature embedding and word embedding are the same as in adaatt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
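# An illustrative sketch of the packing helpers above (hypothetical helper): run a module over
# only the unpadded timesteps of att_feats, then restore the original batch order and padding.
def _demo_pack_wrapper():
    feats = torch.randn(2, 5, 8)                        # (batch, max_len, feat)
    masks = torch.tensor([[1, 1, 1, 1, 1],
                          [1, 1, 1, 0, 0]], dtype=torch.float)
    out = pack_wrapper(nn.Linear(8, 4), feats, masks)   # only 5 + 3 rows go through the Linear
    assert out.shape == (2, 5, 4)
    assert out[1, 3:].abs().sum() == 0                  # padded positions come back as zeros
    return out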
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, output_logsoftmax=output_logsoftmax)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                        # Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
                        mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                        unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
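# An illustrative, standalone sketch of the trigram penalty used in _sample/_diverse_sample above
# (hypothetical helper): every previously generated (w_{t-2}, w_{t-1}) -> w_t continuation has its
# logit reduced by alpha * ln(2), i.e. its probability is roughly halved per prior occurrence.
def _demo_trigram_penalty(logprobs, prev_two, trigrams, alpha=2.0):
    # logprobs: (batch, vocab); prev_two: one (w1, w2) tuple per item; trigrams: one dict per item
    mask = torch.zeros_like(logprobs)
    for i, key in enumerate(prev_two):
        for j in trigrams[i].get(key, []):
            mask[i, j] += 1                        # count prior uses of this continuation
    return logprobs + mask * (-0.693) * alpha      # -0.693 ~= ln(1/2)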
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
            PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assumes the first mask entry is one, reused for the fake region slot
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
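# An illustrative usage sketch for the additive attention above (hypothetical helper): p_att_feats
# is assumed to be att_feats already projected to att_hid_size, as ctx2att does in _prepare_feature.
def _demo_attention(rnn_size=512, att_hid_size=128, att_size=36):
    from types import SimpleNamespace
    opt = SimpleNamespace(rnn_size=rnn_size, att_hid_size=att_hid_size)
    att_module = Attention(opt)
    h = torch.randn(2, rnn_size)                        # decoder hidden state
    att_feats = torch.randn(2, att_size, rnn_size)      # embedded region features
    p_att_feats = nn.Linear(rnn_size, att_hid_size)(att_feats)
    att_res = att_module(h, att_feats, p_att_feats)     # weighted sum over regions
    assert att_res.shape == (2, rnn_size)
    return att_res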
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not actually a correct replication; to be fixed.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_encoder_trace.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the img feature embedding and word embedding are the same as in adaatt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, trace_feats_to_decoder, trace_masks, task, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state,
trace_feats_to_decoder, trace_masks, task)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, trace_feats_to_decoder, trace_masks, task, output_logsoftmax=output_logsoftmax)
elif task == 'both':
logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state,
tmp_trace_feats, trace_masks, task,
output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1]) # recompute column 4 as the box area: (x2 - x1) * (y2 - y1)
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # pad finished sequences so the eos token is not overwritten with 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
### for decoder evaluation: truncate at the ground-truth caption length
if task != 'both':
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
if task != 'both':
return seq, seqLogprobs
else:
tmp_trace_feats = tmp_trace_feats[:, 1:-1]
return seq, seqLogprobs, torch.cat([tmp_trace_feats,
torch.zeros([seq.shape[0], seq.shape[1] - tmp_trace_feats.shape[1],
tmp_trace_feats.shape[2]]).to(seq.device)], 1)
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
# NOTE: this keeps the base AttModel call signature; this variant's _prepare_feature expects trace/box features, so _diverse_sample appears unused here.
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx) # parentheses are required: '&' binds tighter than '!='
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_next_c = torch.tanh(next_c)
next_h = out_gate * tanh_next_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_next_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.att_hid_size), conv_feat_embed], 1) # fake_region_embed has att_hid_size features
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # reuse the first mask column for the fake-region slot (assumed to be 1)
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not actually a correct replication; it will be fixed.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
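# Minimal standalone sketch of the trigram-blocking penalty used in _sample above:
# any word that would complete an already-generated trigram has its log-probability
# reduced by alpha * ln(2) per prior occurrence. All numbers below are made up for
# illustration only.
if __name__ == '__main__':
    import math
    demo_logprobs = torch.tensor([[0.5, 0.3, 0.2]]).log()  # 1 x vocab_size toy distribution
    demo_mask = torch.tensor([[0., 1., 0.]])                # word 1 would repeat a seen trigram once
    alpha = 2.0
    penalized = demo_logprobs + demo_mask * math.log(0.5) * alpha
    print(penalized.exp())  # word 1 is now roughly (1/2)**alpha = 4x less likely, pre-normalization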
|
connect-caption-and-trace-main
|
captioning/models/AttModel_standard_enco_deco_both.py
|
"""
Instruction to use meshed_memory_transformer (https://arxiv.org/abs/1912.08226)
pip install git+https://github.com/ruotianluo/meshed-memory-transformer.git
Note:
Currently m2transformer is not performing as well as the original transformer; the cause is unclear and still being investigated.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
try:
from m2transformer.models.transformer import Transformer, MemoryAugmentedEncoder, MeshedDecoder, ScaledDotProductAttentionMemory
except ImportError:
print('meshed-memory-transformer not installed; please run `pip install git+https://github.com/ruotianluo/meshed-memory-transformer.git`')
from .TransformerModel import subsequent_mask, TransformerModel
class M2TransformerModel(TransformerModel):
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
encoder = MemoryAugmentedEncoder(N_enc, 0, attention_module=ScaledDotProductAttentionMemory,
attention_module_kwargs={'m': 40})
# Another implementation is to use MultiLevelEncoder + att_embed
decoder = MeshedDecoder(tgt_vocab, 54, N_dec, -1) # -1 is padding;
model = Transformer(0, encoder, decoder) # 0 is bos
return model
def __init__(self, opt):
super(M2TransformerModel, self).__init__(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x: x # The visual embed is in the MAEncoder
# Notes: the dropout in MAEncoder is different from my att_embed (mine is 0.5?).
# Also the attention mask seems wrong in MAEncoder too... interesting.
def logit(self, x): # unsafe way
return x # M2transformer always output logsoftmax
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, att_masks)
memory, att_masks = self.model.encoder(att_feats)
return fc_feats[...,:0], att_feats[...,:0], memory, att_masks
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
att_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, att_masks, seq)
seq = seq.clone()
seq[~seq_mask.any(-2)] = -1 # Make padding to be -1 (my dataloader uses 0 as padding)
outputs = self.model(att_feats, seq)
return outputs
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask):
"""
state = [ys.unsqueeze(0)]
"""
if len(state) == 0:
ys = it.unsqueeze(1)
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
out = self.model.decoder(ys, memory, mask)
return out[:, -1], [ys.unsqueeze(0)]
def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam searching, sample_n must be 1 or equal to beam_size // group_size'
att_feats, _, __, ___ = self._prepare_feature_forward(att_feats, att_masks)
seq, logprobs, seqLogprobs = self.model.beam_search(att_feats, self.seq_length, 0,
beam_size, return_probs=True, out_size=beam_size)
seq = seq.reshape(-1, *seq.shape[2:])
seqLogprobs = seqLogprobs.reshape(-1, *seqLogprobs.shape[2:])
# if not (seqLogprobs.gather(-1, seq.unsqueeze(-1)).squeeze(-1) == logprobs.reshape(-1, logprobs.shape[-1])).all():
# import pudb;pu.db
# seqLogprobs = logprobs.reshape(-1, logprobs.shape[-1]).unsqueeze(-1).expand(-1,-1,seqLogprobs.shape[-1])
return seq, seqLogprobs
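# Minimal sketch of the incremental decoding state used by core() above: the state
# holds the decoded prefix ids in a single tensor of shape (1, B, t). Values below
# are made up for illustration only.
if __name__ == '__main__':
    it0 = torch.tensor([0, 0])                               # <bos> for a toy batch of 2
    state = [it0.unsqueeze(1).unsqueeze(0)]                  # ys with shape (1, 2, 1)
    it1 = torch.tensor([7, 3])                               # next decoded word per sample
    ys = torch.cat([state[0][0], it1.unsqueeze(1)], dim=1)   # prefix per sample, shape (2, 2)
    state = [ys.unsqueeze(0)]
    print(state[0].shape)                                    # torch.Size([1, 2, 2])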
|
connect-caption-and-trace-main
|
captioning/models/M2Transformer.py
|
import torch
def repeat_tensors(n, x):
"""
For a tensor of size Bx..., repeat each of the B entries n consecutive times, producing a (B*n)x... tensor.
For lists/tuples, apply the repeat recursively to every element.
"""
if torch.is_tensor(x):
x = x.unsqueeze(1) # Bx1x...
x = x.expand(-1, n, *([-1]*len(x.shape[2:]))) # Bxnx...
x = x.reshape(x.shape[0]*n, *x.shape[2:]) # Bnx...
elif type(x) is list or type(x) is tuple:
x = [repeat_tensors(n, _) for _ in x]
return x
def split_tensors(n, x):
if torch.is_tensor(x):
assert x.shape[0] % n == 0
x = x.reshape(x.shape[0] // n, n, *x.shape[1:]).unbind(1)
elif type(x) is list or type(x) is tuple:
x = [split_tensors(n, _) for _ in x]
elif x is None:
x = [None] * n
return x
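# Minimal usage sketch of repeat_tensors/split_tensors above; the shapes are made up
# for illustration only.
if __name__ == '__main__':
    x = torch.arange(6).reshape(2, 3)   # B=2 rows of 3 features
    rep = repeat_tensors(3, x)          # (6, 3): each row repeated 3 consecutive times
    parts = split_tensors(3, rep)       # tuple of 3 tensors, each (2, 3) and equal to x
    assert all(torch.equal(p, x) for p in parts)
    # Collections are handled recursively, e.g. repeat_tensors(2, [x, (x, x)]).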
|
connect-caption-and-trace-main
|
captioning/models/utils.py
|
# This file contains the CaptionModel base class, which implements beam search
# (a batched version and the older per-image version), diverse beam search,
# and the next-word sampling utilities shared by the captioning models.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from ..utils import misc as utils
from . import utils as model_utils
class CaptionModel(nn.Module):
def __init__(self):
super(CaptionModel, self).__init__()
# implements beam search
# calls beam_step and returns the final set of beams
# augments log-probabilities with diversity terms when number of groups > 1
def forward(self, *args, **kwargs):
mode = kwargs.get('mode', 'forward')
if 'mode' in kwargs:
del kwargs['mode']
return getattr(self, '_'+mode)(*args, **kwargs)
def beam_search(self, init_state, init_logprobs, *args, **kwargs):
# function computes the similarity score to be augmented
def add_diversity(beam_seq_table, logprobs, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprobs = logprobs.clone()
batch_size = beam_seq_table[0].shape[0]
if divm > 0:
change = logprobs.new_zeros(batch_size, logprobs.shape[-1])
for prev_choice in range(divm):
prev_decisions = beam_seq_table[prev_choice][:, :, local_time] # Nxb
for prev_labels in range(bdash):
change.scatter_add_(1, prev_decisions[:, prev_labels].unsqueeze(-1), change.new_ones(batch_size, 1))
if local_time == 0:
logprobs = logprobs - change * diversity_lambda
else:
logprobs = logprobs - self.repeat_tensor(bdash, change) * diversity_lambda
return logprobs, unaug_logprobs
# does one step of classical beam search
def beam_step(logprobs, unaug_logprobs, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
#INPUTS:
#logprobs: probabilities augmented after diversity N*bxV
#beam_size: obvious
#t : time instant
#beam_seq : tensor containing the beams
#beam_seq_logprobs: tensor containing the beam logprobs
#beam_logprobs_sum: tensor containing joint logprobs
#OUTPUTS:
#beam_seq : tensor containing the word indices of the decoded captions Nxbxl
#beam_seq_logprobs : log-probability of each decision made, NxbxlxV
#beam_logprobs_sum : joint log-probability of each beam Nxb
batch_size = beam_logprobs_sum.shape[0]
vocab_size = logprobs.shape[-1]
logprobs = logprobs.reshape(batch_size, -1, vocab_size) # NxbxV
if t == 0:
assert logprobs.shape[1] == 1
beam_logprobs_sum = beam_logprobs_sum[:, :1]
candidate_logprobs = beam_logprobs_sum.unsqueeze(-1) + logprobs # beam_logprobs_sum Nxb logprobs is NxbxV
ys, ix = torch.sort(candidate_logprobs.reshape(candidate_logprobs.shape[0], -1), -1, True)
ys, ix = ys[:,:beam_size], ix[:,:beam_size]
beam_ix = ix // vocab_size # Nxb which beam
selected_ix = ix % vocab_size # Nxb # which word
state_ix = (beam_ix + torch.arange(batch_size).type_as(beam_ix).unsqueeze(-1) * logprobs.shape[1]).reshape(-1) # N*b which in Nxb beams
if t > 0:
# gather according to beam_ix
assert (beam_seq.gather(1, beam_ix.unsqueeze(-1).expand_as(beam_seq)) == beam_seq.reshape(-1, beam_seq.shape[-1])[state_ix].view_as(beam_seq)).all()
beam_seq = beam_seq.gather(1, beam_ix.unsqueeze(-1).expand_as(beam_seq))
beam_seq_logprobs = beam_seq_logprobs.gather(1, beam_ix.unsqueeze(-1).unsqueeze(-1).expand_as(beam_seq_logprobs))
beam_seq = torch.cat([beam_seq, selected_ix.unsqueeze(-1)], -1) # beam_seq Nxbxl
beam_logprobs_sum = beam_logprobs_sum.gather(1, beam_ix) + \
logprobs.reshape(batch_size, -1).gather(1, ix)
assert (beam_logprobs_sum == ys).all()
_tmp_beam_logprobs = unaug_logprobs[state_ix].reshape(batch_size, -1, vocab_size)
beam_logprobs = unaug_logprobs.reshape(batch_size, -1, vocab_size).gather(1, beam_ix.unsqueeze(-1).expand(-1, -1, vocab_size)) # NxbxV
assert (_tmp_beam_logprobs == beam_logprobs).all()
beam_seq_logprobs = torch.cat([
beam_seq_logprobs,
beam_logprobs.reshape(batch_size, -1, 1, vocab_size)], 2)
new_state = [None for _ in state]
for _ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[_ix] = state[_ix][:, state_ix]
state = new_state
return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state
# Start diverse_beam_search
opt = kwargs['opt']
temperature = opt.get('temperature', 1) # This should not affect beam search, but will affect dbs
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
suppress_UNK = opt.get('suppress_UNK', 0)
length_penalty = utils.penalty_builder(opt.get('length_penalty', ''))
bdash = beam_size // group_size # beam per group
batch_size = init_logprobs.shape[0]
device = init_logprobs.device
# INITIALIZATIONS
beam_seq_table = [torch.LongTensor(batch_size, bdash, 0).to(device) for _ in range(group_size)]
beam_seq_logprobs_table = [torch.FloatTensor(batch_size, bdash, 0, self.vocab_size + 1).to(device) for _ in range(group_size)]
beam_logprobs_sum_table = [torch.zeros(batch_size, bdash).to(device) for _ in range(group_size)]
# logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size+1)
done_beams_table = [[[] for __ in range(group_size)] for _ in range(batch_size)]
# state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
# state_table = list(zip(*[_.reshape(-1, batch_size * bdash, group_size, *_.shape[2:]).chunk(group_size, 2) for _ in init_state]))
state_table = [[_.clone() for _ in init_state] for _ in range(group_size)]
# logprobs_table = list(init_logprobs.reshape(batch_size * bdash, group_size, -1).chunk(group_size, 0))
logprobs_table = [init_logprobs.clone() for _ in range(group_size)]
# END INIT
# Chunk elements in the args
args = list(args)
args = model_utils.split_tensors(group_size, args) # For each arg, turn (Bbg)x... to (Bb)x(g)x...
if self.__class__.__name__ == 'AttEnsemble':
args = [[[args[j][i][k] for i in range(len(self.models))] for j in range(len(args))] for k in range(group_size)] # group_name, arg_name, model_name
else:
args = [[args[i][j] for i in range(len(args))] for j in range(group_size)]
for t in range(self.seq_length + group_size - 1):
for divm in range(group_size):
if t >= divm and t <= self.seq_length + divm - 1:
# add diversity
logprobs = logprobs_table[divm]
# suppress previous word
if decoding_constraint and t-divm > 0:
logprobs.scatter_(1, beam_seq_table[divm][:, :, t-divm-1].reshape(-1, 1).to(device), float('-inf'))
if remove_bad_endings and t-divm > 0:
logprobs[torch.from_numpy(np.isin(beam_seq_table[divm][:, :, t-divm-1].cpu().numpy(), self.bad_endings_ix)).reshape(-1), 0] = float('-inf')
# suppress UNK tokens in the decoding
if suppress_UNK and hasattr(self, 'vocab') and self.vocab[str(logprobs.size(1)-1)] == 'UNK':
logprobs[:,logprobs.size(1)-1] = logprobs[:, logprobs.size(1)-1] - 1000
# diversity is added here
# the function directly modifies the logprobs values and hence, we need to return
# the unaugmented ones for sorting the candidates in the end. # for historical
# reasons :-)
logprobs, unaug_logprobs = add_diversity(beam_seq_table,logprobs,t,divm,diversity_lambda,bdash)
# infer new beams
beam_seq_table[divm],\
beam_seq_logprobs_table[divm],\
beam_logprobs_sum_table[divm],\
state_table[divm] = beam_step(logprobs,
unaug_logprobs,
bdash,
t-divm,
beam_seq_table[divm],
beam_seq_logprobs_table[divm],
beam_logprobs_sum_table[divm],
state_table[divm])
# if time's up... or if end token is reached then copy beams
for b in range(batch_size):
is_end = beam_seq_table[divm][b, :, t-divm] == self.eos_idx
assert beam_seq_table[divm].shape[-1] == t-divm+1
if t == self.seq_length + divm - 1:
is_end.fill_(1)
for vix in range(bdash):
if is_end[vix]:
final_beam = {
'seq': beam_seq_table[divm][b, vix].clone(),
'logps': beam_seq_logprobs_table[divm][b, vix].clone(),
'unaug_p': beam_seq_logprobs_table[divm][b, vix].sum().item(),
'p': beam_logprobs_sum_table[divm][b, vix].item()
}
final_beam['p'] = length_penalty(t-divm+1, final_beam['p'])
done_beams_table[b][divm].append(final_beam)
beam_logprobs_sum_table[divm][b, is_end] -= 1000
# move the current group one step forward in time
it = beam_seq_table[divm][:, :, t-divm].reshape(-1).to(logprobs.device)
logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it, *(args[divm] + [state_table[divm]]))
logprobs_table[divm] = F.log_softmax(logprobs_table[divm] / temperature, dim=-1)
# all beams are sorted by their log-probabilities
done_beams_table = [[sorted(done_beams_table[b][i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)] for b in range(batch_size)]
done_beams = [sum(_, []) for _ in done_beams_table]
return done_beams
def old_beam_search(self, init_state, init_logprobs, *args, **kwargs):
# function computes the similarity score to be augmented
def add_diversity(beam_seq_table, logprobsf, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprobsf = logprobsf.clone()
for prev_choice in range(divm):
prev_decisions = beam_seq_table[prev_choice][local_time]
for sub_beam in range(bdash):
for prev_labels in range(bdash):
logprobsf[sub_beam][prev_decisions[prev_labels]] = logprobsf[sub_beam][prev_decisions[prev_labels]] - diversity_lambda
return unaug_logprobsf
# does one step of classical beam search
def beam_step(logprobsf, unaug_logprobsf, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
#INPUTS:
#logprobsf: probabilities augmented after diversity
#beam_size: obvious
#t : time instant
#beam_seq : tensor containing the beams
#beam_seq_logprobs: tensor containing the beam logprobs
#beam_logprobs_sum: tensor containing joint logprobs
#OUTPUTS:
#beam_seq : tensor containing the word indices of the decoded captions
#beam_seq_logprobs : log-probability of each decision made, same size as beam_seq
#beam_logprobs_sum : joint log-probability of each beam
ys,ix = torch.sort(logprobsf,1,True)
candidates = []
cols = min(beam_size, ys.size(1))
rows = beam_size
if t == 0:
rows = 1
for c in range(cols): # for each column (word, essentially)
for q in range(rows): # for each beam expansion
#compute logprob of expanding beam q with word in (sorted) position c
local_logprob = ys[q,c].item()
candidate_logprob = beam_logprobs_sum[q] + local_logprob
# local_unaug_logprob = unaug_logprobsf[q,ix[q,c]]
candidates.append({'c':ix[q,c], 'q':q, 'p':candidate_logprob, 'r':unaug_logprobsf[q]})
candidates = sorted(candidates, key=lambda x: -x['p'])
new_state = [_.clone() for _ in state]
#beam_seq_prev, beam_seq_logprobs_prev
if t >= 1:
# we'll need these as reference when we fork beams around
beam_seq_prev = beam_seq[:t].clone()
beam_seq_logprobs_prev = beam_seq_logprobs[:t].clone()
for vix in range(beam_size):
v = candidates[vix]
#fork beam index q into index vix
if t >= 1:
beam_seq[:t, vix] = beam_seq_prev[:, v['q']]
beam_seq_logprobs[:t, vix] = beam_seq_logprobs_prev[:, v['q']]
#rearrange recurrent states
for state_ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[state_ix][:, vix] = state[state_ix][:, v['q']] # dimension one is time step
#append new end terminal at the end of this beam
beam_seq[t, vix] = v['c'] # c'th word is the continuation
beam_seq_logprobs[t, vix] = v['r'] # the raw logprob here
beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam
state = new_state
return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state,candidates
# Start diverse_beam_search
opt = kwargs['opt']
temperature = opt.get('temperature', 1) # This should not affect beam search, but will affect dbs
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
suppress_UNK = opt.get('suppress_UNK', 0)
length_penalty = utils.penalty_builder(opt.get('length_penalty', ''))
bdash = beam_size // group_size # beam per group
# INITIALIZATIONS
beam_seq_table = [torch.LongTensor(self.seq_length, bdash).zero_() for _ in range(group_size)]
beam_seq_logprobs_table = [torch.FloatTensor(self.seq_length, bdash, self.vocab_size + 1).zero_() for _ in range(group_size)]
beam_logprobs_sum_table = [torch.zeros(bdash) for _ in range(group_size)]
# logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size+1)
done_beams_table = [[] for _ in range(group_size)]
# state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
state_table = list(zip(*[_.chunk(group_size, 1) for _ in init_state]))
logprobs_table = list(init_logprobs.chunk(group_size, 0))
# END INIT
# Chunk elements in the args
args = list(args)
if self.__class__.__name__ == 'AttEnsemble':
args = [[_.chunk(group_size) if _ is not None else [None]*group_size for _ in args_] for args_ in args] # arg_name, model_name, group_name
args = [[[args[j][i][k] for i in range(len(self.models))] for j in range(len(args))] for k in range(group_size)] # group_name, arg_name, model_name
else:
args = [_.chunk(group_size) if _ is not None else [None]*group_size for _ in args]
args = [[args[i][j] for i in range(len(args))] for j in range(group_size)]
for t in range(self.seq_length + group_size - 1):
for divm in range(group_size):
if t >= divm and t <= self.seq_length + divm - 1:
# add diversity
logprobsf = logprobs_table[divm]
# suppress previous word
if decoding_constraint and t-divm > 0:
logprobsf.scatter_(1, beam_seq_table[divm][t-divm-1].unsqueeze(1).to(logprobsf.device), float('-inf'))
if remove_bad_endings and t-divm > 0:
logprobsf[torch.from_numpy(np.isin(beam_seq_table[divm][t-divm-1].cpu().numpy(), self.bad_endings_ix)), 0] = float('-inf')
# suppress UNK tokens in the decoding
if suppress_UNK and hasattr(self, 'vocab') and self.vocab[str(logprobsf.size(1)-1)] == 'UNK':
logprobsf[:,logprobsf.size(1)-1] = logprobsf[:, logprobsf.size(1)-1] - 1000
# diversity is added here
# the function directly modifies the logprobsf values and hence, we need to return
# the unaugmented ones for sorting the candidates in the end. # for historical
# reasons :-)
unaug_logprobsf = add_diversity(beam_seq_table,logprobsf,t,divm,diversity_lambda,bdash)
# infer new beams
beam_seq_table[divm],\
beam_seq_logprobs_table[divm],\
beam_logprobs_sum_table[divm],\
state_table[divm],\
candidates_divm = beam_step(logprobsf,
unaug_logprobsf,
bdash,
t-divm,
beam_seq_table[divm],
beam_seq_logprobs_table[divm],
beam_logprobs_sum_table[divm],
state_table[divm])
# if time's up... or if end token is reached then copy beams
for vix in range(bdash):
if beam_seq_table[divm][t-divm,vix] == self.eos_idx or t == self.seq_length + divm - 1:
final_beam = {
'seq': beam_seq_table[divm][:, vix].clone(),
'logps': beam_seq_logprobs_table[divm][:, vix].clone(),
'unaug_p': beam_seq_logprobs_table[divm][:, vix].sum().item(),
'p': beam_logprobs_sum_table[divm][vix].item()
}
final_beam['p'] = length_penalty(t-divm+1, final_beam['p'])
done_beams_table[divm].append(final_beam)
# don't continue beams from finished sequences
beam_logprobs_sum_table[divm][vix] = -1000
# move the current group one step forward in time
it = beam_seq_table[divm][t-divm].to(logprobsf.device)
logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it, *(args[divm] + [state_table[divm]]))
logprobs_table[divm] = F.log_softmax(logprobs_table[divm] / temperature, dim=-1)
# all beams are sorted by their log-probabilities
done_beams_table = [sorted(done_beams_table[i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)]
done_beams = sum(done_beams_table, [])
return done_beams
def sample_next_word(self, logprobs, sample_method, temperature):
if sample_method == 'greedy':
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
elif sample_method == 'gumbel': # gumbel softmax
# ref: https://gist.github.com/yzh119/fd2146d2aeb329d067568a493b20172f
def sample_gumbel(shape, eps=1e-20):
U = torch.rand(shape).to(logprobs.device)
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
y = logits + sample_gumbel(logits.size())
return F.log_softmax(y / temperature, dim=-1)
_logprobs = gumbel_softmax_sample(logprobs, temperature)
_, it = torch.max(_logprobs.data, 1)
sampleLogprobs = logprobs.gather(1, it.unsqueeze(1)) # gather the logprobs at sampled positions
else:
logprobs = logprobs / temperature
if sample_method.startswith('top'): # topk sampling
top_num = float(sample_method[3:])
if 0 < top_num < 1:
# nucleus sampling from # The Curious Case of Neural Text Degeneration
probs = F.softmax(logprobs, dim=1)
sorted_probs, sorted_indices = torch.sort(probs, descending=True, dim=1)
_cumsum = sorted_probs.cumsum(1)
mask = _cumsum < top_num
mask = torch.cat([torch.ones_like(mask[:,:1]), mask[:,:-1]], 1)
sorted_probs = sorted_probs * mask.to(sorted_probs)
sorted_probs = sorted_probs / sorted_probs.sum(1, keepdim=True)
logprobs.scatter_(1, sorted_indices, sorted_probs.log())
else:
the_k = int(top_num)
tmp = torch.empty_like(logprobs).fill_(float('-inf'))
topk, indices = torch.topk(logprobs, the_k, dim=1)
tmp = tmp.scatter(1, indices, topk)
logprobs = tmp
it = torch.distributions.Categorical(logits=logprobs.detach()).sample()
sampleLogprobs = logprobs.gather(1, it.unsqueeze(1)) # gather the logprobs at sampled positions
return it, sampleLogprobs
def decode_sequence(self, seq):
return utils.decode_sequence(self.vocab, seq)
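# Standalone sketch of the nucleus (top-p) filtering performed in sample_next_word
# above when sample_method looks like 'top0.9'; the toy distribution is made up for
# illustration only.
if __name__ == '__main__':
    demo_logprobs = torch.tensor([[0.05, 0.5, 0.3, 0.15]]).log()        # 1 x vocab_size
    top_p = 0.9
    probs = F.softmax(demo_logprobs, dim=1)
    sorted_probs, sorted_indices = torch.sort(probs, descending=True, dim=1)
    mask = sorted_probs.cumsum(1) < top_p
    mask = torch.cat([torch.ones_like(mask[:, :1]), mask[:, :-1]], 1)   # always keep the top word
    sorted_probs = sorted_probs * mask.to(sorted_probs)
    sorted_probs = sorted_probs / sorted_probs.sum(1, keepdim=True)
    filtered = demo_logprobs.scatter(1, sorted_indices, sorted_probs.log())
    it = torch.distributions.Categorical(logits=filtered).sample()
    print(it)   # sampled only from the smallest set of words whose mass reaches top_p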
|
connect-caption-and-trace-main
|
captioning/models/CaptionModel_orig.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the image feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the'] # note: 'the' already appears in the list above; the duplicate is harmless
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
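# Minimal usage sketch of pack_wrapper above: apply a module only to the unpadded
# positions of a padded batch of attention features. Shapes are made up for
# illustration; padded positions come back as zeros from pad_packed_sequence.
def _demo_pack_wrapper():
    att_feats = torch.randn(2, 4, 8)                 # batch of 2, up to 4 regions, 8-d features
    att_masks = torch.tensor([[1., 1., 1., 0.],      # sample 0 has 3 valid regions
                              [1., 1., 0., 0.]])     # sample 1 has 2 valid regions
    proj = nn.Linear(8, 5)
    out = pack_wrapper(proj, att_feats, att_masks)   # shape (2, 3, 5): clipped to the longest valid length
    return out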
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
# For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
# NOTE: clip_att above was extended to also clip box_feats, so this two-argument call (and the five-argument _prepare_feature call in _forward below) looks stale; the trace-aware subclasses presumably override these methods.
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, trace_feats_to_decoder, trace_masks, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam searching, sample_n must be 1 or equal to beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search is used, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, trace_feats_to_decoder, trace_masks, output_logsoftmax=output_logsoftmax)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # pad tokens of finished sequences (so the eos token itself is not overwritten)
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
### for decoder evaluation: truncate at the ground-truth caption length
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
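# --- Editor's note: minimal standalone sketch, not part of the original model code. ---
# It illustrates the soft trigram-blocking penalty used in _sample/_diverse_sample
# above: trigrams generated so far are recorded per example, and any word that would
# complete an already-seen trigram gets alpha * ln(1/2) added to its log-probability.
# The toy batch, vocabulary size and token ids below are assumptions for illustration.
def _trigram_penalty_sketch(alpha=2.0):
    import torch
    batch_size, vocab_size = 2, 6
    seq = torch.tensor([[1, 2, 3, 2], [4, 4, 4, 4]])  # tokens generated so far
    trigrams = [dict() for _ in range(batch_size)]    # {(w_{t-2}, w_{t-1}): [w_t, ...]}
    for t in range(2, seq.size(1)):
        for i in range(batch_size):
            prev_two = (seq[i, t - 2].item(), seq[i, t - 1].item())
            trigrams[i].setdefault(prev_two, []).append(seq[i, t].item())
    logprobs = torch.zeros(batch_size, vocab_size)    # stand-in for the model's logprobs
    mask = torch.zeros_like(logprobs)
    for i in range(batch_size):
        prev_two = (seq[i, -2].item(), seq[i, -1].item())
        for w in trigrams[i].get(prev_two, []):
            mask[i, w] += 1
    # each repetition multiplies the word's probability by roughly (1/2) ** alpha
    return logprobs + mask * (-0.693) * alpha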
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
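# --- Editor's note: illustrative sketch with assumed sizes, not original code. ---
# It shows how the 5 * rnn_size pre-activation in AdaAtt_lstm.forward above is split
# with narrow(): three sigmoid gates, then a 2 * rnn_size chunk reduced by an
# element-wise max (the maxout write input used when use_maxout is True).
def _maxout_gate_sketch(rnn_size=4, batch=2):
    import torch
    all_input_sums = torch.randn(batch, 5 * rnn_size)
    gates = torch.sigmoid(all_input_sums.narrow(1, 0, 3 * rnn_size))
    in_gate = gates.narrow(1, 0, rnn_size)
    forget_gate = gates.narrow(1, rnn_size, rnn_size)
    out_gate = gates.narrow(1, 2 * rnn_size, rnn_size)
    two_chunks = all_input_sums.narrow(1, 3 * rnn_size, 2 * rnn_size)
    in_transform = torch.max(two_chunks.narrow(1, 0, rnn_size),
                             two_chunks.narrow(1, rnn_size, rnn_size))
    return in_gate, forget_gate, out_gate, in_transform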
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume the first mask entry is one (covers the prepended fake region)
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
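# --- Editor's note: usage sketch for the Attention module defined above. All sizes
# (rnn_size, att_hid_size, number of regions) are assumptions for illustration only;
# p_att_feats stands in for the ctx2att-projected features the model normally caches. ---
def _attention_usage_sketch():
    import torch
    from types import SimpleNamespace
    opt = SimpleNamespace(rnn_size=8, att_hid_size=16)
    attn = Attention(opt)
    B, N = 2, 5
    h = torch.randn(B, opt.rnn_size)                   # decoder hidden state
    att_feats = torch.randn(B, N, opt.rnn_size)        # embedded region features
    p_att_feats = torch.randn(B, N, opt.att_hid_size)  # pre-projected attention keys
    att_masks = torch.ones(B, N)
    att_res = attn(h, att_feats, p_att_feats, att_masks)
    return att_res.shape                               # torch.Size([2, 8])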
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note: this is my attempt to replicate the att2all model from the self-critical paper.
However, it is not actually a correct replication. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
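# --- Editor's note: standalone sketch of the remove_bad_endings mechanism used in
# AttModel._sample above: when the previously emitted word is a "bad ending" such as
# "the" or "of", emitting EOS (index 0 is assumed here) is forbidden by adding -inf
# to its log-probability. All indices and sizes below are toy assumptions. ---
def _remove_bad_endings_sketch():
    import numpy as np
    import torch
    bad_endings_ix = [3, 5]                  # vocab indices of the bad-ending words
    prev_words = torch.tensor([3, 7, 5, 2])  # words emitted at step t-1
    logprobs = torch.zeros(4, 10)
    prev_bad = np.isin(prev_words.numpy(), bad_endings_ix)
    penalty = logprobs.new_zeros(logprobs.size())
    penalty[torch.from_numpy(prev_bad), 0] = float('-inf')  # block EOS for those rows
    return logprobs + penalty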
|
connect-caption-and-trace-main
|
captioning/models/AttModel_caption_generation.py
|
# Implementation for paper 'Attention on Attention for Image Captioning'
# https://arxiv.org/abs/1908.06954
# RT: Code from original author's repo: https://github.com/husthuaan/AoANet/
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from .AttModel import pack_wrapper, AttModel, Attention
from .TransformerModel import LayerNorm, attention, clones, SublayerConnection, PositionwiseFeedForward
class MultiHeadedDotAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1, scale=1, project_k_v=1, use_output_layer=1, do_aoa=0, norm_q=0, dropout_aoa=0.3):
super(MultiHeadedDotAttention, self).__init__()
assert d_model * scale % h == 0
# We assume d_v always equals d_k
self.d_k = d_model * scale // h
self.h = h
# Do we need to do linear projections on K and V?
self.project_k_v = project_k_v
# normalize the query?
if norm_q:
self.norm = LayerNorm(d_model)
else:
self.norm = lambda x:x
self.linears = clones(nn.Linear(d_model, d_model * scale), 1 + 2 * project_k_v)
# output linear layer after the multi-head attention?
self.output_layer = nn.Linear(d_model * scale, d_model)
# apply aoa after attention?
self.use_aoa = do_aoa
if self.use_aoa:
self.aoa_layer = nn.Sequential(nn.Linear((1 + scale) * d_model, 2 * d_model), nn.GLU())
# dropout to the input of AoA layer
if dropout_aoa > 0:
self.dropout_aoa = nn.Dropout(p=dropout_aoa)
else:
self.dropout_aoa = lambda x:x
if self.use_aoa or not use_output_layer:
# AoA doesn't need the output linear layer
del self.output_layer
self.output_layer = lambda x:x
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, value, key, mask=None):
if mask is not None:
if len(mask.size()) == 2:
mask = mask.unsqueeze(-2)
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
single_query = 0
if len(query.size()) == 2:
single_query = 1
query = query.unsqueeze(1)
nbatches = query.size(0)
query = self.norm(query)
# Do all the linear projections in batch from d_model => h x d_k
if self.project_k_v == 0:
query_ = self.linears[0](query).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
key_ = key.view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
value_ = value.view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
else:
query_, key_, value_ = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# Apply attention on all the projected vectors in batch.
x, self.attn = attention(query_, key_, value_, mask=mask,
dropout=self.dropout)
# "Concat" using a view
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
if self.use_aoa:
# Apply AoA
x = self.aoa_layer(self.dropout_aoa(torch.cat([x, query], -1)))
x = self.output_layer(x)
if single_query:
query = query.squeeze(1)
x = x.squeeze(1)
return x
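# --- Editor's note: shape sketch (assumed sizes) for the head split/merge used in
# MultiHeadedDotAttention.forward above: d_model is split into h heads of size d_k,
# attention runs per head, and a transpose + view concatenates the heads again. ---
def _head_split_merge_sketch(nbatches=2, seq_len=5, h=4, d_k=8):
    import torch
    x = torch.randn(nbatches, seq_len, h * d_k)
    split = x.view(nbatches, -1, h, d_k).transpose(1, 2)               # B x h x T x d_k
    merged = split.transpose(1, 2).contiguous().view(nbatches, -1, h * d_k)
    return torch.allclose(x, merged)                                   # True: lossless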
class AoA_Refiner_Layer(nn.Module):
def __init__(self, size, self_attn, feed_forward, dropout):
super(AoA_Refiner_Layer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.use_ff = 0
if self.feed_forward is not None:
self.use_ff = 1
self.sublayer = clones(SublayerConnection(size, dropout), 1+self.use_ff)
self.size = size
def forward(self, x, mask):
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[-1](x, self.feed_forward) if self.use_ff else x
class AoA_Refiner_Core(nn.Module):
def __init__(self, opt):
super(AoA_Refiner_Core, self).__init__()
attn = MultiHeadedDotAttention(opt.num_heads, opt.rnn_size, project_k_v=1, scale=opt.multi_head_scale, do_aoa=opt.refine_aoa, norm_q=0, dropout_aoa=getattr(opt, 'dropout_aoa', 0.3))
layer = AoA_Refiner_Layer(opt.rnn_size, attn, PositionwiseFeedForward(opt.rnn_size, 2048, 0.1) if opt.use_ff else None, 0.1)
self.layers = clones(layer, 6)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class AoA_Decoder_Core(nn.Module):
def __init__(self, opt):
super(AoA_Decoder_Core, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.d_model = opt.rnn_size
self.use_multi_head = opt.use_multi_head
self.multi_head_scale = opt.multi_head_scale
self.use_ctx_drop = getattr(opt, 'ctx_drop', 0)
self.out_res = getattr(opt, 'out_res', 0)
self.decoder_type = getattr(opt, 'decoder_type', 'AoA')
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size, opt.rnn_size) # we, fc, h^2_t-1
self.out_drop = nn.Dropout(self.drop_prob_lm)
if self.decoder_type == 'AoA':
# AoA layer
self.att2ctx = nn.Sequential(nn.Linear(self.d_model * opt.multi_head_scale + opt.rnn_size, 2 * opt.rnn_size), nn.GLU())
elif self.decoder_type == 'LSTM':
# LSTM layer
self.att2ctx = nn.LSTMCell(self.d_model * opt.multi_head_scale + opt.rnn_size, opt.rnn_size)
else:
# Base linear layer
self.att2ctx = nn.Sequential(nn.Linear(self.d_model * opt.multi_head_scale + opt.rnn_size, opt.rnn_size), nn.ReLU())
# if opt.use_multi_head == 1: # TODO, not implemented for now
# self.attention = MultiHeadedAddAttention(opt.num_heads, opt.d_model, scale=opt.multi_head_scale)
if opt.use_multi_head == 2:
self.attention = MultiHeadedDotAttention(opt.num_heads, opt.rnn_size, project_k_v=0, scale=opt.multi_head_scale, use_output_layer=0, do_aoa=0, norm_q=1)
else:
self.attention = Attention(opt)
if self.use_ctx_drop:
self.ctx_drop = nn.Dropout(self.drop_prob_lm)
else:
self.ctx_drop = lambda x :x
def forward(self, xt, mean_feats, att_feats, p_att_feats, state, att_masks=None):
# state[0][1] is the context vector at the last step
h_att, c_att = self.att_lstm(torch.cat([xt, mean_feats + self.ctx_drop(state[0][1])], 1), (state[0][0], state[1][0]))
if self.use_multi_head == 2:
att = self.attention(h_att, p_att_feats.narrow(2, 0, self.multi_head_scale * self.d_model), p_att_feats.narrow(2, self.multi_head_scale * self.d_model, self.multi_head_scale * self.d_model), att_masks)
else:
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
ctx_input = torch.cat([att, h_att], 1)
if self.decoder_type == 'LSTM':
output, c_logic = self.att2ctx(ctx_input, (state[0][1], state[1][1]))
state = (torch.stack((h_att, output)), torch.stack((c_att, c_logic)))
else:
output = self.att2ctx(ctx_input)
# save the context vector to state[0][1]
state = (torch.stack((h_att, output)), torch.stack((c_att, state[1][1])))
if self.out_res:
# add residual connection
output = output + h_att
output = self.out_drop(output)
return output, state
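# --- Editor's note: small sketch of the attention-on-attention (AoA) block pattern
# used by att2ctx above: a linear layer to 2 * d followed by nn.GLU() gates one half
# of the output with a sigmoid of the other half, returning d features. The sizes
# below (and an implicit scale of 1) are assumptions for illustration. ---
def _aoa_glu_sketch(d=8, batch=2):
    import torch
    import torch.nn as nn
    aoa = nn.Sequential(nn.Linear(2 * d, 2 * d), nn.GLU())
    att = torch.randn(batch, d)    # attended context vector
    h_att = torch.randn(batch, d)  # attention-LSTM hidden state
    out = aoa(torch.cat([att, h_att], 1))
    return out.shape               # torch.Size([2, 8])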
class AoAModel(AttModel):
def __init__(self, opt):
super(AoAModel, self).__init__(opt)
self.num_layers = 2
# mean pooling
self.use_mean_feats = getattr(opt, 'mean_feats', 1)
if opt.use_multi_head == 2:
del self.ctx2att
self.ctx2att = nn.Linear(opt.rnn_size, 2 * opt.multi_head_scale * opt.rnn_size)
if self.use_mean_feats:
del self.fc_embed
if opt.refine:
self.refiner = AoA_Refiner_Core(opt)
else:
self.refiner = lambda x,y : x
self.core = AoA_Decoder_Core(opt)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed att feats
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
att_feats = self.refiner(att_feats, att_masks)
if self.use_mean_feats:
# mean pooling
if att_masks is None:
mean_feats = torch.mean(att_feats, dim=1)
else:
mean_feats = (torch.sum(att_feats * att_masks.unsqueeze(-1), 1) / torch.sum(att_masks.unsqueeze(-1), 1))
else:
mean_feats = self.fc_embed(fc_feats)
# Project the attention feats first to reduce memory and computation.
p_att_feats = self.ctx2att(att_feats)
return mean_feats, att_feats, p_att_feats, att_masks
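# --- Editor's note: minimal sketch of the masked mean pooling performed in
# AoAModel._prepare_feature above; the tensor sizes and mask values are assumptions. ---
def _masked_mean_pool_sketch():
    import torch
    B, N, D = 2, 5, 8
    att_feats = torch.randn(B, N, D)
    att_masks = torch.tensor([[1., 1., 1., 0., 0.],
                              [1., 1., 1., 1., 1.]])
    mean_feats = (att_feats * att_masks.unsqueeze(-1)).sum(1) / att_masks.unsqueeze(-1).sum(1)
    return mean_feats.shape  # torch.Size([2, 8])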
|
connect-caption-and-trace-main
|
captioning/models/AoAModel.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the image feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
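# --- Editor's note: usage sketch for pack_wrapper above. The feature sizes and the
# linear "embedding" module are assumptions; the point is that only the unmasked
# (valid) region features are pushed through the module, and padded positions come
# back as zeros after unpacking. ---
def _pack_wrapper_sketch():
    import torch
    import torch.nn as nn
    B, N, D = 2, 4, 6
    att_feats = torch.randn(B, N, D)
    att_masks = torch.tensor([[1., 1., 1., 1.],
                              [1., 1., 0., 0.]])  # second example has only 2 regions
    embed = nn.Linear(D, D)
    out = pack_wrapper(embed, att_feats, att_masks)
    return out.shape  # torch.Size([2, 4, 6])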
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
# For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
return att_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search is used, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search is used, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, att_masks, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, output_logsoftmax=output_logsoftmax)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # pad tokens of finished sequences (so the eos token itself is not overwritten)
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
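# --- Editor's note: standalone sketch of the scheduled-sampling step used in
# AttModel._forward above (controlled by ss_prob): with probability ss_prob a row
# feeds back a word sampled from the previous prediction instead of the ground truth.
# Batch size, vocabulary and distributions below are toy assumptions. ---
def _scheduled_sampling_sketch(ss_prob=0.25):
    import torch
    batch, vocab = 4, 7
    gt_tokens = torch.tensor([1, 2, 3, 4])                     # ground-truth words at step i
    prev_logprobs = torch.log_softmax(torch.randn(batch, vocab), dim=1)
    sample_mask = torch.rand(batch) < ss_prob                  # rows that use a sampled word
    it = gt_tokens.clone()
    if sample_mask.any():
        sample_ind = sample_mask.nonzero().view(-1)
        sampled = torch.multinomial(torch.exp(prev_logprobs), 1).view(-1)
        it.index_copy_(0, sample_ind, sampled.index_select(0, sample_ind))
    return it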
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_next_c = torch.tanh(next_c)
next_h = out_gate * tanh_next_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_next_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
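# Illustrative sketch (not the project's code): the maxout write gate used in
# AdaAtt_lstm above keeps the element-wise max of the two halves of a
# 2 * rnn_size pre-activation. The argument shapes are assumptions for the demo.
def _demo_maxout(pre_activation, rnn_size):
    # pre_activation: (batch, 2 * rnn_size)
    return torch.max(pre_activation.narrow(1, 0, rnn_size),
                     pre_activation.narrow(1, rnn_size, rnn_size))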
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.att_hid_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # prepend one column for the fake-region slot; assumes the first region is always unmasked
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
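# Illustrative note (not part of the original model code): in AdaAtt_attention the
# "fake region" acts as a visual sentinel. It is prepended as an extra slot before
# the att_size image regions, so PI has att_size + 1 weights and PI[:, 0] is the
# probability mass assigned to "do not look at the image" for the current word.
def _demo_sentinel_split(PI):
    # PI: (batch, att_size + 1) attention weights as computed above (toy input when testing)
    sentinel_weight = PI[:, 0]   # weight on the visual sentinel / fake region
    region_weights = PI[:, 1:]   # weights on the att_size image regions
    return sentinel_weight, region_weights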
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# alternative (unused): apply dropout to h_att before concatenation, i.e. torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1)
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
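# Illustrative sketch (toy configuration, not the project's defaults): wiring up
# UpDownCore for a single decoding step. The opt namespace below is invented for
# the demo; the real options come from the project's argument parser.
def _demo_updown_step():
    import torch
    from types import SimpleNamespace
    opt = SimpleNamespace(input_encoding_size=8, rnn_size=8,
                          drop_prob_lm=0.5, att_hid_size=8)
    core = UpDownCore(opt)
    B, L = 2, 5
    xt = torch.randn(B, opt.input_encoding_size)          # current word embedding
    fc_feats = torch.randn(B, opt.rnn_size)               # global image feature
    att_feats = torch.randn(B, L, opt.rnn_size)           # region features
    p_att_feats = torch.randn(B, L, opt.att_hid_size)     # pre-projected region features
    state = (torch.zeros(2, B, opt.rnn_size), torch.zeros(2, B, opt.rnn_size))
    out, state = core(xt, fc_feats, att_feats, p_att_feats, state)
    return out.shape  # (B, rnn_size)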
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
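# Illustrative sketch (toy numbers): how the optional att_masks re-normalisation
# above behaves - padded regions receive zero weight and the remaining weights
# are rescaled to sum to one.
def _demo_masked_attention_weights():
    import torch
    import torch.nn.functional as F
    scores = torch.tensor([[1.0, 2.0, 3.0]])
    att_masks = torch.tensor([[1.0, 1.0, 0.0]])   # the last region is padding
    weight = F.softmax(scores, dim=1) * att_masks
    return weight / weight.sum(1, keepdim=True)   # rows sum to one over real regions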
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not a correct replication actually. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# Cases to handle here:
# - normal MLE training
# - sampling
# - beam search (including diverse beam search)
# - a fixed captioning module
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_orig.py
|
# This file contains Transformer network
# Most of the code is copied from http://nlp.seas.harvard.edu/2018/04/03/attention.html
# The cfg name correspondence:
# N=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel_standard_enco_deco_both import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator_caption, generator_trace, d_model, dropout):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator_caption = generator_caption
self.generator_trace = generator_trace
# self.decode_layernorm = nn.LayerNorm(d_model, elementwise_affine=True)
# self.dropout = nn.Dropout(dropout)
self.trace_layernorm_caption = nn.LayerNorm(d_model, elementwise_affine=True)
self.trace_layernorm_trace = nn.LayerNorm(d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(d_model,0) # here don't use dropout inside positional embedding
self.trace_embed = nn.Sequential(*(
(nn.Linear(5, d_model),
nn.LayerNorm(d_model, elementwise_affine=True),
nn.ReLU(),
nn.Dropout(0.5)) ))
self.trace_feat_embed = nn.Sequential(*(
(nn.Linear(2048, d_model),
nn.LayerNorm(d_model, elementwise_affine=True),
nn.ReLU(),
nn.Dropout(0.5))))
def forward(self, src, tgt, src_mask, tgt_mask, trace_feat, trace_masks, task):
"Take in and process masked src and target sequences."
memory = self.encode(src, src_mask)
return self.decode(memory, src_mask, tgt, tgt_mask, trace_feat, trace_masks, task), memory
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask, trace_feats, trace_masks, task):
# if task == 'trace':
### get trace_feat
# trace_grid_feats = trace_feats[:, :, 5:]
trace_feats = trace_feats[:, :, :5]
# trace_grid_feats = self.trace_feat_embed(trace_grid_feats)
trace_feats = self.trace_embed(trace_feats)
trace_feats = self.trace_layernorm_trace(self.position_encoder(trace_feats))
### embed the tgt and then add the trace_grid_feat: add trace_feat in the beginning
tgt_emd = self.tgt_embed(tgt, task) #, task
# if tgt.shape[1] > trace_feats.shape[1]:
# trace_feats = torch.cat([trace_feats, torch.zeros([trace_feats.shape[0], tgt_emd.shape[1]-trace_feats.shape[1],
# trace_feats.shape[2]]).to(trace_feats.device)], 1)
# else:
# trace_feats = trace_feats[:, :tgt_emd.shape[1], :]
# tgt_emd = self.dropout(self.decode_layernorm(tgt_emd + trace_feat))
return self.decoder(tgt_emd, trace_feats, memory, src_mask, tgt_mask, trace_masks, task)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
self.norm_2 = LayerNorm(layer.size)
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
for layer in self.layers:
x = layer(x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task)
if task == 'both':
return self.norm(x[0]), self.norm_2(x[1])
else:
return self.norm(x)
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, caption_trace_attn, trace_caption_attn, trace_self_attn, trace_src_attn,
feed_forward_caption, feed_forward_trace, both_caption_trace_attn, both_trace_caption_attn,
both_feed_forward_caption, both_feed_forward_trace,dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward_caption = feed_forward_caption
self.feed_forward_trace = feed_forward_trace
# self.sublayer = clones(SublayerConnection(size, dropout), 3)
self.sublayer = clones(SublayerConnection(size, dropout), 8+4)
###
self.caption_trace_attn = caption_trace_attn
self.trace_caption_attn = trace_caption_attn
self.trace_self_attn = trace_self_attn
self.trace_src_attn = trace_src_attn
### both attn
self.both_caption_trace_attn = both_caption_trace_attn
self.both_trace_caption_attn = both_trace_caption_attn
self.both_feed_forward_caption = both_feed_forward_caption
self.both_feed_forward_trace = both_feed_forward_trace
###########
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
"Follow Figure 1 (right) for connections."
m = memory
if task == 'trace' or task == 'cycle_trace':
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
### add a layer for x to attend over the trace features
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat, trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
trace_feat = self.sublayer[6](trace_feat, lambda trace_feat: self.trace_caption_attn(trace_feat, x, x, tgt_mask))
################################################
return self.sublayer[7](trace_feat, self.feed_forward_trace)
elif task == 'caption':
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
### add a layer for x to attend over the trace features
# trace_mask = tgt_mask[:, -1, :].unsqueeze(1).long()
trace_masks = trace_masks.unsqueeze(1)
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat,
trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
x = self.sublayer[4](x, lambda x: self.caption_trace_attn(x, trace_feat, trace_feat, trace_masks))
################################################
return self.sublayer[5](x, self.feed_forward_caption)
elif task == 'both':
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
### add a layer for x to attend over the trace features
# trace_mask = tgt_mask[:, -1, :].unsqueeze(1).long()
# trace_masks = trace_masks.unsqueeze(1)
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat,
trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
trace_masks_for_caption = torch.cat([trace_masks,
trace_masks[:, -1, :].unsqueeze(1).repeat(1,tgt_mask.shape[1]-trace_masks.shape[1],1)], 1)
tgt_mask_for_trace = tgt_mask[:, :trace_masks.shape[1], :]
# x_out = self.sublayer[8](x, lambda x: self.both_caption_trace_attn(x, trace_feat, trace_feat, trace_masks_for_caption))
# trace_feat_out = self.sublayer[9](trace_feat,
# lambda trace_feat: self.both_trace_caption_attn(trace_feat, x, x, tgt_mask_for_trace))
return self.sublayer[10](x, self.both_feed_forward_caption), self.sublayer[11](trace_feat, self.both_feed_forward_trace)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
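# Illustrative example (not executed anywhere in the repo): subsequent_mask(4)[0]
# is the boolean lower-triangular matrix below, i.e. position t may attend to
# positions <= t only.
# [[ True, False, False, False],
#  [ True,  True, False, False],
#  [ True,  True,  True, False],
#  [ True,  True,  True,  True]]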
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, float('-inf'))
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
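# Illustrative sketch (toy shapes, not part of the original file): calling the
# scaled dot-product attention helper directly. query/key/value are
# (batch, heads, len, d_k); every row of the returned p_attn sums to one over the keys.
def _demo_scaled_dot_product_attention():
    q = torch.randn(2, 8, 5, 64)
    k = torch.randn(2, 8, 5, 64)
    v = torch.randn(2, 8, 5, 64)
    out, p_attn = attention(q, k, v)
    return out.shape, p_attn.sum(-1)   # (2, 8, 5, 64), values of ~1.0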
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
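# Illustrative sketch (toy sizes): a causal self-attention call through
# MultiHeadedAttention. With h=8 and d_model=64 each head works in d_k = 8
# dimensions; all the numbers are assumptions for the demo only.
def _demo_multi_head_self_attention():
    mha = MultiHeadedAttention(h=8, d_model=64)
    x = torch.randn(2, 5, 64)              # (batch, seq_len, d_model)
    mask = subsequent_mask(5)               # causal mask of shape (1, 5, 5)
    return mha(x, x, x, mask=mask).shape    # (2, 5, 64)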
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
self.vocab = vocab
# self.layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
def forward(self, x, task=None):
if task != 'cycle_trace':
return self.lut(x) * math.sqrt(self.d_model)
else:
# use gumbel softmax with tau = 1
x = torch.nn.functional.softmax(torch.log(x) -
torch.log(-torch.log(torch.rand([x.shape[2]]))).unsqueeze(0).unsqueeze(0).to(x.device),
dim=-1)
return torch.matmul(x, self.lut(torch.arange(self.vocab).to(x.device))) \
* math.sqrt(self.d_model)
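# Illustrative sketch (toy shapes): the 'cycle_trace' branch above expects x to
# already be a (batch, len, vocab) distribution over words. It perturbs log(x)
# with Gumbel noise (tau = 1) and mixes the embedding table with the resulting
# soft weights instead of doing a hard index lookup.
def _demo_soft_embedding_lookup():
    vocab, d_model = 10, 16
    emb = Embeddings(d_model, vocab)
    probs = torch.softmax(torch.randn(2, 5, vocab), dim=-1)
    return emb(probs, task='cycle_trace').shape   # (2, 5, d_model)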
class caption_Embeddings(nn.Module):
def __init__(self, d_model, vocab, position_encoder):
super(caption_Embeddings, self).__init__()
self.position_encoder = position_encoder
self.embed = Embeddings(d_model, vocab)
def forward(self, x, task):
x = self.embed(x, task)
x = self.position_encoder(x)
return x
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
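# Illustrative sketch (toy sizes): the sinusoidal table is precomputed once up to
# max_len, then sliced to the input length and added to the input embeddings.
def _demo_positional_encoding():
    pe = PositionalEncoding(d_model=16, dropout=0.0)
    x = torch.zeros(2, 7, 16)
    return pe(x).shape   # (2, 7, 16); since x is zero, row t holds the encoding of position t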
class TransformerModel(AttModel):
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model, dropout)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
# position_nodropout = PositionalEncoding(d_model, 0)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N_enc),
Decoder(DecoderLayer(d_model, c(attn), c(attn), c(attn), c(attn), c(attn), c(attn),
c(ff), c(ff), c(attn), c(attn), c(ff), c(ff), dropout), N_dec),
lambda x:x, # nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
caption_Embeddings(d_model, tgt_vocab, c(position)), #nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)), #
Generator(d_model, tgt_vocab), nn.Sequential(nn.Linear(d_model, 5), nn.Sigmoid()),
d_model,dropout)
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def __init__(self, opt):
super(TransformerModel, self).__init__(opt)
self.opt = opt
# self.config = yaml.load(open(opt.config_file))
self.N_enc = getattr(opt, 'N_enc', opt.num_layers)
self.N_dec = getattr(opt, 'N_dec', opt.num_layers)
self.d_model = getattr(opt, 'd_model', opt.input_encoding_size)
self.d_ff = getattr(opt, 'd_ff', opt.rnn_size)
self.h = getattr(opt, 'num_att_heads', 8)
self.dropout = getattr(opt, 'dropout', 0.1)
self.use_trace_feat = getattr(opt, 'use_trace_feat', 0)
delattr(self, 'att_embed')
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
# define trace embedding and layernorm
# self.trace_embed = nn.Linear(5, self.d_model)
self.box_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ()) +
(nn.Linear(5, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)) +
((nn.BatchNorm1d(self.d_model),) if self.use_bn == 2 else ())))
self.trace_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ())+
(nn.Linear(5, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
self.trace_feat_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ()) +
(nn.Linear(2048, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)) +
((nn.BatchNorm1d(self.d_model),) if self.use_bn == 2 else ())))
self.box_layernorm1 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.box_layernorm2 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm1 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm2 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm3 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm4 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.att_layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(self.d_model, self.dropout)
delattr(self, 'embed')
self.embed = lambda x : x
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
delattr(self, 'logit')
del self.ctx2att
tgt_vocab = self.vocab_size + 1
print(self.N_enc, self.N_dec, self.d_model, self.d_ff, self.h, self.dropout)
self.model = self.make_model(0, tgt_vocab,
N_enc=self.N_enc,
N_dec=self.N_dec,
d_model=self.d_model,
d_ff=self.d_ff,
h=self.h,
dropout=self.dropout)
def logit(self, x): # unsafe way
return self.model.generator_caption.proj(x)
def init_hidden(self, bsz):
return []
def _prepare_feature(self, fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks):
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
if self.opt.use_trace_feat:
trace_grid_feats = trace_feats[:, :, 5:]
trace_feats = trace_feats[:, :, :5]
trace_grid_feats = self.trace_layernorm3(self.trace_feat_embed(trace_grid_feats))
# trace_grid_feats = self.position_encoder(trace_grid_feats)
# trace_grid_feats = self.trace_layernorm4(trace_grid_feats)
trace_feats = self.trace_layernorm1(self.trace_embed(trace_feats))
if self.opt.use_trace_feat:
trace_feats = trace_feats + trace_grid_feats
# trace_feats_to_decoder = trace_feats
trace_feats = self.position_encoder(trace_feats) # add positional embedding
trace_feats = self.trace_layernorm2(trace_feats)
### comment to test: trace feat not from encoder, only from decoder
# att_feats = torch.cat([att_feats, trace_feats], 1) # concat with trace feats
# att_masks = torch.cat([att_masks, trace_masks.unsqueeze(1)], 2)
###########################
memory = self.model.encode(att_feats, att_masks)
return fc_feats[...,:0], att_feats[...,:0], memory, att_masks, trace_feats_to_decoder
def _prepare_feature_forward(self, att_feats, box_feats, att_masks=None, seq=None):
# comment for classification
# att_feats, box_feats, att_masks = self.clip_att(att_feats, box_feats, att_masks)
# original version by ruotian
# att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# my version: without pack and pad
att_feats = self.att_embed(att_feats)
if att_masks is None:
att_masks = att_feats.new_ones(att_feats.shape[:2], dtype=torch.long)
att_masks = att_masks.unsqueeze(-2)
if seq is not None:
# crop the last one
# seq = seq[:,:-1]
seq_mask = (seq.data != self.eos_idx) & (seq.data != self.pad_idx)
seq_mask[:,0] = 1 # bos
seq_mask = seq_mask.unsqueeze(-2)
seq_mask = seq_mask & subsequent_mask(seq.size(-1)).to(seq_mask)
seq_per_img = seq.shape[0] // att_feats.shape[0]
if seq_per_img > 1:
att_feats, att_masks = utils.repeat_tensors(seq_per_img,
[att_feats, att_masks]
)
else:
seq_mask = None
return att_feats, box_feats, seq, att_masks, seq_mask
def _forward(self, fc_feats, att_feats, trace_feats, box_feats, seq, att_masks=None, trace_masks=None, task = None):
assert task in ('trace', 'caption', 'both', 'cycle_trace')
if task != 'cycle_trace':
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
if task == 'both':
### get the original caption input
tmp_seq = seq[:, :trace_masks.shape[1]]
_, _, _, _, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
## prepare the shifted trace
shifted_trace = torch.cat(
[torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
random_mask_rate = 0 # currently no random trace masking in either training or evaluation
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(shifted_trace.device).unsqueeze(0).unsqueeze(1)
# out = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks)
(out_caption, out_trace), memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask,
task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs_caption = self.model.generator_caption(out_caption)
outputs_trace = self.model.generator_trace(out_trace)
return outputs_caption, outputs_trace
elif task == 'trace' or task == 'cycle_trace':
# for classification
trace_feats = trace_feats[:, :, :5]
### get the original caption input
tmp_seq = torch.ones([trace_masks.shape[0], trace_masks.shape[1]]).to(trace_masks.device) # seq[:, :trace_masks.shape[1]]
seq = seq[:, 1:trace_masks.shape[1]+1] # crop the seq to real length
seq_mask = trace_masks.unsqueeze(1)
att_feats, box_feats, tmp_seq, att_masks, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
## prepare the shifted trace
shifted_trace = torch.cat([torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
random_mask_rate = 0 # currently no random trace masking in either training or evaluation
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
# if torch.rand(1) > 0.5: # half [0,0,1,1,1], half random
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(
shifted_trace.device).unsqueeze(0).unsqueeze(1)
# else:
# tmp_1 = torch.rand([shifted_trace.shape[0], shifted_trace.shape[1], 2]).sort(dim=2)[0]
# tmp_2 = torch.rand([shifted_trace.shape[0], shifted_trace.shape[1], 2]).sort(dim=2)[0]
# tmp = torch.stack([tmp_1[:, :, 0], tmp_2[:, :, 0], tmp_1[:, :, 1], tmp_2[:, :, 1],
# (tmp_1[:, :, 1] - tmp_1[:, :, 0]) * (tmp_2[:, :, 1] - tmp_2[:, :, 0])], 2)
# shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
# (1 - random_mask) * tmp.to(shifted_trace.device)
# out = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks)
out, memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask, task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs = self.model.generator_trace(out)
# for classification, use (masked) dot product to provide logits
# out = out / torch.norm(out, dim=2).unsqueeze(2)
# memory = memory / torch.norm(memory, dim=2).unsqueeze(2)
# outputs = torch.matmul(out, memory.transpose(1,2))
# memory_mask = att_masks
# outputs = outputs.masked_fill(memory_mask == 0, float('-inf'))
#
# outputs = F.softmax(outputs, dim=-1)
# outputs = (outputs.unsqueeze(3) * box_feats.unsqueeze(1)).sum(dim=2)
# print('transformer_out',outputs.argmax(dim=-1)[0])
return outputs
elif task == 'caption':
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
out, _ = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks, task)
outputs = self.model.generator_caption(out)
return outputs
# return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask, trace_feats_to_decoder, trace_masks, task):
"""
state = [ys.unsqueeze(0)]
"""
if len(state) == 0:
ys = it.unsqueeze(1)
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
if task == 'caption':
out = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, trace_masks, 'caption')
return out[:, -1], [ys.unsqueeze(0)]
elif task == 'both':
out_caption, out_trace = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, subsequent_mask(ys.size(1)).to(memory.device), 'both')
return out_caption[:, -1], [ys.unsqueeze(0)], out_trace
|
connect-caption-and-trace-main
|
captioning/models/TransformerModel_standard_enco_deco_both.py
|
# This file contains the CaptionModel base class: generic beam search,
# diverse beam search, and next-word sampling utilities shared by the
# concrete captioning models.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from ..utils import misc as utils
from . import utils as model_utils
class CaptionModel(nn.Module):
def __init__(self):
super(CaptionModel, self).__init__()
# implements beam search
# calls beam_step and returns the final set of beams
# augments log-probabilities with diversity terms when number of groups > 1
def forward(self, *args, **kwargs):
mode = kwargs.get('mode', 'forward')
if 'mode' in kwargs:
del kwargs['mode']
return getattr(self, '_'+mode)(*args, **kwargs)
def beam_search(self, init_state, init_logprobs, *args, **kwargs):
# function computes the similarity score to be augmented
def add_diversity(beam_seq_table, logprobs, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprobs = logprobs.clone()
batch_size = beam_seq_table[0].shape[0]
if divm > 0:
change = logprobs.new_zeros(batch_size, logprobs.shape[-1])
for prev_choice in range(divm):
prev_decisions = beam_seq_table[prev_choice][:, :, local_time] # Nxb
for prev_labels in range(bdash):
change.scatter_add_(1, prev_decisions[:, prev_labels].unsqueeze(-1), change.new_ones(batch_size, 1))
if local_time == 0:
logprobs = logprobs - change * diversity_lambda
else:
logprobs = logprobs - self.repeat_tensor(bdash, change) * diversity_lambda
return logprobs, unaug_logprobs
# does one step of classical beam search
def beam_step(logprobs, unaug_logprobs, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
#INPUTS:
#logprobs: probabilities augmented after diversity N*bxV
#beam_size: obvious
#t : time instant
#beam_seq : tensor containing the beams
#beam_seq_logprobs: tensor containing the beam logprobs
#beam_logprobs_sum: tensor containing joint logprobs
#OUTPUTS:
#beam_seq : tensor containing the word indices of the decoded captions Nxbxl
#beam_seq_logprobs : log-probability of each decision made, NxbxlxV
#beam_logprobs_sum : joint log-probability of each beam Nxb
batch_size = beam_logprobs_sum.shape[0]
vocab_size = logprobs.shape[-1]
logprobs = logprobs.reshape(batch_size, -1, vocab_size) # NxbxV
if t == 0:
assert logprobs.shape[1] == 1
beam_logprobs_sum = beam_logprobs_sum[:, :1]
candidate_logprobs = beam_logprobs_sum.unsqueeze(-1) + logprobs # beam_logprobs_sum Nxb logprobs is NxbxV
ys, ix = torch.sort(candidate_logprobs.reshape(candidate_logprobs.shape[0], -1), -1, True)
ys, ix = ys[:,:beam_size], ix[:,:beam_size]
beam_ix = ix // vocab_size # Nxb which beam
selected_ix = ix % vocab_size # Nxb # which word
state_ix = (beam_ix + torch.arange(batch_size).type_as(beam_ix).unsqueeze(-1) * logprobs.shape[1]).reshape(-1) # N*b which in Nxb beams
if t > 0:
# gather according to beam_ix
assert (beam_seq.gather(1, beam_ix.unsqueeze(-1).expand_as(beam_seq)) == beam_seq.reshape(-1, beam_seq.shape[-1])[state_ix].view_as(beam_seq)).all()
beam_seq = beam_seq.gather(1, beam_ix.unsqueeze(-1).expand_as(beam_seq))
beam_seq_logprobs = beam_seq_logprobs.gather(1, beam_ix.unsqueeze(-1).unsqueeze(-1).expand_as(beam_seq_logprobs))
beam_seq = torch.cat([beam_seq, selected_ix.unsqueeze(-1)], -1) # beam_seq Nxbxl
beam_logprobs_sum = beam_logprobs_sum.gather(1, beam_ix) + \
logprobs.reshape(batch_size, -1).gather(1, ix)
assert (beam_logprobs_sum == ys).all()
_tmp_beam_logprobs = unaug_logprobs[state_ix].reshape(batch_size, -1, vocab_size)
beam_logprobs = unaug_logprobs.reshape(batch_size, -1, vocab_size).gather(1, beam_ix.unsqueeze(-1).expand(-1, -1, vocab_size)) # NxbxV
assert (_tmp_beam_logprobs == beam_logprobs).all()
beam_seq_logprobs = torch.cat([
beam_seq_logprobs,
beam_logprobs.reshape(batch_size, -1, 1, vocab_size)], 2)
new_state = [None for _ in state]
for _ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[_ix] = state[_ix][:, state_ix]
state = new_state
return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state
# Start diverse_beam_search
opt = kwargs['opt']
temperature = opt.get('temperature', 1) # This should not affect beam search, but will affect dbs
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
suppress_UNK = opt.get('suppress_UNK', 0)
length_penalty = utils.penalty_builder(opt.get('length_penalty', ''))
bdash = beam_size // group_size # beam per group
batch_size = init_logprobs.shape[0]
device = init_logprobs.device
# INITIALIZATIONS
beam_seq_table = [torch.LongTensor(batch_size, bdash, 0).to(device) for _ in range(group_size)]
beam_seq_logprobs_table = [torch.FloatTensor(batch_size, bdash, 0, self.vocab_size + 1).to(device) for _ in range(group_size)]
beam_logprobs_sum_table = [torch.zeros(batch_size, bdash).to(device) for _ in range(group_size)]
# logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size+1)
done_beams_table = [[[] for __ in range(group_size)] for _ in range(batch_size)]
# state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
# state_table = list(zip(*[_.reshape(-1, batch_size * bdash, group_size, *_.shape[2:]).chunk(group_size, 2) for _ in init_state]))
state_table = [[_.clone() for _ in init_state] for _ in range(group_size)]
# logprobs_table = list(init_logprobs.reshape(batch_size * bdash, group_size, -1).chunk(group_size, 0))
logprobs_table = [init_logprobs.clone() for _ in range(group_size)]
# END INIT
# Chunk elements in the args
args = list(args)
args = model_utils.split_tensors(group_size, args) # For each arg, turn (Bbg)x... to (Bb)x(g)x...
if self.__class__.__name__ == 'AttEnsemble':
args = [[[args[j][i][k] for i in range(len(self.models))] for j in range(len(args))] for k in range(group_size)] # group_name, arg_name, model_name
else:
args = [[args[i][j] for i in range(len(args)-1)]+[args[-1]] for j in range(group_size)]
for t in range(self.seq_length + group_size - 1):
for divm in range(group_size):
if t >= divm and t <= self.seq_length + divm - 1:
# add diversity
logprobs = logprobs_table[divm]
# suppress previous word
if decoding_constraint and t-divm > 0:
logprobs.scatter_(1, beam_seq_table[divm][:, :, t-divm-1].reshape(-1, 1).to(device), float('-inf'))
if remove_bad_endings and t-divm > 0:
logprobs[torch.from_numpy(np.isin(beam_seq_table[divm][:, :, t-divm-1].cpu().numpy(), self.bad_endings_ix)).reshape(-1), 0] = float('-inf')
# suppress UNK tokens in the decoding
if suppress_UNK and hasattr(self, 'vocab') and self.vocab[str(logprobs.size(1)-1)] == 'UNK':
logprobs[:,logprobs.size(1)-1] = logprobs[:, logprobs.size(1)-1] - 1000
# diversity is added here
# the function directly modifies the logprobs values and hence, we need to return
# the unaugmented ones for sorting the candidates in the end. # for historical
# reasons :-)
logprobs, unaug_logprobs = add_diversity(beam_seq_table,logprobs,t,divm,diversity_lambda,bdash)
# infer new beams
beam_seq_table[divm],\
beam_seq_logprobs_table[divm],\
beam_logprobs_sum_table[divm],\
state_table[divm] = beam_step(logprobs,
unaug_logprobs,
bdash,
t-divm,
beam_seq_table[divm],
beam_seq_logprobs_table[divm],
beam_logprobs_sum_table[divm],
state_table[divm])
# if time's up... or if end token is reached then copy beams
for b in range(batch_size):
is_end = beam_seq_table[divm][b, :, t-divm] == self.eos_idx
assert beam_seq_table[divm].shape[-1] == t-divm+1
if t == self.seq_length + divm - 1:
is_end.fill_(1)
for vix in range(bdash):
if is_end[vix]:
final_beam = {
'seq': beam_seq_table[divm][b, vix].clone(),
'logps': beam_seq_logprobs_table[divm][b, vix].clone(),
'unaug_p': beam_seq_logprobs_table[divm][b, vix].sum().item(),
'p': beam_logprobs_sum_table[divm][b, vix].item()
}
final_beam['p'] = length_penalty(t-divm+1, final_beam['p'])
done_beams_table[b][divm].append(final_beam)
beam_logprobs_sum_table[divm][b, is_end] -= 1000
# move the current group one step forward in time
it = beam_seq_table[divm][:, :, t-divm].reshape(-1).to(logprobs.device)
logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it, *(args[divm] + [state_table[divm]]))
logprobs_table[divm] = F.log_softmax(logprobs_table[divm] / temperature, dim=-1)
# all beams are sorted by their log-probabilities
done_beams_table = [[sorted(done_beams_table[b][i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)] for b in range(batch_size)]
done_beams = [sum(_, []) for _ in done_beams_table]
return done_beams
def old_beam_search(self, init_state, init_logprobs, *args, **kwargs):
# function computes the similarity score to be augmented
def add_diversity(beam_seq_table, logprobsf, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprobsf = logprobsf.clone()
for prev_choice in range(divm):
prev_decisions = beam_seq_table[prev_choice][local_time]
for sub_beam in range(bdash):
for prev_labels in range(bdash):
logprobsf[sub_beam][prev_decisions[prev_labels]] = logprobsf[sub_beam][prev_decisions[prev_labels]] - diversity_lambda
return unaug_logprobsf
# does one step of classical beam search
def beam_step(logprobsf, unaug_logprobsf, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
#INPUTS:
#logprobsf: probabilities augmented after diversity
#beam_size: obvious
#t : time instant
#beam_seq : tensor containing the beams
#beam_seq_logprobs: tensor containing the beam logprobs
#beam_logprobs_sum: tensor containing joint logprobs
#OUTPUTS:
#beam_seq : tensor containing the word indices of the decoded captions
#beam_seq_logprobs : log-probability of each decision made, same size as beam_seq
#beam_logprobs_sum : joint log-probability of each beam
ys,ix = torch.sort(logprobsf,1,True)
candidates = []
cols = min(beam_size, ys.size(1))
rows = beam_size
if t == 0:
rows = 1
for c in range(cols): # for each column (word, essentially)
for q in range(rows): # for each beam expansion
#compute logprob of expanding beam q with word in (sorted) position c
local_logprob = ys[q,c].item()
candidate_logprob = beam_logprobs_sum[q] + local_logprob
# local_unaug_logprob = unaug_logprobsf[q,ix[q,c]]
candidates.append({'c':ix[q,c], 'q':q, 'p':candidate_logprob, 'r':unaug_logprobsf[q]})
candidates = sorted(candidates, key=lambda x: -x['p'])
new_state = [_.clone() for _ in state]
#beam_seq_prev, beam_seq_logprobs_prev
if t >= 1:
#we'll need these as reference when we fork beams around
beam_seq_prev = beam_seq[:t].clone()
beam_seq_logprobs_prev = beam_seq_logprobs[:t].clone()
for vix in range(beam_size):
v = candidates[vix]
#fork beam index q into index vix
if t >= 1:
beam_seq[:t, vix] = beam_seq_prev[:, v['q']]
beam_seq_logprobs[:t, vix] = beam_seq_logprobs_prev[:, v['q']]
#rearrange recurrent states
for state_ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[state_ix][:, vix] = state[state_ix][:, v['q']] # dimension one is time step
#append new end terminal at the end of this beam
beam_seq[t, vix] = v['c'] # c'th word is the continuation
beam_seq_logprobs[t, vix] = v['r'] # the raw logprob here
beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam
state = new_state
return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state,candidates
# Start diverse_beam_search
opt = kwargs['opt']
temperature = opt.get('temperature', 1) # This should not affect beam search, but will affect dbs
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
suppress_UNK = opt.get('suppress_UNK', 0)
length_penalty = utils.penalty_builder(opt.get('length_penalty', ''))
bdash = beam_size // group_size # beam per group
# INITIALIZATIONS
beam_seq_table = [torch.LongTensor(self.seq_length, bdash).zero_() for _ in range(group_size)]
beam_seq_logprobs_table = [torch.FloatTensor(self.seq_length, bdash, self.vocab_size + 1).zero_() for _ in range(group_size)]
beam_logprobs_sum_table = [torch.zeros(bdash) for _ in range(group_size)]
# logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size+1)
done_beams_table = [[] for _ in range(group_size)]
# state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
state_table = list(zip(*[_.chunk(group_size, 1) for _ in init_state]))
logprobs_table = list(init_logprobs.chunk(group_size, 0))
# END INIT
# Chunk elements in the args
args = list(args)
if self.__class__.__name__ == 'AttEnsemble':
args = [[_.chunk(group_size) if _ is not None else [None]*group_size for _ in args_] for args_ in args] # arg_name, model_name, group_name
args = [[[args[j][i][k] for i in range(len(self.models))] for j in range(len(args))] for k in range(group_size)] # group_name, arg_name, model_name
else:
args = [_.chunk(group_size) if _ is not None else [None]*group_size for _ in args]
args = [[args[i][j] for i in range(len(args))] for j in range(group_size)]
for t in range(self.seq_length + group_size - 1):
for divm in range(group_size):
if t >= divm and t <= self.seq_length + divm - 1:
# add diversity
logprobsf = logprobs_table[divm]
# suppress previous word
if decoding_constraint and t-divm > 0:
logprobsf.scatter_(1, beam_seq_table[divm][t-divm-1].unsqueeze(1).to(logprobsf.device), float('-inf'))
if remove_bad_endings and t-divm > 0:
logprobsf[torch.from_numpy(np.isin(beam_seq_table[divm][t-divm-1].cpu().numpy(), self.bad_endings_ix)), 0] = float('-inf')
# suppress UNK tokens in the decoding
if suppress_UNK and hasattr(self, 'vocab') and self.vocab[str(logprobsf.size(1)-1)] == 'UNK':
logprobsf[:,logprobsf.size(1)-1] = logprobsf[:, logprobsf.size(1)-1] - 1000
# diversity is added here
# the function directly modifies the logprobsf values and hence, we need to return
# the unaugmented ones for sorting the candidates in the end. # for historical
# reasons :-)
unaug_logprobsf = add_diversity(beam_seq_table,logprobsf,t,divm,diversity_lambda,bdash)
# infer new beams
beam_seq_table[divm],\
beam_seq_logprobs_table[divm],\
beam_logprobs_sum_table[divm],\
state_table[divm],\
candidates_divm = beam_step(logprobsf,
unaug_logprobsf,
bdash,
t-divm,
beam_seq_table[divm],
beam_seq_logprobs_table[divm],
beam_logprobs_sum_table[divm],
state_table[divm])
# if time's up... or if end token is reached then copy beams
for vix in range(bdash):
if beam_seq_table[divm][t-divm,vix] == self.eos_idx or t == self.seq_length + divm - 1:
final_beam = {
'seq': beam_seq_table[divm][:, vix].clone(),
'logps': beam_seq_logprobs_table[divm][:, vix].clone(),
'unaug_p': beam_seq_logprobs_table[divm][:, vix].sum().item(),
'p': beam_logprobs_sum_table[divm][vix].item()
}
final_beam['p'] = length_penalty(t-divm+1, final_beam['p'])
done_beams_table[divm].append(final_beam)
# don't continue beams from finished sequences
beam_logprobs_sum_table[divm][vix] = -1000
# move the current group one step forward in time
it = beam_seq_table[divm][t-divm].to(logprobsf.device)
logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it, *(args[divm] + [state_table[divm]]))
logprobs_table[divm] = F.log_softmax(logprobs_table[divm] / temperature, dim=-1)
# all beams are sorted by their log-probabilities
done_beams_table = [sorted(done_beams_table[i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)]
done_beams = sum(done_beams_table, [])
return done_beams
def sample_next_word(self, logprobs, sample_method, temperature):
if sample_method == 'greedy':
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
elif sample_method == 'gumbel': # gumbel softmax
# ref: https://gist.github.com/yzh119/fd2146d2aeb329d067568a493b20172f
def sample_gumbel(shape, eps=1e-20):
U = torch.rand(shape).to(logprobs.device)
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
y = logits + sample_gumbel(logits.size())
return F.log_softmax(y / temperature, dim=-1)
_logprobs = gumbel_softmax_sample(logprobs, temperature)
_, it = torch.max(_logprobs.data, 1)
sampleLogprobs = logprobs.gather(1, it.unsqueeze(1)) # gather the logprobs at sampled positions
else:
logprobs = logprobs / temperature
if sample_method.startswith('top'): # topk sampling
top_num = float(sample_method[3:])
if 0 < top_num < 1:
                    # nucleus sampling, from "The Curious Case of Neural Text Degeneration"
probs = F.softmax(logprobs, dim=1)
sorted_probs, sorted_indices = torch.sort(probs, descending=True, dim=1)
_cumsum = sorted_probs.cumsum(1)
mask = _cumsum < top_num
mask = torch.cat([torch.ones_like(mask[:,:1]), mask[:,:-1]], 1)
sorted_probs = sorted_probs * mask.to(sorted_probs)
sorted_probs = sorted_probs / sorted_probs.sum(1, keepdim=True)
logprobs.scatter_(1, sorted_indices, sorted_probs.log())
else:
the_k = int(top_num)
tmp = torch.empty_like(logprobs).fill_(float('-inf'))
topk, indices = torch.topk(logprobs, the_k, dim=1)
tmp = tmp.scatter(1, indices, topk)
logprobs = tmp
it = torch.distributions.Categorical(logits=logprobs.detach()).sample()
sampleLogprobs = logprobs.gather(1, it.unsqueeze(1)) # gather the logprobs at sampled positions
return it, sampleLogprobs
def decode_sequence(self, seq):
return utils.decode_sequence(self.vocab, seq)
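

# ---------------------------------------------------------------------------
# Added illustration (not part of the original file): a minimal sketch of the
# decoding options consumed by the beam-search and sampling code above. The
# keys mirror the opt.get(...) calls in this module; the values are arbitrary
# examples, not recommended settings.
# ---------------------------------------------------------------------------
def example_decoding_opts():
    return {
        'sample_method': 'greedy',   # or 'gumbel', 'top0.9' (nucleus), 'top5' (top-k)
        'beam_size': 5,              # total number of beams
        'group_size': 1,             # > 1 switches on diverse beam search
        'diversity_lambda': 0.5,     # strength of the between-group diversity penalty
        'temperature': 1.0,          # scales logits before sampling / diverse beam search
        'decoding_constraint': 0,    # 1 forbids emitting the previous word again
        'remove_bad_endings': 0,     # 1 discourages ending a beam right after a "bad" word
        'suppress_UNK': 1,           # pushes the UNK log-probability down by a large constant
        'length_penalty': '',        # empty string keeps the default behaviour
    }


if __name__ == '__main__':
    # Hedged smoke test of sample_next_word on toy log-probabilities, assuming the method
    # belongs to the CaptionModel class defined in this file. It does not read `self`,
    # so it is called unbound here purely for illustration.
    toy_logprobs = F.log_softmax(torch.randn(2, 7), dim=1)
    it, logp = CaptionModel.sample_next_word(None, toy_logprobs, 'top0.9', 1.0)
    print(it.shape, logp.shape)  # expected: torch.Size([2]) torch.Size([2, 1])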
|
connect-caption-and-trace-main
|
captioning/models/CaptionModel.py
|
# This file is the implementation for ensemble evaluation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from .CaptionModel import CaptionModel
from .AttModel import pack_wrapper, AttModel
class AttEnsemble(AttModel):
def __init__(self, models, weights=None):
CaptionModel.__init__(self)
# super(AttEnsemble, self).__init__()
self.models = nn.ModuleList(models)
self.vocab_size = models[0].vocab_size
self.seq_length = models[0].seq_length
self.bad_endings_ix = models[0].bad_endings_ix
self.ss_prob = 0
weights = weights or [1.0] * len(self.models)
self.register_buffer('weights', torch.tensor(weights))
def init_hidden(self, batch_size):
state = [m.init_hidden(batch_size) for m in self.models]
return self.pack_state(state)
def pack_state(self, state):
self.state_lengths = [len(_) for _ in state]
return sum([list(_) for _ in state], [])
def unpack_state(self, state):
out = []
for l in self.state_lengths:
out.append(state[:l])
state = state[l:]
return out
def embed(self, it):
return [m.embed(it) for m in self.models]
def core(self, *args):
return zip(*[m.core(*_) for m, _ in zip(self.models, zip(*args))])
def get_logprobs_state(self, it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
state = self.unpack_state(state)
output, state = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks)
logprobs = torch.stack([F.softmax(m.logit(output[i]), dim=1) for i,m in enumerate(self.models)], 2).mul(self.weights).div(self.weights.sum()).sum(-1).log()
return logprobs, self.pack_state(state)
def _prepare_feature(self, *args):
return tuple(zip(*[m._prepare_feature(*args) for m in self.models]))
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
batch_size = fc_feats.size(0)
fc_feats, att_feats, p_att_feats, att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size, self.vocab_size + 1)
        # let's process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats = [fc_feats[i][k:k+1].expand(beam_size, fc_feats[i].size(1)) for i,m in enumerate(self.models)]
tmp_att_feats = [att_feats[i][k:k+1].expand(*((beam_size,)+att_feats[i].size()[1:])).contiguous() for i,m in enumerate(self.models)]
tmp_p_att_feats = [p_att_feats[i][k:k+1].expand(*((beam_size,)+p_att_feats[i].size()[1:])).contiguous() for i,m in enumerate(self.models)]
tmp_att_masks = [att_masks[i][k:k+1].expand(*((beam_size,)+att_masks[i].size()[1:])).contiguous() if att_masks[i] is not None else att_masks[i] for i,m in enumerate(self.models)]
it = fc_feats[0].data.new(beam_size).long().zero_()
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
# return the samples and their log likelihoods
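

# ---------------------------------------------------------------------------
# Added illustration (not part of the original file): a hedged sketch of how an
# ensemble might be assembled from already-trained captioning models. Loading
# and constructing the individual models is assumed to happen elsewhere.
# ---------------------------------------------------------------------------
def build_ensemble_example(models, weights=None):
    # `models` is a list of AttModel-compatible captioners; `weights` are the
    # per-model mixing weights that get_logprobs_state above uses to average
    # the per-word probabilities.
    ensemble = AttEnsemble(models, weights=weights)
    ensemble.eval()
    return ensemble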
|
connect-caption-and-trace-main
|
captioning/models/AttEnsemble.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from . import utils
from .CaptionModel import CaptionModel
class LSTMCore(nn.Module):
def __init__(self, opt):
super(LSTMCore, self).__init__()
self.input_encoding_size = opt.input_encoding_size
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
# Build a LSTM
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
def forward(self, xt, state):
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = torch.max(\
all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size),
all_input_sums.narrow(1, 4 * self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class FCModel(CaptionModel):
def __init__(self, opt):
super(FCModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = opt.seq_length
self.fc_feat_size = opt.fc_feat_size
self.ss_prob = 0.0 # Schedule sampling probability
self.img_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.core = LSTMCore(opt)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
def init_hidden(self, bsz):
weight = self.logit.weight
if self.rnn_type == 'lstm':
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
else:
return weight.new_zeros(self.num_layers, bsz, self.rnn_size)
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
batch_size = fc_feats.size(0)
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = []
if seq_per_img > 1:
fc_feats = utils.repeat_tensors(seq_per_img, fc_feats)
for i in range(seq.size(1) + 1):
if i == 0:
xt = self.img_embed(fc_feats)
else:
                if self.training and i >= 2 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.data.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i-1].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i-1].data.clone()
#prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
#it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i-1].clone()
# break if all the sequences end
if i >= 2 and seq[:, i-1].sum() == 0:
break
xt = self.embed(it)
output, state = self.core(xt, state)
output = F.log_softmax(self.logit(output), dim=1)
outputs.append(output)
return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()
def get_logprobs_state(self, it, state):
        # 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, state)
logprobs = F.log_softmax(self.logit(output), dim=1)
return logprobs, state
def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
batch_size = fc_feats.size(0)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size, self.vocab_size + 1)
        # let's process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
for t in range(2):
if t == 0:
xt = self.img_embed(fc_feats[k:k+1]).expand(beam_size, self.input_encoding_size)
elif t == 1: # input <bos>
it = fc_feats.data.new(beam_size).long().zero_()
xt = self.embed(it)
output, state = self.core(xt, state)
logprobs = F.log_softmax(self.logit(output), dim=1)
self.done_beams[k] = self.beam_search(state, logprobs, opt=opt)
seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
def _sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
            return self._sample_beam(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
seq = fc_feats.new_zeros(batch_size, self.seq_length, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 2):
if t == 0:
xt = self.img_embed(fc_feats)
else:
if t == 1: # input <bos>
it = fc_feats.data.new(batch_size).long().zero_()
xt = self.embed(it)
output, state = self.core(xt, state)
logprobs = F.log_softmax(self.logit(output), dim=1)
# sample the next_word
if t == self.seq_length + 1: # skip if we achieve maximum length
break
if sample_method == 'greedy':
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
else:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
it = torch.multinomial(prob_prev, 1).to(logprobs.device)
sampleLogprobs = logprobs.gather(1, it) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
if t >= 1:
# stop when all finished
if t == 1:
unfinished = it > 0
else:
unfinished = unfinished & (it > 0)
it = it * unfinished.type_as(it)
                seq[:,t-1] = it  # seq[t] is the input at time step t+2
                seqLogprobs[:,t-1] = logprobs  # store the full distribution to match the (batch, seq_length, vocab_size+1) allocation above
if unfinished.sum() == 0:
break
return seq, seqLogprobs
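

# ---------------------------------------------------------------------------
# Added illustration (not part of the original file): a hedged smoke test that
# runs FCModel in teacher-forcing mode on random features. All hyperparameters
# below are arbitrary placeholders, not values from the original training setup.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from types import SimpleNamespace

    toy_opt = SimpleNamespace(vocab_size=10, input_encoding_size=16, rnn_type='lstm',
                              rnn_size=16, num_layers=1, drop_prob_lm=0.5,
                              seq_length=5, fc_feat_size=32)
    model = FCModel(toy_opt)
    model.eval()
    fc_feats = torch.randn(2, toy_opt.fc_feat_size)            # two dummy "images"
    gt_seq = torch.randint(1, toy_opt.vocab_size + 1, (2, 5))  # dummy ground-truth word ids
    with torch.no_grad():
        logprobs = model._forward(fc_feats, None, gt_seq)
    print(logprobs.shape)  # expected: torch.Size([2, 5, 11]) = (batch, seq_length, vocab_size + 1)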
|
connect-caption-and-trace-main
|
captioning/models/FCModel.py
|